Merge "msm: ipa: Add IPA Ethernet endpoints" into msm-4.9
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 405da11..d11af52 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -42,24 +42,26 @@
 will be updated when new workarounds are committed and backported to
 stable kernels.
 
-| Implementor    | Component       | Erratum ID      | Kconfig                 |
-+----------------+-----------------+-----------------+-------------------------+
-| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319    |
-| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319    |
-| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069    |
-| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472    |
-| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719    |
-| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419    |
-| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
-| ARM            | Cortex-A57      | #852523         | N/A                     |
-| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
-| ARM            | Cortex-A72      | #853709         | N/A                     |
-| ARM            | MMU-500         | #841119,#826419 | N/A                     |
-|                |                 |                 |                         |
-| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
-| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
-| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
-| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
-| Cavium         | ThunderX SMMUv2 | #27704          | N/A		       |
-|                |                 |                 |                         |
-| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585     |
+| Implementor    | Component       | Erratum ID      | Kconfig                     |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
+| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
+| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069        |
+| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472        |
+| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719        |
+| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419        |
+| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075        |
+| ARM            | Cortex-A57      | #852523         | N/A                         |
+| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
+| ARM            | Cortex-A72      | #853709         | N/A                         |
+| ARM            | MMU-500         | #841119,#826419 | N/A                         |
+|                |                 |                 |                             |
+| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
+| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
+| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
+| Cavium         | ThunderX SMMUv2 | #27704          | N/A                         |
+|                |                 |                 |                             |
+| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
+|                |                 |                 |                             |
+| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 0cf9a6b..472122f 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -227,49 +227,7 @@
 usage, similar to "ondemand" and "conservative" governors, but with a
 different set of configurable behaviors.
 
-The tunable values for this governor are:
-
-above_hispeed_delay: When speed is at or above hispeed_freq, wait for
-this long before raising speed in response to continued high load.
-The format is a single delay value, optionally followed by pairs of
-CPU speeds and the delay to use at or above those speeds.  Colons can
-be used between the speeds and associated delays for readability.  For
-example:
-
-   80000 1300000:200000 1500000:40000
-
-uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
-200000 uS is used until speed 1.5 GHz, at which speed (and above)
-delay 40000 uS is used.  If speeds are specified these must appear in
-ascending order.  Default is 20000 uS.
-
-boost: If non-zero, immediately boost speed of all CPUs to at least
-hispeed_freq until zero is written to this attribute.  If zero, allow
-CPU speeds to drop below hispeed_freq according to load as usual.
-Default is zero.
-
-boostpulse: On each write, immediately boost speed of all CPUs to
-hispeed_freq for at least the period of time specified by
-boostpulse_duration, after which speeds are allowed to drop below
-hispeed_freq according to load as usual. Its a write-only file.
-
-boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
-on a write to boostpulse, before allowing speed to drop according to
-load as usual.  Default is 80000 uS.
-
-go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
-Default is 99%.
-
-hispeed_freq: An intermediate "high speed" at which to initially ramp
-when CPU load hits the value specified in go_hispeed_load.  If load
-stays high for the amount of time specified in above_hispeed_delay,
-then speed may be bumped higher.  Default is the maximum speed allowed
-by the policy at governor initialization time.
-
-io_is_busy: If set, the governor accounts IO time as CPU busy time.
-
-min_sample_time: The minimum amount of time to spend at the current
-frequency before ramping down. Default is 80000 uS.
+The tuneable values for this governor are:
 
 target_loads: CPU load values used to adjust speed to influence the
 current CPU load toward that value.  In general, the lower the target
@@ -288,6 +246,32 @@
 values also usually appear in an ascending order. The default is
 target load 90% for all speeds.
 
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds.  Colons can
+be used between the speeds and associated delays for readability.  For
+example:
+
+   80000 1300000:200000 1500000:40000
+
+uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
+200000 uS is used until speed 1.5 GHz, at which speed (and above)
+delay 40000 uS is used.  If speeds are specified these must appear in
+ascending order.  Default is 20000 uS.
+
 timer_rate: Sample rate for reevaluating CPU load when the CPU is not
 idle.  A deferrable timer is used, such that the CPU will not be woken
 from idle to service this timer until something else needs to run.
@@ -304,6 +288,65 @@
 when not at lowest speed.  A value of -1 means defer timers
 indefinitely at all speeds.  Default is 80000 uS.
 
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
+align_windows: If non-zero, align the governor timer window to fire at
+multiples of the number of jiffies that timer_rate converts to.
+
+use_sched_load: If non-zero, query the scheduler for CPU busy time
+instead of collecting it directly in the governor. This allows the
+scheduler to adjust the busy time of each CPU to account for known
+information such as migration. If non-zero, this also implies that
+governor sampling windows are aligned across CPUs with the same
+timer_rate, regardless of what align_windows is set to. Default is
+zero.
+
+use_migration_notif: If non-zero, schedule hrtimer to fire in 1ms
+to reevaluate frequency of notified CPU, unless the hrtimer is already
+pending. If zero, ignore scheduler notification. Default is zero.
+
+max_freq_hysteresis: Each time frequency evaluation chooses
+policy->max, the next max_freq_hysteresis microseconds are treated as
+a hysteresis period. During this period, the frequency target will
+not drop below hispeed_freq, no matter how light the actual workload
+is. If the CPU load of any sampling window exceeds go_hispeed_load
+during this period, the governor will directly raise the frequency
+back to policy->max. Default is 0 uS.
+
+ignore_hispeed_on_notif: If non-zero, do not apply hispeed related
+logic if frequency evaluation is triggered by scheduler notification.
+This includes ignoring go_hispeed_load, hispeed_freq in frequency
+selection, and ignoring above_hispeed_delay that prevents frequency
+ramp up. For evaluation triggered by timer, hispeed logic is still
+always applied. ignore_hispeed_on_notif has no effect if
+use_migration_notif is set to zero. Default is zero.
+
+fast_ramp_down: If non-zero, do not apply min_sample_time if
+frequency evaluation is triggered by scheduler notification. For
+evaluation triggered by timer, min_sample_time is still always
+enforced. fast_ramp_down has no effect if use_migration_notif is
+set to zero. Default is zero.
+
+enable_prediction: If non-zero, two frequencies will be calculated
+during each sampling period: one based on busy time in previous sampling
+period (f_prev), and the other based on prediction provided by scheduler
+(f_pred). The max of the two will be selected as the final frequency.
+Hispeed-related logic, including both frequency selection and delay,
+is ignored if enable_prediction is set. If only f_pred, but not
+f_prev, picked policy->max, the max_freq_hysteresis period is not
+started/extended.
+use_sched_load must be turned on before enabling this feature.
+Default is zero.
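+
+For example, assuming these tunables are exposed at the usual global
+sysfs location (with the qcom,governor-per-policy device tree property
+they would instead appear under each policy's own cpufreq directory),
+and using placeholder frequency values, they can be set from userspace
+along these lines:
+
+   echo 1497600 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
+   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
+   echo "20000 1497600:40000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
+   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time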
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
index 6ddc725..a6537eb 100644
--- a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -108,6 +108,8 @@
 - qcom,sysmon-id: platform device id that sysmon is probed with for the subsystem.
 - qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
 			   on behalf of the subsystem driver.
+- qcom,mdm-link-info: a string indicating additional info about the physical link.
+			For example: "devID_domain.bus.slot" in case of PCIe.
 
 Example:
 	mdm0: qcom,mdm0 {
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 0450145..baae281 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -263,6 +263,7 @@
 compatible = "qcom,sdm845-cdp"
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-mtp"
+compatible = "qcom,sdm845-qrd"
 compatible = "qcom,sdm830-sim"
 compatible = "qcom,sdm830-rumi"
 compatible = "qcom,sdm830-cdp"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
new file mode 100644
index 0000000..0a5c0b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
@@ -0,0 +1,55 @@
+Qualcomm Technologies, Inc. QTI Mailbox Protocol
+
+QMP Driver
+===================
+
+Required properties:
+- compatible : should be "qcom,qmp-mbox".
+- label : the name of the remote proc this link connects to.
+- reg : The location and size of shared memory.
+	The irq register base address for triggering interrupts.
+- reg-names : "msgram" - string to identify the shared memory region.
+	"irq-reg-base" - string to identify the irq register region.
+- qcom,irq-mask : the bitmask to trigger an interrupt.
+- interrupt : the receiving interrupt line.
+- mbox-desc-offset : offset of mailbox descriptor from start of the msgram.
+- #mbox-cells: Common mailbox binding property to identify the number of cells
+		required for the mailbox specifier, should be 1.
+
+Optional properties:
+- mbox-offset : offset of the mcore mailbox from the offset of msgram. If this
+			property is not used, qmp will use the configuration
+			provided by the ucore.
+- mbox-size : size of the mcore mailbox. If this property is not used, qmp will
+			use the configuration provided by the ucore.
+
+Example:
+	qmp_aop: qcom,qmp-aop {
+		compatible = "qcom,qmp-mbox";
+		label = "aop";
+		reg = <0xc300000 0x100000>,
+			<0x1799000C 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x1>;
+		interrupt = <0 389 1>;
+		mbox-desc-offset = <0x100>;
+		mbox-offset = <0x500>;
+		mbox-size = <0x400>;
+		#mbox-cells = <1>;
+	};
+
+Mailbox Client
+==============
+"mboxes" and the optional "mbox-names" (please see
+Documentation/devicetree/bindings/mailbox/mailbox.txt for details). Each value
+of the mboxes property should contain a phandle to the mailbox controller
+device node and second argument is the channel index. It must be 0 (qmp
+supports only one channel).The equivalent "mbox-names" property value can be
+used to give a name to the communication channel to be used by the client user.
+
+Example:
+	qmp-client {
+		compatible = "qcom,qmp-client";
+		mbox-names = "aop";
+		mboxes = <&qmp_aop 0>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
index 90ddc27..50488b4 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -49,6 +49,16 @@
 	Value Type: <stringlist>
 	Definition: Address names. Must be "llcc"
 
+- llcc-bank-off:
+	Usage: required
+	Value Type: <u32 array>
+	Definition: Offsets of llcc banks from llcc base address starting from
+		    LLCC bank0.
+
+- llcc-broadcast-off:
+	Usage: required
+	Value Type: <u32>
+	Definition: Offset of broadcast register from LLCC bank0 address.
+
 - #cache-cells:
 	Usage: required
 	Value Type: <u32>
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
new file mode 100644
index 0000000..964fea6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -0,0 +1,476 @@
+Qualcomm Technologies, Inc. OSM Bindings
+
+Operating State Manager (OSM) is a hardware engine used by some Qualcomm
+Technologies, Inc. (QTI) SoCs to manage frequency and voltage scaling
+in hardware. OSM is capable of controlling frequency and voltage requests
+for multiple clusters via the existence of multiple OSM domains.
+
+Properties:
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: must be "qcom,clk-cpu-osm".
+
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Addresses and sizes for the memory of the OSM controller,
+		    cluster PLL management, and APCS common register regions.
+		    Optionally, the address of the efuse registers used to
+		    determine the pwrcl or perfcl speed-bins and/or the ACD
+		    register space to initialize prior to enabling OSM.
+
+- reg-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
+		    "osm_perfcl_base", "l3_pll", "pwrcl_pll", "perfcl_pll",
+		    "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer" or
+		    "apps_itm_ctl". Optionally, "l3_efuse", "pwrcl_efuse"
+		    "perfcl_efuse".
+		    Must be specified in the same order as the corresponding
+		    addresses are specified in the reg property.
+
+- vdd-l3-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the L3 cluster.
+
+- vdd-pwrcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Power cluster.
+
+- vdd-perfcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Performance cluster.
+
+- interrupts
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: OSM interrupt specifier.
+
+- interrupt-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Interrupt names. This list must match up 1-to-1 with the
+		    interrupts specified in the 'interrupts' property.
+		    "pwrcl-irq" and "perfcl-irq" must be specified.
+
+- qcom,l3-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency,
+		    PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the L3 cluster.
+
+- qcom,pwrcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency,
+		    PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Power cluster.
+
+- qcom,perfcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency,
+		    PLL override data, ACC level and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Performance cluster.
+
+- qcom,l3-min-cpr-vc-binX
+	Usage:	    required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the L3 clock domain.
+
+- qcom,pwrcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the power cluster.
+
+- qcom,perfcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the performance cluster.
+
+- qcom,osm-no-tz
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that there is no programming
+		    of the OSM hardware performed by the secure world.
+
+- qcom,osm-pll-setup
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the PLL setup sequence
+		    must be executed for each clock domain managed by the OSM
+		    controller.
+
+- qcom,up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS up timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS down timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,pc-override-index
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the OSM performance index to be used
+		    when each cluster enters certain low power modes.
+
+- qcom,set-ret-inactive
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if domains in retention must
+		    be treated as inactive.
+
+- qcom,enable-llm-freq-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware frequency
+		    votes must be honored by OSM.
+
+- qcom,llm-freq-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-freq-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,enable-llm-volt-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware voltage
+		    votes must be honored by OSM.
+
+- qcom,llm-volt-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-volt-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,cc-reads
+	Usage:      optional
+	Value type: <integer>
+	Definition: Defines the number of times the cycle counters must be
+		    read to determine the performance level of each clock
+		    domain.
+
+- qcom,l-val-base
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the L_VAL
+		    control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,apcs-pll-user-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the PLL
+		    user control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,perfcl-apcs-apm-threshold-voltage
+	Usage:      required
+	Value type: <u32>
+	Definition: Specifies the APM threshold voltage in microvolts.  If the
+		    VDD_APCC supply voltage is above or at this level, then the
+		    APM is switched to use VDD_APCC.  If VDD_APCC is below
+		    this level, then the APM is switched to use VDD_MX.
+
+- qcom,apm-mode-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    control register for each of the two clusters managed
+		    by the OSM controller.
+
+- qcom,apm-status-ctrl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    controller status register for each of the three clock
+		    domains managed by the OSM controller.
+
+- qcom,perfcl-isense-addr
+	Usage:      required
+	Value type: <u32>
+	Definition: Contains the ISENSE register address.
+
+- qcom,l3-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the L3 cluster.
+		    The array must contain exactly three elements.
+
+- qcom,pwrcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Power cluster.
+		    The array must contain exactly three elements.
+
+- qcom,perfcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Performance cluster.
+		    The array must contain exactly three elements.
+
+- qcom,perfcl-apcs-mem-acc-threshold-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the highest MEM ACC threshold voltage in
+		    microvolts for the Performance cluster.  This voltage is
+		    used to determine which MEM ACC setting is used for the
+		    highest frequencies.  If specified, the voltage must match
+		    the MEM ACC threshold voltage specified for the
+		    corresponding CPRh device.
+
+- qcom,apcs-cbc-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_CBC_ADDR
+		    registers for all three clock domains.
+
+- qcom,apcs-ramp-ctl-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_RAMP_CTL_ADDR
+		    registers for all three clock domains.
+
+- qcom,red-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the reduction FSM
+		    should be enabled.
+
+- qcom,boost-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the boost FSM should
+		    be enabled.
+
+- qcom,safe-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the safe FSM should
+		    be enabled.
+
+- qcom,ps-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the PS FSM should be
+		    enabled.
+
+- qcom,droop-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the droop FSM should
+		    be enabled.
+
+- qcom,set-c3-active
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C3 are to
+		    be treated as active for core count calculations.
+
+- qcom,set-c2-active
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C2 are to
+		    be treated as active for core count calculations.
+
+- qcom,disable-cc-dvcs
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if core count based DCVS is
+		    to be disabled.
+
+- qcom,apcs-pll-min-freq
+	Usage:	    required
+	Value type: <prop-encoded-array>
+	Definition: Contains the addresses of the RAILx_CLKDOMy_PLL_MIN_FREQ
+		    registers for the three clock domains.
+
+- clock-names
+	Usage:      required
+	Value type: <string>
+	Definition: Must be "aux_clk".
+
+- clocks
+	Usage:      required
+	Value type: <phandle>
+	Definition: Phandle to the aux clock device.
+
+Example:
+	clock_cpucc: qcom,cpucc@0x17d41000 {
+		compatible = "qcom,clk-cpu-osm";
+		reg = <0x17d41000 0x1400>,
+			<0x17d43000 0x1400>,
+			<0x17d45800 0x1400>,
+			<0x178d0000 0x1000>,
+			<0x178c0000 0x1000>,
+			<0x178b0000 0x1000>,
+			<0x17d42400 0x0c00>,
+			<0x17d44400 0x0c00>,
+			<0x17d46c00 0x0c00>,
+			<0x17810090 0x8>;
+		reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+			"l3_pll", "pwrcl_pll", "perfcl_pll",
+			"l3_sequencer", "pwrcl_sequencer",
+			"perfcl_sequencer", "apps_itm_ctl";
+
+		vdd-l3-supply = <&apc0_l3_vreg>;
+		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+		qcom,l3-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >;
+
+		qcom,pwrcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   748800000 0x501c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003333 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x1 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x1 14 >,
+			<  1440000000 0x403c0e4b 0x00003c3c 0x1 15 >,
+			<  1516800000 0x403c0f4f 0x00004040 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004343 0x2 17 >;
+
+		qcom,perfcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x1 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x1 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x1 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x1 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x2 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x2 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x2 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >;
+
+		qcom,l3-min-cpr-vc-bin0 = <7>;
+		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
+		qcom,perfcl-min-cpr-vc-bin0 = <7>;
+
+		qcom,up-timer =
+			<1000 1000 1000>;
+		qcom,down-timer =
+			<100000 100000 100000>;
+		qcom,pc-override-index =
+			<0 0 0>;
+		qcom,set-ret-inactive;
+		qcom,enable-llm-freq-vote;
+		qcom,llm-freq-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-freq-down-timer =
+			<327675 327675 327675>;
+		qcom,enable-llm-volt-vote;
+		qcom,llm-volt-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-volt-down-timer =
+			<327675 327675 327675>;
+		qcom,cc-reads = <10>;
+		qcom,cc-delay = <5>;
+		qcom,cc-factor = <100>;
+		qcom,osm-clk-rate = <100000000>;
+		qcom,xo-clk-rate = <19200000>;
+
+		qcom,l-val-base =
+			<0x178d0004 0x178c0004 0x178b0004>;
+		qcom,apcs-pll-user-ctl =
+			<0x178d000c 0x178c000c 0x178b000c>;
+		qcom,apcs-pll-min-freq =
+			<0x17d41094 0x17d43094 0x17d45894>;
+		qcom,apm-mode-ctl =
+			<0x0 0x0 0x17d20010>;
+		qcom,apm-status-ctrl =
+			<0x0 0x0 0x17d20000>;
+		qcom,perfcl-isense-addr = <0x17871480>;
+		qcom,l3-mem-acc-addr = <0x17990170 0x17990170 0x17990170>;
+		qcom,pwrcl-mem-acc-addr = <0x17990160 0x17990164 0x17990164>;
+		qcom,perfcl-mem-acc-addr = <0x17990168 0x1799016c 0x1799016c>;
+		qcom,cfg-gfmux-addr = <0x178d0084 0x178c0084 0x178b0084>;
+		qcom,apcs-cbc-addr = <0x178d008c 0x178c008c 0x178b008c>;
+		qcom,apcs-ramp-ctl-addr = <0x17840904 0x17840904 0x17830904>;
+
+		qcom,perfcl-apcs-apm-threshold-voltage = <800000>;
+		qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+		qcom,boost-fsm-en;
+		qcom,safe-fsm-en;
+		qcom,ps-fsm-en;
+		qcom,droop-fsm-en;
+		qcom,osm-no-tz;
+		qcom,osm-pll-setup;
+
+		clock-names = "xo_ao";
+		clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
new file mode 100644
index 0000000..dc93b35
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies Camera Clock & Reset Controller Binding
+----------------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,cam_cc-sdm845"
+- reg : shall contain base register location and length
+- reg-names: names of registers listed in the same order as in
+	     the reg property.
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Optional properties :
+- vdd_<rail>-supply: The logic rail supply.
+
+Example:
+	clock_camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,cam_cc-sdm845";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
new file mode 100644
index 0000000..92828e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
@@ -0,0 +1,23 @@
+Qualcomm Technologies, Inc. Display Clock & Reset Controller Binding
+----------------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,dispcc-sdm845".
+- reg : shall contain base register location and length.
+- reg-names: names of registers listed in the same order as in
+	     the reg property.
+- #clock-cells : shall contain 1.
+- #reset-cells : shall contain 1.
+
+Optional properties :
+- vdd_<rail>-supply: The logic rail supply.
+
+Example:
+	clock_dispcc: qcom,dispcc@af00000 {
+		compatible = "qcom,dispcc-sdm845";
+		reg = <0xaf00000 0x100000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 7405115..d95aa59 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -17,6 +17,7 @@
 			"qcom,gcc-msm8996"
 			"qcom,gcc-mdm9615"
 			"qcom,gcc-sdm845"
+			"qcom,debugcc-sdm845"
 
 - reg : shall contain base register location and length
 - #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
new file mode 100644
index 0000000..f214c58
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -0,0 +1,33 @@
+Qualcomm Technologies, Inc. Graphics Clock & Reset Controller Binding
+--------------------------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+		"qcom,gpucc-sdm845",
+		"qcom,gfxcc-sdm845"
+
+- reg : shall contain base register offset and size.
+- #clock-cells : shall contain 1.
+- #reset-cells : shall contain 1.
+- vdd_<rail>-supply : The logic rail supply.
+
+Optional properties :
+- #power-domain-cells : shall contain 1.
+
+Example:
+	clock_gfx: qcom,gfxcc@5090000 {
+		compatible = "qcom,gfxcc-sdm845";
+		reg = <0x5090000 0x9000>;
+		vdd_gfx-supply = <&pm8005_s1_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_gpucc: qcom,gpucc@5090000 {
+		compatible = "qcom,gpucc-sdm845";
+		reg = <0x5090000 0x9000>;
+		vdd_cx-supply = <&pm8998_s9_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index 15feda3..c801e848 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -12,13 +12,22 @@
   - reg-names: Names of the memory regions defined in reg entry
   - interrupts: Copy engine interrupt table
   - qcom,wlan-msa-memory: MSA memory size
+  - clocks: List of clock phandles
+  - clock-names: List of clock names corresponding to the "clocks" property
   - iommus: SMMUs and corresponding Stream IDs needed by WLAN
   - qcom,wlan-smmu-iova-address: I/O virtual address range as <start length>
     format to be used for allocations associated between WLAN and SMMU
 
 Optional properties:
+  - <supply-name>-supply: phandle to the regulator device tree node.
+			   The supported "supply-name" is "vdd-0.8-cx-mx".
+  - qcom,<supply>-config: Specifies voltage levels for the supply. Should be
+			   specified in pairs (min, max), in units of uV.  An
+			   optional load in uA and a regulator settle delay in
+			   uS can also be specified.
   - qcom,icnss-vadc: VADC handle for vph_pwr read APIs.
   - qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs.
+  - qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
 
 Example:
 
@@ -26,6 +35,8 @@
         compatible = "qcom,icnss";
         reg = <0x0a000000 0x1000000>;
         reg-names = "membase";
+        clocks = <&clock_gcc clk_aggre2_noc_clk>;
+        clock-names = "smmu_aggre2_noc_clk";
         iommus = <&anoc2_smmu 0x1900>,
                  <&anoc2_smmu 0x1901>;
         qcom,wlan-smmu-iova-address = <0 0x10000000>;
@@ -43,4 +54,7 @@
 		   <0 140 0 /* CE10 */ >,
 		   <0 141 0 /* CE11 */ >;
         qcom,wlan-msa-memory = <0x200000>;
+	qcom,smmu-s1-bypass;
+	vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+	qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
     };
diff --git a/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt
new file mode 100644
index 0000000..9427123
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt
@@ -0,0 +1,47 @@
+Qualcomm MSM CPUfreq device
+
+msm-cpufreq is a device that represents the list of usable CPU frequencies
+and provides a device handle for the CPUfreq driver to get the CPU and cache
+clocks.
+
+Required properties:
+- compatible:		Must be "qcom,msm-cpufreq"
+- qcom,cpufreq-table, or qcom,cpufreq-table-<X>:
+			A list of usable CPU frequencies (KHz).
+			Use "qcom,cpufreq-table" if all CPUs in the system
+			should share same list of frequencies.
+			Use "qcom,cpufreq-table-<cpuid>" to describe
+			different CPU freq tables for different CPUs.
+			The table should be listed only for the first CPU
+			if multiple CPUs are synchronous.
+
+Optional properties:
+- clock-names:		When DT based binding of clock is available, this
+			provides a list of CPU subsystem clocks.
+			"cpuX_clk" for every CPU that's present.
+			"l2_clk" when an async cache/CCI is present.
+- qcom,governor-per-policy:	This property denotes that governor tunables
+				should be associated with each cpufreq policy
+				group instead of being global.
+
+Example:
+	qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		qcom,cpufreq-table =
+			<  300000 >,
+			<  422400 >,
+			<  652800 >,
+			<  729600 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1728000 >,
+			< 1958400 >,
+			< 2265600 >;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
new file mode 100644
index 0000000..67dc991
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
@@ -0,0 +1,31 @@
+ARM CPU memory latency monitor device
+
+arm-memlat-mon is a device that represents the use of the PMU in ARM cores
+to measure the parameters for latency driven memory access patterns.
+
+Required properties:
+- compatible:			Must be "qcom,arm-memlat-mon"
+- qcom,cpulist:			List of CPU phandles to be monitored in a cluster
+- qcom,target-dev:		The DT device that corresponds to this master port
+- qcom,core-dev-table:		A mapping table of core frequency to a required bandwidth vote at the
+				given core frequency.
+
+Optional properties:
+- qcom,cachemiss-ev:		The cache miss event that this monitor is supposed to measure.
+				Defaults to 0x17 if not specified.
+- qcom,inst-ev:			The instruction count event that this monitor is supposed to measure.
+				Defaults to 0x08 if not specified.
+
+
+Example:
+	qcom,arm-memlat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1>;
+		qcom,target-dev = <&memlat0>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,inst-ev = <0x08>;
+		qcom,core-dev-table =
+			<  300000 1525>,
+			<  499200 3143>,
+			< 1881600 5859>;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
new file mode 100644
index 0000000..c77f84b
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
@@ -0,0 +1,29 @@
+MSM BIMC bandwidth monitor device
+
+bimc-bwmon is a device that represents the MSM BIMC bandwidth monitors that
+can be used to measure the bandwidth of read/write traffic from the BIMC
+master ports. For example, the CPU subsystem sits on one BIMC master port.
+
+Required properties:
+- compatible:		Must be "qcom,bimc-bwmon", "qcom,bimc-bwmon2",
+			"qcom,bimc-bwmon3" or "qcom,bimc-bwmon4"
+- reg:			Pairs of physical base addresses and region sizes of
+			memory mapped registers.
+- reg-names:		Names of the bases for the above registers. Expected
+			bases are: "base", "global_base"
+- interrupts:		Lists the threshold IRQ.
+- qcom,mport:		The hardware master port that this device can monitor
+- qcom,target-dev:	The DT device that corresponds to this master port
+- qcom,hw-timer-hz:	Hardware sampling rate in Hz. This field must be
+			specified for "qcom,bimc-bwmon4"
+
+Example:
+	qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon";
+		reg = <0xfc388000 0x300>, <0xfc381000 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 183 1>;
+		qcom,mport = <0>;
+		qcom,target-dev = <&cpubw>;
+		qcom,hw-timer-hz = <19200000>;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devbw.txt b/Documentation/devicetree/bindings/devfreq/devbw.txt
new file mode 100644
index 0000000..ece0fa7
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devbw.txt
@@ -0,0 +1,39 @@
+MSM device bandwidth device
+
+devbw is a device that represents a MSM device's BW requirements from its
+master port(s) to a different device's slave port(s) in a MSM SoC. This
+device is typically used to vote for BW requirements from a device's (Eg:
+CPU, GPU) master port(s) to the slave (Eg: DDR) port(s).
+
+Required properties:
+- compatible:		Must be "qcom,devbw"
+- qcom,src-dst-ports:	A list of tuples where each tuple consists of a bus
+			master port number and a bus slave port number.
+- qcom,bw-tbl:		A list of meaningful instantaneous bandwidth values
+			(in MB/s) that can be requested from the device
+			master port to the slave port. The list of values
+			depends on the supported bus/slave frequencies and the
+			bus width.
+
+Optional properties:
+- qcom,active-only:	Indicates that the bandwidth votes need to be
+			enforced only when the CPU subsystem is active.
+- governor:		Initial governor to use for the device.
+			Default: "performance"
+
+Example:
+
+	qcom,cpubw {
+		compatible = "qcom,devbw";
+		qcom,src-dst-ports = <1 512>, <2 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  572 /*  75 MHz */ >,
+			< 1144 /* 150 MHz */ >,
+			< 1525 /* 200 MHz */ >,
+			< 2342 /* 307 MHz */ >,
+			< 3509 /* 460 MHz */ >,
+			< 4684 /* 614 MHz */ >,
+			< 6103 /* 800 MHz */ >,
+			< 7102 /* 931 MHz */ >;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt b/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt
new file mode 100644
index 0000000..6537538
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt
@@ -0,0 +1,53 @@
+Devfreq CPUfreq governor
+
+devfreq-cpufreq is a parent device that contains one or more child devices.
+Each child device provides CPU frequency to device frequency mapping for a
+specific device. Examples of devices that could use this are: DDR, cache and
+CCI.
+
+Parent device name shall be "devfreq-cpufreq".
+
+Required child device properties:
+- cpu-to-dev-map, or cpu-to-dev-map-<X>:
+			A list of tuples where each tuple consists of a
+			CPU frequency (KHz) and the corresponding device
+			frequency. CPU frequencies not listed in the table
+			will use the device frequency that corresponds to the
+			next rounded up CPU frequency.
+			Use "cpu-to-dev-map" if all CPUs in the system should
+			share same mapping.
+			Use cpu-to-dev-map-<cpuid> to describe different
+			mappings for different CPUs. The property should be
+			listed only for the first CPU if multiple CPUs are
+			synchronous.
+- target-dev:		Phandle to device that this mapping applies to.
+
+Example:
+	devfreq-cpufreq {
+		cpubw-cpufreq {
+			target-dev = <&cpubw>;
+			cpu-to-dev-map =
+				<  300000  1144 >,
+				<  422400  2288 >,
+				<  652800  3051 >,
+				<  883200  5996 >,
+				< 1190400  8056 >,
+				< 1497600 10101 >,
+				< 1728000 12145 >,
+				< 2649600 16250 >;
+		};
+
+		cache-cpufreq {
+			target-dev = <&cache>;
+			cpu-to-dev-map =
+				<  300000  300000 >,
+				<  422400  422400 >,
+				<  652800  499200 >,
+				<  883200  576000 >,
+				<  960000  960000 >,
+				< 1497600 1036800 >,
+				< 1574400 1574400 >,
+				< 1728000 1651200 >,
+				< 2649600 1728000 >;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
new file mode 100644
index 0000000..d00ebd8
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
@@ -0,0 +1,48 @@
+Devfreq simple device
+
+devfreq-simple-dev is a device that represents a simple device that cannot do
+any status reporting and uses a clock that can be scaled by one or more
+devfreq governors.  It provides a list of usable frequencies for the device
+and some additional optional parameters.
+
+Required properties:
+- compatible:		Must be "devfreq-simple-dev"
+- clock-names:		Must be "devfreq_clk"
+- clocks:		Must refer to the clock that's fed to the device.
+- freq-tbl-khz:		A list of usable frequencies (in KHz) for the device
+			clock.
+Optional properties:
+- polling-ms:	Polling interval for the device in milliseconds. Default: 50
+- governor:	Initial governor to use for the device. Default: "performance"
+- qcom,prepare-clk:	Prepare the device clock during initialization.
+
+Example:
+
+	qcom,cache {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_krait clk_l2_clk>;
+		polling-ms = <50>;
+		governor = "cpufreq";
+		freq-tbl-khz =
+			<  300000 >,
+			<  345600 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  729600 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1344000 >,
+			< 1420800 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/m4m-hwmon.txt b/Documentation/devicetree/bindings/devfreq/m4m-hwmon.txt
new file mode 100644
index 0000000..d063089
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/m4m-hwmon.txt
@@ -0,0 +1,22 @@
+MSM M4M hardware monitor device
+
+m4m-hwmon is a device that represents the MSM M4M hardware monitors
+that can be used to measure the various types of requests in the MSM M4M.
+
+Required properties:
+- compatible:		Must be "qcom,m4m-hwmon"
+- reg:			Pairs of physical base addresses and region sizes of
+			memory mapped registers.
+- interrupts:		Lists the threshold IRQ.
+- qcom,counter-event-sel: Array of counter and event selection values.
+- qcom,target-dev:	The DT device that is monitored by this MSM M4M
+			counter configuration.
+
+Example:
+	qcom,m4m-hwmon {
+		compatible = "qcom,m4m-hwmon";
+		reg = <0x6530000 0x160>;
+		interrupts = <0 19 4>;
+		qcom,counter-event-sel = <4 0x100>;
+		qcom,target-dev = <&m4m_cache>;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/msmcci-hwmon.txt b/Documentation/devicetree/bindings/devfreq/msmcci-hwmon.txt
new file mode 100644
index 0000000..bce08a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/msmcci-hwmon.txt
@@ -0,0 +1,28 @@
+MSM CCI hardware monitor device
+
+msmcci-hwmon is a device that represents the MSM CCI hardware monitors
+that can be used to measure the various types of requests in the MSM CCI.
+
+Required properties:
+- compatible:		Must be "qcom,msmcci-hwmon"
+- reg:			Pairs of physical base addresses and region sizes of
+			memory mapped registers.
+- interrupts:		Lists the threshold IRQ.
+- qcom,counter-event-sel:	Array of event selection values.
+- qcom,target-dev:	The DT device that is monitored by this MSM CCI
+			counter configuration.
+
+Optional properties:
+- qcom,secure_io:	Indicates register accesses are secured.
+- qcom,shared-irq:	Indicates msmcci-hwmon counters share the interrupt.
+
+Example:
+	qcom,msmcci-hwmon {
+		compatible = "qcom,msmcci-hwmon";
+		reg = <0xf910f000 0xb0>,
+		      <0xf910f004 0xb0>;
+		interrupts = <0 345 4>,
+			     <0 346 4>;
+		qcom,counter-event-sel = <1 2>;
+		qcom,target-dev = <&cci_cache>;
+	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index c38b45c..bc226a7 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -279,6 +279,9 @@
 - qcom,sde-data-bus:		Property to provide Bus scaling for data bus access for
 				mdss blocks.
 
+- qcom,sde-inline-rotator:	A 2-cell property, in the format (rotator phandle,
+				instance id), identifying the inline rotator device.
+
 Bus Scaling Data:
 - qcom,msm-bus,name:		String property describing client name.
 - qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases
@@ -492,6 +495,8 @@
         compatible = "qcom,msm-hdmi-audio-codec-rx";
     };
 
+    qcom,sde-inline-rotator = <&mdss_rotator 0>;
+
     qcom,platform-supply-entries {
        #address-cells = <1>;
        #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 62efecc..3e7fcb7 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -341,6 +341,28 @@
 					2A/2B command.
 - qcom,dcs-cmd-by-left:			Boolean to indicate that dcs command are sent
 					through the left DSI controller only in a dual-dsi configuration
+- qcom,mdss-dsi-panel-hdr-enabled:      Boolean to indicate HDR support in panel.
+- qcom,mdss-dsi-panel-hdr-color-primaries:
+                                        Array of 8 unsigned integers denoting the chromaticity of the panel. These
+                                        values are specified in nits units. The value range is 0 through 50000.
+                                        To obtain the real chromaticity, these values should be divided by a factor
+                                        of 50000. The structure of the array is defined in the order below:
+                                        value 1: x value of white chromaticity of display panel
+                                        value 2: y value of white chromaticity of display panel
+                                        value 3: x value of red chromaticity of display panel
+                                        value 4: y value of red chromaticity of display panel
+                                        value 5: x value of green chromaticity of display panel
+                                        value 6: y value of green chromaticity of display panel
+                                        value 7: x value of blue chromaticity of display panel
+                                        value 8: y value of blue chromaticity of display panel
+- qcom,mdss-dsi-panel-peak-brightness:  Maximum brightness supported by the panel. In the absence of a maximum
+                                        value, the typical value becomes the peak brightness. The value is
+                                        specified in nits units. To obtain the real peak brightness, this value
+                                        should be divided by a factor of 10000.
+- qcom,mdss-dsi-panel-blackness-level:  Blackness level supported by the panel. The blackness level is defined
+                                        as the ratio of peak brightness to contrast. The value is specified in
+                                        nits units. To obtain the real blackness level, this value should be
+                                        divided by a factor of 10000.
 - qcom,mdss-dsi-lp11-init:		Boolean used to enable the DSI clocks and data lanes (low power 11)
 					before issuing hardware reset line.
 - qcom,mdss-dsi-init-delay-us:		Delay in microseconds(us) before performing any DSI activity in lp11
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index b028dda..d0d7fff 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -15,7 +15,7 @@
                         "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
                         "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
                         "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
-                        "qcom,mdss_dsi_pll_8998", "qcom,mdss_dp_pll_8998",
+                        "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
                         "qcom,mdss_hdmi_pll_8998"
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index b6544961..f4b6013 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -152,6 +152,11 @@
 				baseAddr - base address of the gpu channels in the qdss stm memory region
 				size     - size of the gpu stm region
 
+- qcom,gpu-qtimer:
+				<baseAddr size>
+				baseAddr - base address of the qtimer memory region
+				size     - size of the qtimer region
+
 - qcom,tsens-name:
 				Specify the name of GPU temperature sensor. This name will be used
 				to get the temperature from the thermal driver API.
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index f6b7552..a244d6c 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -6,6 +6,10 @@
    * "qcom,i2c-geni.
  - reg: Should contain QUP register address and length.
  - interrupts: Should contain I2C interrupt.
+ - clocks: Serial engine core clock, and AHB clocks needed by the device.
+ - pinctrl-names/pinctrl-0/1: The GPIOs assigned to this core. The names
+   should be "active" and "sleep" for the pin configuration when the core is
+   active or when entering sleep state.
  - #address-cells: Should be <1> Address cells for i2c device address
  - #size-cells: Should be <0> as i2c addresses have no size component
 
@@ -17,6 +21,13 @@
 	compatible = "qcom,i2c-geni";
 	reg = <0xa94000 0x4000>;
 	interrupts = <GIC_SPI 358 0>;
+	clock-names = "se-clk", "m-ahb", "s-ahb";
+	clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+		<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+		<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&qup_1_i2c_5_active>;
+	pinctrl-1 = <&qup_1_i2c_5_sleep>;
 	#address-cells = <1>;
 	#size-cells = <0>;
 };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index ef6c04a..2d971b7a 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -25,6 +25,8 @@
 
 - reg           : Base address and size of the SMMU.
 
+- reg-names	: For the "qcom,qsmmu-v500" device "tcu-base" is expected.
+
 - #global-interrupts : The number of global interrupts exposed by the
                        device.
 
@@ -176,6 +178,9 @@
 		"base" is the main TBU register region.
 		"status-reg" indicates whether hw can process a new request.
 
+- qcom,stream-id-range:
+		Pair of values describing the smallest supported stream-id
+		and the size of the entire set.
 
 Example:
 smmu {
@@ -186,5 +191,6 @@
 			<0x2000 0x8>;
 		reg-names = "base",
 			"status-reg";
+		qcom,stream-id-range = <0x800 0x400>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
index a77a291..1e6aac5 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
@@ -75,6 +75,9 @@
 - qcom,lcd-auto-pfm-thresh	: Specify the auto-pfm threshold, if the headroom voltage level
 				  falls below this threshold and auto PFM is enabled, boost
 				  controller will enter into PFM mode automatically.
+- qcom,lcd-psm-ctrl	: A boolean property to specify if PSM needs to be
+			  controlled dynamically when the WLED module is enabled
+			  or disabled.
 
 Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
 - qcom,loop-comp-res-kohm	: control to select the compensation resistor in kohm. default is 320.
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt b/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
index 4ef34bf..318ef30 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
+++ b/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
@@ -81,6 +81,10 @@
 		ACTIVE_TCS
 		CONTROL_TCS
 	- Cell #2 (Number of TCS): <u32>
+- label:
+	Usage: optional
+	Value type: <string>
+	Definition: Name for the mailbox. The name is used in trace logs.
 
 EXAMPLE 1:
 
@@ -92,6 +96,7 @@
 
 	apps_rsc: mailbox@179e000 {
 		compatible = "qcom,tcs_drv";
+		label = "apps_rsc";
 		reg = <0x179E0000 0x10000>, <0x179E0D00 0x3000>;
 		interrupts = <0 5 0>;
 		#mbox-cells = <1>;
@@ -110,17 +115,18 @@
 Second tuple: 0xAF20000 + 0x1C00
 
 	disp_rsc: mailbox@af20000 {
-			status = "disabled";
-			compatible = "qcom,tcs-drv";
-			reg = <0xAF20000 0x10000>, <0xAF21C00 0x3000>;
-			interrupts = <0 129 0>;
-			#mbox-cells = <1>;
-			qcom,drv-id = <0>;
-			qcom,tcs-config = <SLEEP_TCS 1>,
-					<WAKE_TCS    1>,
-					<ACTIVE_TCS  0>,
-					<CONTROL_TCS 1>;
-		};
+		status = "disabled";
+		label = "disp_rsc";
+		compatible = "qcom,tcs-drv";
+		reg = <0xAF20000 0x10000>, <0xAF21C00 0x3000>;
+		interrupts = <0 129 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <0>;
+		qcom,tcs-config = <SLEEP_TCS 1>,
+				<WAKE_TCS    1>,
+				<ACTIVE_TCS  0>,
+				<CONTROL_TCS 1>;
+	};
 
 
 CLIENT:
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index bd35d80..0295e1b 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -15,6 +15,8 @@
 - clocks:		List of Phandles for clock device nodes
 			needed by the device.
 - clock-names:		List of clock names needed by the device.
+- #list-cells:		Number of rotator cells, must be 1
+
 Bus Scaling Data:
 - qcom,msm-bus,name:		String property describing rotator client.
 - qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases
@@ -81,6 +83,17 @@
 				  priority for rotator clients.
 - qcom,mdss-rot-mode:		This integer value indicates the operation mode
 				of the rotator device.
+- qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
+- qcom,mdss-rot-linewidth:	This integer value indicates rotator line width supported in pixels.
+- cache-slice-names:		A set of names identifying the use cases of a client that uses a
+				cache slice. These strings are used to look up the cache slice
+				entries by name.
+- cache-slices:			A tuple whose first argument is the phandle to the llcc device and
+				whose second argument is the use case id of the client.
+- qcom,sde-ubwc-malsize:	A u32 property to specify the default UBWC
+				minimum allowable length configuration value.
+- qcom,sde-ubwc-swizzle:	A u32 property to specify the default UBWC
+				swizzle configuration value.
 
 Subnode properties:
 - compatible:		Compatible name used in smmu v2.
@@ -102,6 +115,9 @@
 		reg = <0xfd900000 0x22100>,
 			<0xfd925000 0x1000>;
 		reg-names = "mdp_phys", "rot_vbif_phys";
+
+		#list-cells = <1>;
+
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
@@ -115,6 +131,8 @@
 		clock-names = "iface_clk", "rot_core_clk";
 
 		qcom,mdss-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-malsize = <0>;
+		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
@@ -131,6 +149,10 @@
 		qcom,mdss-default-ot-rd-limit = <8>;
 		qcom,mdss-default-ot-wr-limit = <16>;
 
+		qcom,mdss-sbuf-headroom = <20>;
+		cache-slice-names = "rotator";
+		cache-slices = <&llcc 4>;
+
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
 			iommus = <&mdp_smmu 0xe00>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index b894c31..6d72e8b 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -16,25 +16,6 @@
 - interrupts : should contain the vidc interrupt.
 - qcom,platform-version : mask and shift of the platform version bits
     in efuse register.
-- qcom,load-freq-tbl : load (in macroblocks/sec) and corresponding vcodec
-  clock required along with codec's config, which is a bitmap that describes
-  what the clock is used for. The bitmaps are as follows:
-    supports mvc encoder = 0x00000001
-    supports mvc decoder = 0x00000003
-    supports h264 encoder = 0x00000004
-    supports h264 decoder = 0x0000000c
-    supports mpeg1 encoder = 0x00000040
-    supports mpeg1 decoder = 0x000000c0
-    supports mpeg2 encoder = 0x00000100
-    supports mpeg2 decoder = 0x00000300
-    supports vp6 encoder = 0x00100000
-    supports vp6 decoder = 0x00300000
-    supports vp7 encoder = 0x00400000
-    supports vp7 decoder = 0x00c00000
-    supports vp8 encoder = 0x01000000
-    supports vp8 decoder = 0x03000000
-    supports hevc encoder = 0x04000000
-    supports hevc decoder = 0x0c000000
 - qcom,reg-presets : list of offset-value pairs for registers to be written.
   The offsets are from the base offset specified in 'reg'. This is mainly
   used for QoS, VBIF, etc. presets for video.
@@ -57,9 +38,26 @@
 - qcom,clock-freq-tbl = node containing individual domain nodes, each with:
      - qcom,codec-mask: a bitmap of supported codec types, every two bits
        represents a codec type.
+         supports mvc encoder = 0x00000001
+         supports mvc decoder = 0x00000003
+         supports h264 encoder = 0x00000004
+         supports h264 decoder = 0x0000000c
+         supports mpeg1 encoder = 0x00000040
+         supports mpeg1 decoder = 0x000000c0
+         supports mpeg2 encoder = 0x00000100
+         supports mpeg2 decoder = 0x00000300
+         supports vp6 encoder = 0x00100000
+         supports vp6 decoder = 0x00300000
+         supports vp7 encoder = 0x00400000
+         supports vp7 decoder = 0x00c00000
+         supports vp8 encoder = 0x01000000
+         supports vp8 decoder = 0x03000000
+         supports hevc encoder = 0x04000000
+         supports hevc decoder = 0x0c000000
      - qcom,cycles-per-mb: number of cycles required to process each macro
        block.
-     - qcom,low-power-mode-factor: the factor which needs to be multiple with
+     - qcom,low-power-cycles-per-mb: number of cycles required to process each
+       macro block in low power mode.
-       the required frequency to get the final frequency, the factor is
-       represented in Q16 format.
 - qcom,sw-power-collapse = A bool indicating if video hardware core can be
@@ -167,13 +165,6 @@
 		venus-supply = <&gdsc>;
 		venus-core0-supply = <&gdsc1>;
 		venus-core1-supply = <&gdsc2>;
-		qcom,load-freq-tbl =
-			<489600 266670000 0x030fcfff>, /* Legacy decoder 1080p 60fps  */
-			<108000 133330000 0x030fcfff>, /* Legacy decoder 720p 30fps   */
-			<108000 200000000 0x01000414>, /* Legacy encoder 720p 30fps   */
-			<72000 133330000 0x0c000000>, /* HEVC decoder VGA 60fps   */
-			<36000 133330000 0x0c000000>, /* HEVC VGA 30 fps  */
-			<36000 133330000 0x01000414>; /* Legacy encoder VGA 30 fps   */
 		qcom,hfi-version = "3xx";
 		qcom,reg-presets = <0x80004 0x1>,
 			<0x80178 0x00001FFF>;
@@ -190,6 +181,7 @@
 		qcom,use-non-secure-pil;
 		qcom,use_dynamic_bw_update;
 		qcom,fw-bias = <0xe000000>;
+		qcom,allowed-clock-rates = <200000000 300000000 400000000>;
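+		/*
+		 * Illustrative clock-freq-tbl subnode showing how the
+		 * qcom,codec-mask, qcom,cycles-per-mb and
+		 * qcom,low-power-cycles-per-mb properties described above fit
+		 * together. The subnode names, masks and cycle counts are
+		 * placeholders, not tuning data for any real target.
+		 */
+		qcom,clock-freq-tbl {
+			qcom,profile-dec {
+				qcom,codec-mask = <0x0c00000c>; /* h264 + hevc decoder */
+				qcom,cycles-per-mb = <200>;
+				qcom,low-power-cycles-per-mb = <100>;
+			};
+			qcom,profile-enc {
+				qcom,codec-mask = <0x04000004>; /* h264 + hevc encoder */
+				qcom,cycles-per-mb = <400>;
+			};
+		};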
 		msm_vidc_cb1: msm_vidc_cb1 {
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_ns";
diff --git a/Documentation/devicetree/bindings/misc/qpnp-misc.txt b/Documentation/devicetree/bindings/misc/qpnp-misc.txt
new file mode 100644
index 0000000..a34cbde
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/qpnp-misc.txt
@@ -0,0 +1,25 @@
+QPNP-MISC
+
+QPNP-MISC provides a way to read the PMIC part number and revision.
+
+Required properties:
+- compatible : should be "qcom,qpnp-misc"
+- reg : offset and length of the PMIC peripheral register map.
+
+Optional properties:
+- qcom,pwm-sel:			Select PWM source. Possible values:
+				0: LOW
+				1: PWM1_in
+				2: PWM2_in
+				3: PWM1_in & PWM2_in
+- qcom,enable-gp-driver:	Enable the GP driver. Should only be specified
+				if a non-zero PWM source is specified under
+				"qcom,pwm-sel" property.
+
+Example:
+	qcom,misc@900 {
+		compatible = "qcom,qpnp-misc";
+		reg = <0x900 0x100>;
+		qcom,pwm-sel = <2>;
+		qcom,enable-gp-driver;
+	};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 485483a..6111c88 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -1,55 +1,81 @@
-* Qualcomm SDHCI controller (sdhci-msm)
+Qualcomm Technologies, Inc. Standard Secure Digital Host Controller (SDHC)
 
-This file documents differences between the core properties in mmc.txt
-and the properties used by the sdhci-msm driver.
+The Secure Digital Host Controller provides a standard host interface to SD/MMC/SDIO cards.
 
 Required properties:
-- compatible: Should contain "qcom,sdhci-msm-v4".
-- reg: Base address and length of the register in the following order:
-	- Host controller register map (required)
-	- SD Core register map (required)
-- interrupts: Should contain an interrupt-specifiers for the interrupts:
-	- Host controller interrupt (required)
-- pinctrl-names: Should contain only one value - "default".
-- pinctrl-0: Should specify pin control groups used for this controller.
-- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
-- clock-names: Should contain the following:
-	"iface" - Main peripheral bus clock (PCLK/HCLK - AHB Bus clock) (required)
-	"core"	- SDC MMC clock (MCLK) (required)
-	"bus"	- SDCC bus voter clock (optional)
+  - compatible : should be "qcom,sdhci-msm"
+  - reg : should contain SDHC, SD Core register map.
+  - reg-names : indicates various resources passed to the driver (via the reg property) by name.
+		Required "reg-names" are "hc_mem" and "core_mem".
+  - interrupts : should contain SDHC interrupts.
+  - interrupt-names : indicates interrupts passed to the driver (via the interrupts property) by name.
+		      Required "interrupt-names" are "hc_irq" and "pwr_irq".
+  - <supply-name>-supply: phandle to the regulator device tree node.
+			  Required supply names are "vdd" and "vdd-io".
+
+Required alias:
+- The slot number is specified via an alias with the following format
+	'sdhc{n}' where n is the slot number.
+
+Optional Properties:
+	- interrupt-names - "status_irq". This status_irq will be used for card
+			     detection.
+	- qcom,bus-width - defines the bus I/O width that controller supports.
+			   Units - number of bits. The valid bus-width values are
+			   1, 4 and 8.
+	- qcom,nonremovable - specifies whether the card in slot is
+			      hot pluggable or hard wired.
+	- qcom,bus-speed-mode - specifies supported bus speed modes by host.
+				The supported bus speed modes are :
+				"HS200_1p8v" - indicates that host can support HS200 at 1.8v.
+				"HS200_1p2v" - indicates that host can support HS200 at 1.2v.
+				"DDR_1p8v" - indicates that host can support DDR mode at 1.8v.
+				"DDR_1p2v" - indicates that host can support DDR mode at 1.2v.
+
+In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
+	- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
+	- qcom,<supply>-lpm-sup - specifies whether supply can be kept in low power mode (lpm).
+	- qcom,<supply>-voltage-level - specifies voltage levels for supply. Should be
+					specified in pairs (min, max), units uV.
+	- qcom,<supply>-current-level - specifies load levels for supply in lpm or
+					high power mode (hpm). Should be specified in
+					pairs (lpm, hpm), units uA.
+
+	- gpios - specifies gpios assigned for sdhc slot.
+	- qcom,gpio-names -  a list of strings that map in order to the list of gpios
 
 Example:
 
-	sdhc_1: sdhci@f9824900 {
-		compatible = "qcom,sdhci-msm-v4";
-		reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
-		interrupts = <0 123 0>;
-		bus-width = <8>;
-		non-removable;
-
-		vmmc-supply = <&pm8941_l20>;
-		vqmmc-supply = <&pm8941_s3>;
-
-		pinctrl-names = "default";
-		pinctrl-0 = <&sdc1_clk &sdc1_cmd &sdc1_data>;
-
-		clocks = <&gcc GCC_SDCC1_APPS_CLK>, <&gcc GCC_SDCC1_AHB_CLK>;
-		clock-names = "core", "iface";
+	aliases {
+		sdhc1 = &sdhc_1;
 	};
 
-	sdhc_2: sdhci@f98a4900 {
-		compatible = "qcom,sdhci-msm-v4";
-		reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
-		interrupts = <0 125 0>;
-		bus-width = <4>;
-		cd-gpios = <&msmgpio 62 0x1>;
+	sdhc_1: qcom,sdhc@f9824900 {
+		compatible = "qcom,sdhci-msm";
+		reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
+		reg-names = "hc_mem", "core_mem";
+		interrupts = <0 123 0>, <0 138 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
 
-		vmmc-supply = <&pm8941_l21>;
-		vqmmc-supply = <&pm8941_l13>;
+		vdd-supply = <&pm8941_l21>;
+		vdd-io-supply = <&pm8941_l13>;
+		qcom,vdd-voltage-level = <2950000 2950000>;
+		qcom,vdd-current-level = <9000 800000>;
 
-		pinctrl-names = "default";
-		pinctrl-0 = <&sdc2_clk &sdc2_cmd &sdc2_data>;
+		qcom,vdd-io-always-on;
+		qcom,vdd-io-lpm-sup;
+		qcom,vdd-io-voltage-level = <1800000 2950000>;
+		qcom,vdd-io-current-level = <6 22000>;
 
-		clocks = <&gcc GCC_SDCC2_APPS_CLK>, <&gcc GCC_SDCC2_AHB_CLK>;
-		clock-names = "core", "iface";
+		qcom,bus-width = <4>;
+		qcom,nonremovable;
+		qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+		gpios = <&msmgpio 40 0>, /* CLK */
+			<&msmgpio 39 0>, /* CMD */
+			<&msmgpio 38 0>, /* DATA0 */
+			<&msmgpio 37 0>, /* DATA1 */
+			<&msmgpio 36 0>, /* DATA2 */
+			<&msmgpio 35 0>; /* DATA3 */
+		qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
 	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
new file mode 100644
index 0000000..57510ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
@@ -0,0 +1,154 @@
+Qualcomm Technologies, Inc. LPI GPIO controller driver
+
+This DT binding describes the GPIO controller driver
+that supports the LPI (Low Power Island) TLMM
+found on QTI chipsets.
+
+The following properties are for the LPI GPIO controller device main node.
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be "qcom,lpi-pinctrl"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Register base of the GPIO controller and length.
+
+- qcom,num-gpios:
+	Usage: required
+	Value type: <u32>
+	Definition: Number of GPIOs supported by the controller.
+
+- gpio-controller:
+	Usage: required
+	Value type: <none>
+	Definition: Used to mark the device node as a GPIO controller.
+
+- #gpio-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: Must be 2;
+		    The first cell will be used to define gpio number and the
+		    second denotes the flags for this gpio.
+
+Please refer to ../gpio/gpio.txt for general description of GPIO bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin or a list of pins. This configuration can include the
+mux function to select on those pin(s), and various pin configuration
+parameters, as listed below.
+
+SUBNODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+	Usage: required
+	Value type: <string-array>
+	Definition: List of gpio pins affected by the properties specified in
+		    this subnode.  Valid pins are: gpio0-gpio31 for LPI.
+
+- function:
+	Usage: required
+	Value type: <string>
+	Definition: Specify the alternative function to be configured for the
+		    specified pins. Valid values are:
+			"gpio",
+			"func1",
+			"func2",
+			"func3",
+			"func4",
+			"func5"
+
+- bias-disable:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as pull down.
+
+- bias-bus-hold:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as bus-keeper mode.
+
+- bias-pull-up:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as pull up.
+
+- input-enable:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins are put in input mode.
+
+- output-high:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins are configured in output mode, driven
+		    high.
+
+- output-low:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins are configured in output mode, driven
+		    low.
+
+- qcom,drive-strength:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects the drive strength for the specified pins.
+
+Example:
+
+	lpi_tlmm: lpi_pinctrl@152c000 {
+		compatible = "qcom,lpi-pinctrl";
+		qcom,num-gpios = <32>;
+		reg = <0x152c000 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		hph_comp_active: hph_comp_active {
+			mux {
+				pins = "gpio22";
+				function = "func1";
+			};
+
+			config {
+				pins = "gpio22";
+				output-high;
+				qcom,drive-strength = <8>;
+			};
+		};
+
+		hph_comp_sleep: hph_comp_sleep {
+			mux {
+				pins = "gpio22";
+				function = "func1";
+			};
+
+			config {
+				pins = "gpio22";
+				qcom,drive-strength = <2>;
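+				/*
+				 * Illustrative use of the bias properties documented
+				 * above; a real sleep configuration may differ.
+				 */
+				bias-pull-down;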
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 521c783..c01036d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -43,6 +43,17 @@
 		    the first cell will be used to define gpio number and the
 		    second denotes the flags for this gpio
 
+- qcom,gpios-disallowed:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: Array of the GPIO hardware numbers corresponding to GPIOs
+		    which the APSS processor is not allowed to configure.
+		    The hardware numbers are indexed from 1.
+		    The interrupt resources for these GPIOs must not be defined
+		    in "interrupts" and "interrupt-names" properties.
+		    GPIOs defined in this array won't be registered as pins
+		    in the pinctrl device or gpios in the gpio chip.
+
 Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
 a general description of GPIO and interrupt bindings.
 
@@ -233,6 +244,7 @@
 
 		gpio-controller;
 		#gpio-cells = <2>;
+		qcom,gpios-disallowed = <1 20>;
 
 		pm8921_gpio_keys: gpio-keys {
 			volume-keys {
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
index c7024e0..d8934c0 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -10,9 +10,13 @@
 - qcom,ipa-loaduC: indicate that ipa uC should be loaded
 - qcom,ipa-advertise-sg-support: determine how to respond to a query
 regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag to indicate whether
+                        the NAPI framework should be enabled
+- qcom,wan-rx-desc-size: size of WAN rx desc fifo ring, default is 256
 
 Example:
 	qcom,rmnet-ipa {
 		compatible = "qcom,rmnet-ipa";
+		qcom,wan-rx-desc-size = <256>;
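+		/* Optional NAPI enable flag described above (illustrative) */
+		qcom,ipa-napi-enable;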
 	}
 
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
index 3f55312..e9575f1 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -10,9 +10,13 @@
 - qcom,ipa-loaduC: indicate that ipa uC should be loaded
 - qcom,ipa-advertise-sg-support: determine how to respond to a query
 regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag to indicate whether
+                        the NAPI framework should be enabled
+- qcom,wan-rx-desc-size: size of WAN rx desc fifo ring, default is 256
 
 Example:
 	qcom,rmnet-ipa3 {
 		compatible = "qcom,rmnet-ipa3";
+		qcom,wan-rx-desc-size = <256>;
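+		/* Optional NAPI enable flag described above (illustrative) */
+		qcom,ipa-napi-enable;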
 	}
 
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
new file mode 100644
index 0000000..9638888
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -0,0 +1,422 @@
+Qualcomm Technologies, Inc. QPNP PMIC Fuel Gauge Gen3 Device
+
+QPNP PMIC FG Gen3 device provides interface to the clients to read properties
+related to the battery. Its main function is to retrieve the State of Charge
+(SOC), in percentage scale representing the amount of charge left in the
+battery.
+
+=======================
+Required Node Structure
+=======================
+
+FG Gen3 device must be described in two levels of device nodes.  The first
+level describes the FG Gen3 device.  The second level describes one or more
+peripherals managed by FG Gen3 driver. All the peripheral specific parameters
+such as base address, interrupts etc., should be under second level node.
+
+====================================
+First Level Node - FG Gen3 device
+====================================
+
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: Should be "qcom,fg-gen3".
+
+- qcom,pmic-revid
+	Usage:      required
+	Value type: <phandle>
+	Definition: Should specify the phandle of PMIC revid module. This is
+		    used to identify the PMIC subtype.
+
+- io-channels
+- io-channel-names
+	Usage:      required
+	Value type: <phandle>
+	Definition: For details about IIO bindings see:
+		    Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+- qcom,rradc-base
+	Usage:      required
+	Value type: <u32>
+	Definition: Should specify the base address of RR_ADC peripheral. This
+		    is used for reading certain peripheral registers under it.
+
+- qcom,fg-cutoff-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage (in mV) where the fuel gauge will steer the SOC
+		    to be zero. For example, if the cutoff voltage is set to
+		    3400mV, the fuel gauge will try to count SOC so that the
+		    battery SOC will be 0 when it is 3400mV. If this property
+		    is not specified, then the default value used will be
+		    3200mV.
+
+- qcom,fg-empty-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) based on which the empty soc
+		    interrupt will be triggered. When the empty soc interrupt
+		    fires, battery soc will be set to 0 and the userspace will
+		    be notified via the power supply framework. The userspace
+		    will read 0% soc and immediately shut down. If this property
+		    is not specified, then the default value used will be
+		    2800mV.
+
+- qcom,fg-vbatt-low-thr
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) which, when set, is used
+		    to configure the low battery voltage threshold.
+
+- qcom,fg-recharge-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) based on which the charging
+		    will be resumed once the charging is complete. If this
+		    property is not specified, then the default value will be
+		    4250mV.
+
+- qcom,fg-chg-term-current
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery current (in mA) at which the fuel gauge will issue
+		    an end of charge if the charger is configured to use the
+		    fuel gauge ADC for end of charge detection. If this
+		    property is not specified, then the default value used
+		    will be 100mA.
+
+- qcom,fg-sys-term-current
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery current (in mA) at which the fuel gauge will try to
+		    scale towards 100%. When the charge current goes above this,
+		    the SOC should be at 100%. If this property is not
+		    specified, then the default value used will be -125mA.
+		    This value has to be specified as a negative number since it
+		    represents charging current.
+
+- qcom,fg-delta-soc-thr
+	Usage:      optional
+	Value type: <u32>
+	Definition: Percentage of SOC increase upon which the delta monotonic &
+		    battery SOC interrupts will be triggered. If this property
+		    is not specified, then the default value will be 1.
+		    Possible values are in the range of 0 to 12.
+
+- qcom,fg-recharge-soc-thr
+	Usage:      optional
+	Value type: <u32>
+	Definition: Percentage of monotonic SOC upon which charging will be
+		    resumed once the charging is complete. If this
+		    property is not specified, then the default value will be
+		    95.
+
+- qcom,fg-rsense-sel
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the source of sense resistor.
+		    Allowed values are:
+		    0 - Rsense is from Battery FET
+		    2 - Rsense is Battery FET and SMB
+		    Option 2 can be used only when a parallel charger is
+		    present. If this property is not specified, then the
+		    default value will be 2.
+
+- qcom,fg-jeita-thresholds
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: A list of integers which holds the jeita thresholds (degC)
+		    in the following order. Allowed size is 4.
+		    Element 0 - JEITA cold threshold
+		    Element 1 - JEITA cool threshold
+		    Element 2 - JEITA warm threshold
+		    Element 3 - JEITA hot threshold
+		    If these parameters are not specified, then the default
+		    values used will be 0, 5, 45, 50.
+
+- qcom,fg-esr-timer-charging
+	Usage:      optional
+	Value type: <u32>
+	Definition: Number of cycles between ESR pulses while the battery is
+		    charging.
+
+- qcom,fg-esr-timer-awake
+	Usage:      optional
+	Value type: <u32>
+	Definition: Number of cycles between ESR pulses while the system is
+		    awake and the battery is discharging.
+
+- qcom,fg-esr-timer-asleep
+	Usage:      optional
+	Value type: <u32>
+	Definition: Number of cycles between ESR pulses while the system is
+		    asleep and the battery is discharging. This option requires
+		    qcom,fg-esr-timer-awake to be defined.
+
+- qcom,cycle-counter-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Enables the cycle counter feature.
+
+- qcom,fg-force-load-profile
+	Usage:      optional
+	Value type: <empty>
+	Definition: If set, battery profile will be force loaded if the profile
+		    loaded earlier by bootloader doesn't match with the profile
+		    available in the device tree.
+
+- qcom,cl-start-capacity
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery SOC threshold to start the capacity learning.
+		    If this is not specified, then the default value used
+		    will be 15.
+
+- qcom,cl-min-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Lower limit of battery temperature to start the capacity
+		    learning. If this is not specified, then the default value
+		    used will be 150. Unit is in decidegC.
+
+- qcom,cl-max-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Upper limit of battery temperature to start the capacity
+		    learning. If this is not specified, then the default value
+		    used will be 450 (45C). Unit is in decidegC.
+
+- qcom,cl-max-increment
+	Usage:      optional
+	Value type: <u32>
+	Definition: Maximum capacity increment allowed per capacity learning
+		    cycle. If this is not specified, then the default value
+		    used will be 5 (0.5%). Unit is in decipercentage.
+
+- qcom,cl-max-decrement
+	Usage:      optional
+	Value type: <u32>
+	Definition: Maximum capacity decrement allowed per capacity learning
+		    cycle. If this is not specified, then the default value
+		    used will be 100 (10%). Unit is in decipercentage.
+
+- qcom,cl-min-limit
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum limit that the capacity cannot go below in a
+		    capacity learning cycle. If this is not specified, then
+		    the default value is 0. Unit is in decipercentage.
+
+- qcom,cl-max-limit
+	Usage:      optional
+	Value type: <u32>
+	Definition: Maximum limit that the capacity cannot go above in a
+		    capacity learning cycle. If this is not specified, then
+		    the default value is 0. Unit is in decipercentage.
+
+- qcom,battery-thermal-coefficients
+	Usage:      optional
+	Value type: <u8>
+	Definition: Byte array of battery thermal coefficients.
+		    This should be exactly 3 bytes in length.
+
+- qcom,fg-jeita-hyst-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Hysteresis applied to Jeita temperature comparison.
+		    Possible values are:
+			0 - No hysteresis
+			1,2,3 - Value in Celsius.
+
+- qcom,fg-batt-temp-delta
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery temperature delta interrupt threshold. Possible
+		    values are: 2, 4, 6 and 10. Unit is in Kelvin.
+
+- qcom,hold-soc-while-full
+	Usage:      optional
+	Value type: <empty>
+	Definition: A boolean property that when defined holds SOC at 100% when
+		    the battery is full.
+
+- qcom,ki-coeff-soc-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of monotonic SOC threshold values to change the ki
+		    coefficient for medium discharge current during discharge.
+		    This should be defined in the ascending order and in the
+		    range of 0-100. Array limit is set to 3.
+
+- qcom,ki-coeff-med-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of ki coefficient values for medium discharge current
+		    during discharge. These values will be applied when the
+		    monotonic SOC goes below the SOC threshold specified under
+		    qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+		    property should be specified if qcom,ki-coeff-soc-dischg
+		    is specified to make it fully functional. Value has no
+		    unit. Allowed range is 0 to 62200 in micro units.
+
+- qcom,ki-coeff-hi-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of ki coefficient values for high discharge current
+		    during discharge. These values will be applied when the
+		    monotonic SOC goes below the SOC threshold specified under
+		    qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+		    property should be specified if qcom,ki-coeff-soc-dischg
+		    is specified to make it fully functional. Value has no
+		    unit. Allowed range is 0 to 62200 in micro units.
+
+- qcom,fg-rconn-mohms
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery connector resistance (Rconn) in milliohms. If Rconn
+		    is specified, then ESR to Rslow scaling factors will be
+		    updated to account for it, yielding a more accurate ESR.
+
+- qcom,fg-esr-clamp-mohms
+	Usage:      optional
+	Value type: <u32>
+	Definition: Equivalent series resistance (ESR) in milliohms. If this
+		    is specified, then ESR will be clamped to this value when
+		    ESR is found to be dropping below this. Default value is
+		    20.
+
+- qcom,fg-esr-filter-switch-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery temperature threshold below which low temperature
+		    ESR filter coefficients will be switched to normal
+		    temperature ESR filter coefficients. If this is not
+		    specified, then the default value used will be 100. Unit is
+		    in decidegC.
+
+- qcom,fg-esr-tight-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for ESR tight filter. If this is
+		    not specified, then a default value of 3907 (0.39 %) will
+		    be used. Lowest possible value is 1954 (0.19 %).
+
+- qcom,fg-esr-broad-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for ESR broad filter. If this is
+		    not specified, then a default value of 99610 (9.96 %) will
+		    be used. Lowest possible value is 1954 (0.19 %).
+
+- qcom,fg-esr-tight-lt-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for low temperature ESR tight
+		    filter. If this is not specified, then a default value of
+		    48829 (4.88 %) will be used. Lowest possible value is 1954
+		    (0.19 %).
+
+- qcom,fg-esr-broad-lt-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for low temperature ESR broad
+		    filter. If this is not specified, then a default value of
+		    148438 (14.84 %) will be used. Lowest possible value is
+		    1954 (0.19 %).
+
+- qcom,fg-auto-recharge-soc
+	Usage:      optional
+	Value type: <empty>
+	Definition: A boolean property which, when defined, configures the
+		    automatic recharge SOC threshold. If not specified, the automatic
+		    recharge voltage threshold will be configured. This has
+		    to be configured in conjunction with the charger side
+		    configuration for proper functionality.
+
+- qcom,slope-limit-temp-threshold
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery temperature threshold to decide when slope limit
+		    coefficients should be applied along with charging status.
+		    Unit is in decidegC.
+
+- qcom,slope-limit-coeffs
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: A list of integers which holds the slope limit coefficients
+		    in the following order. Allowed size is 4. Possible values
+		    are from 0 to 31. Unit is in decipercentage.
+		    Element 0 - Low temperature discharging
+		    Element 1 - Low temperature charging
+		    Element 2 - High temperature discharging
+		    Element 3 - High temperature charging
+		    These coefficients have to be specified along with the
+		    property "qcom,slope-limit-temp-threshold" to make dynamic
+		    slope limit adjustment functional.
+
+==========================================================
+Second Level Nodes - Peripherals managed by FG Gen3 driver
+==========================================================
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Addresses and sizes for the specified peripheral
+
+- interrupts
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Interrupt mapping as per the interrupt encoding
+
+- interrupt-names
+	Usage:      optional
+	Value type: <stringlist>
+	Definition: Interrupt names.  This list must match up 1-to-1 with the
+		    interrupts specified in the 'interrupts' property.
+
+========
+Example
+========
+
+pmi8998_fg: qpnp,fg {
+	compatible = "qcom,fg-gen3";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	qcom,pmic-revid = <&pmi8998_revid>;
+	io-channels = <&pmi8998_rradc 3>;
+	io-channel-names = "rradc_batt_id";
+	qcom,rradc-base = <0x4500>;
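+	/*
+	 * Illustrative thresholds taken from the documented defaults above;
+	 * they are placeholders rather than tuning data for a specific battery.
+	 */
+	qcom,fg-cutoff-voltage = <3200>;
+	qcom,fg-empty-voltage = <2800>;
+	qcom,fg-sys-term-current = <(-125)>;
+	qcom,fg-recharge-soc-thr = <95>;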
+	qcom,ki-coeff-soc-dischg = <30 60 90>;
+	qcom,ki-coeff-med-dischg = <800 1000 1400>;
+	qcom,ki-coeff-hi-dischg = <1200 1500 2100>;
+	qcom,slope-limit-temp-threshold = <100>;
+	qcom,slope-limit-coeffs = <10 11 12 13>;
+	qcom,battery-thermal-coefficients = [9d 50 ff];
+	status = "okay";
+
+	qcom,fg-batt-soc@4000 {
+		status = "okay";
+		reg = <0x4000 0x100>;
+		interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
+			     <0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
+			     <0x2 0x40 0x2 IRQ_TYPE_EDGE_BOTH>,
+			     <0x2 0x40 0x3 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "soc-update",
+				  "soc-ready",
+				  "bsoc-delta",
+				  "msoc-delta";
+
+	};
+
+	qcom,fg-batt-info@4100 {
+		status = "okay";
+		reg = <0x4100 0x100>;
+		interrupts = <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "batt-missing";
+	};
+
+	qcom,fg-memif@4400 {
+		status = "okay";
+		reg = <0x4400 0x100>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo.txt
new file mode 100644
index 0000000..96b7dd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo.txt
@@ -0,0 +1,32 @@
+QPNP Qnovo pulse engine
+
+QPNP Qnovo is a pulse charging engine which works in tandem with the QPNP SMB2
+Charger device. It configures the QPNP SMB2 charger to charge/discharge as per
+pulse characteristics.
+
+The QPNP Qnovo pulse engine has a single peripheral assigned to it.
+
+Required properties:
+- compatible:		Must be "qcom,qpnp-qnovo"
+- qcom,pmic-revid:	Should specify the phandle of PMIC
+			revid module. This is used to identify
+			the PMIC subtype.
+
+- reg:			The address for this peripheral
+- interrupts:		Specifies the interrupt associated with the peripheral.
+- interrupt-names:	Specifies the interrupt name for the peripheral. Qnovo
+			peripheral has only one interrupt "ptrain-done".
+
+Optional Properties:
+- qcom,external-rsense:		To indicate whether the platform uses external or
+				internal rsense for measuring battery current.
+
+Example:
+
+	qcom,qpnp-qnovo@1500 {
+		compatible = "qcom,qpnp-qnovo";
+		reg = <0x1500 0x100>;
+		interrupts = <0x2 0x15 0x0 IRQ_TYPE_NONE>;
+		interrupt-names = "ptrain-done";
+		qcom,pmic-revid = <&pmi8998_revid>;
+	};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
new file mode 100644
index 0000000..f4a22e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -0,0 +1,315 @@
+Qualcomm Technologies, Inc. SMB2 Charger Specific Bindings
+
+SMB2 Charger is an efficient programmable battery charger capable of charging a
+high-capacity lithium-ion battery over micro-USB or USB Type-C at ultrafast rates, with
+Quick Charge 2.0, Quick Charge 3.0, and USB Power Delivery support. Wireless
+charging features full A4WP Rezence 1.2, WPC 1.2, and PMA support.
+
+=======================
+Required Node Structure
+=======================
+
+SMB2 Charger must be described in two levels of device nodes.
+
+===============================
+First Level Node - SMB2 Charger
+===============================
+
+Charger specific properties:
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,qpnp-smb2".
+
+- qcom,pmic-revid
+  Usage:      required
+  Value type: phandle
+  Definition: Should specify the phandle of PMI's revid module. This is used to
+		identify the PMI subtype.
+
+- qcom,batteryless-platform
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates that the platform does not have a
+		battery, and therefore charging should be disabled. In
+		addition battery properties will be faked such that the device
+		assumes normal operation.
+
+- qcom,external-vconn
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates VCONN is sourced externally.
+
+- qcom,fcc-max-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum fast charge current in micro-amps.
+		If the value is not present, 1Amp is used as default.
+
+- qcom,fv-max-uv
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum float voltage in micro-volts.
+		If the value is not present, 4.35V is used as default.
+
+- qcom,usb-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the USB input current limit in micro-amps.
+		 If the value is not present, 1.5Amps is used as default.
+
+- qcom,usb-ocl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the OTG output current limit in micro-amps.
+		If the value is not present, 1.5Amps is used as default
+
+- qcom,dc-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the DC input current limit in micro-amps.
+
+- qcom,boost-threshold-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the boost current threshold in micro-amps.
+		If the value is not present, 100mA is used as default.
+
+- qcom,wipower-max-uw
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the DC input power limit in micro-watts.
+		If the value is not present, 8W is used as default.
+
+- qcom,thermal-mitigation
+  Usage:      optional
+  Value type: Array of <u32>
+  Definition: Array of fast charge current limit values for
+		different system thermal mitigation levels.
+		This should be a flat array that denotes the
+		maximum charge current in mA for each thermal
+		level.
+
+- qcom,step-soc-thresholds
+  Usage:      optional
+  Value type: Array of <u32>
+  Definition: Array of SOC threshold values, size of 4. This should be a
+		flat array that denotes the percentage ranging from 0 to 100.
+		If the array is not present, step charging is disabled.
+
+- qcom,step-current-deltas
+  Usage:      optional
+  Value type: Array of <s32>
+  Definition: Array of delta values for charging current, size of 5, with
+		FCC as base.  This should be a flat array that denotes the
+		offset of charging current in uA, from -3100000 to 3200000.
+		If the array is not present, step charging is disabled.
+
+- io-channels
+  Usage:      optional
+  Value type: List of <phandle u32>
+  Definition: List of phandle and IIO specifier pairs, one pair
+		for each IIO input to the device. Note: if the
+		IIO provider specifies '0' for #io-channel-cells,
+		then only the phandle portion of the pair will appear.
+
+- io-channel-names
+  Usage:      optional
+  Value type: List of <string>
+  Definition: List of IIO input name strings sorted in the same
+		order as the io-channels property. Consumer drivers
+		will use io-channel-names to match IIO input names
+		with IIO specifiers.
+
+- qcom,float-option
+  Usage:      optional
+  Value type: <u32>
+  Definition: Configures how the charger behaves when a float charger is
+	      detected by APSD.
+	        1 - Treat as a DCP
+	        2 - Treat as an SDP
+	        3 - Disable charging
+	        4 - Suspend USB input
+
+- qcom,hvdcp-disable
+  Usage:      optional
+  Value type: <empty>
+  Definition: Specifies if hvdcp charging is to be enabled or not.
+		If this property is not specified hvdcp will be enabled.
+		If this property is specified, hvdcp 2.0 detection will still
+		happen but the adapter won't be asked to switch to a higher
+		voltage point.
+
+- qcom,chg-inhibit-threshold-mv
+  Usage:      optional
+  Value type: <u32>
+  Definition: Charge inhibit threshold in milli-volts. Charging will be
+		inhibited when the battery voltage is within this threshold
+		from Vfloat at charger insertion. If this is not specified
+		then charge inhibit will be disabled by default.
+		Allowed values are: 50, 100, 200, 300.
+
+- qcom,auto-recharge-soc
+  Usage:      optional
+  Value type: <empty>
+  Definition: Specifies if automatic recharge needs to be based on battery
+		SOC. If this property is not specified, then auto recharge will
+		be based on battery voltage. For both SOC and battery voltage,
+		charger receives the signal from FG to resume charging.
+
+- qcom,micro-usb
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates that the platform only supports
+		a micro USB port.
+
+- qcom,suspend-input-on-debug-batt
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which when present enables input suspend for
+		debug battery.
+
+=============================================
+Second Level Nodes - SMB2 Charger Peripherals
+=============================================
+
+Peripheral specific properties:
+- reg
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Address and size of the peripheral's register block.
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: Interrupt names.  This list must match up 1-to-1 with the
+	      interrupts specified in the 'interrupts' property.
+
+=======
+Example
+=======
+
+pmi8998_charger: qcom,qpnp-smb2 {
+	compatible = "qcom,qpnp-smb2";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	io-channels = <&pmic_rradc 0>;
+	io-channel-names = "rradc_batt_id";
+
+	dpdm-supply = <&qusb_phy0>;
+
+	qcom,step-soc-thresholds = <60 70 80 90>;
+	qcom,step-current-deltas = <500000 250000 150000 0 (-150000)>;
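+	/*
+	 * Illustrative charger limits based on the optional properties
+	 * described above; the numbers are placeholders, not recommendations
+	 * for a particular battery or board.
+	 */
+	qcom,fcc-max-ua = <2000000>;
+	qcom,fv-max-uv = <4350000>;
+	qcom,float-option = <1>;	/* treat a float charger as DCP */
+	qcom,thermal-mitigation = <3000 1500 1000 500>;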
+
+	qcom,chgr@1000 {
+		reg = <0x1000 0x100>;
+		interrupts =    <0x2 0x10 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x4 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "chg-error",
+					"chg-state-change",
+					"step-chg-state-change",
+					"step-chg-soc-update-fail",
+					"step-chg-soc-update-request";
+	};
+
+	qcom,otg@1100 {
+		reg = <0x1100 0x100>;
+		interrupts =    <0x2 0x11 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x11 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x11 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x11 0x3 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "otg-fail",
+					"otg-overcurrent",
+					"otg-oc-dis-sw-sts",
+					"testmode-change-detect";
+	};
+
+	qcom,bat-if@1200 {
+		reg = <0x1200 0x100>;
+		interrupts =    <0x2 0x12 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x5 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "bat-temp",
+					"bat-ocp",
+					"bat-ov",
+					"bat-low",
+					"bat-therm-or-id-missing",
+					"bat-terminal-missing";
+	};
+
+	qcom,usb-chgpth@1300 {
+		reg = <0x1300 0x100>;
+		interrupts =    <0x2 0x13 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x5 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x6 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x7 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "usbin-collapse",
+					"usbin-lt-3p6v",
+					"usbin-uv",
+					"usbin-ov",
+					"usbin-plugin",
+					"usbin-src-change",
+					"usbin-icl-change",
+					"type-c-change";
+	};
+
+	qcom,dc-chgpth@1400 {
+		reg = <0x1400 0x100>;
+		interrupts =    <0x2 0x14 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x5 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x6 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "dcin-collapse",
+					"dcin-lt-3p6v",
+					"dcin-uv",
+					"dcin-ov",
+					"dcin-plugin",
+					"div2-en-dg",
+					"dcin-icl-change";
+	};
+
+	qcom,chgr-misc@1600 {
+		reg = <0x1600 0x100>;
+		interrupts =    <0x2 0x16 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x5 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x6 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x7 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "wdog-snarl",
+					"wdog-bark",
+					"aicl-fail",
+					"aicl-done",
+					"high-duty-cycle",
+					"input-current-limiting",
+					"temperature-change",
+					"switcher-power-ok";
+	};
+};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
new file mode 100644
index 0000000..c200f94
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
@@ -0,0 +1,115 @@
+Summit smb1351 battery charger
+
+SMB1351 is a single-cell battery charger. It can charge
+the battery and power the system via the USB/AC adapter input.
+
+The smb1351 interface is via I2C bus.
+
+Required Properties:
+- compatible			Must be "qcom,smb1351-charger".
+- reg				The device 7-bit I2C address.
+
+Required Properties for standalone charger:
+- regulator-name		A string used as a descriptive name for OTG regulator.
+- pinctrl-names			The state name of the pin configuration. Only
+				support "default".
+- pinctrl-0			The phandle of the pin configuration node in
+				pinctrl for smb_int_pin.
+
+Optional Properties:
+
+- interrupts			This indicates the IRQ number of the GPIO
+				connected to the STAT pin.
+- qcom,fastchg-current-max-ma 	Fast Charging current in mA. Supported range is
+				from 1000mA to 4500mA.
+- qcom,chg-autonomous-mode	This is a bool property and it indicates that the
+				charger is configured for autonomous operation and
+				does not require any software configuration.
+- qcom,disable-apsd		This is a bool property which disables automatic
+				power source detection (APSD). If this is set
+				charger detection is done by DCIN UV irq.
+- qcom,charging-disabled	This is a bool property which disables charging.
+- qcom,using-pmic-therm		This property indicates thermal pin connected to pmic or smb.
+- qcom,bms-psy-name		This is a string and it points to the bms
+				power supply name.
+- qcom,iterm-ma			Specifies the termination current to indicate end-of-charge.
+				Possible values in mA - 70, 100, 200, 300, 400, 500, 600, 700.
+- qcom,iterm-disabled		Disables the termination current feature. This is a bool
+				property.
+- qcom,float-voltage-mv	 	Float Voltage in mV - the maximum voltage up to which
+				the battery is charged. Supported range 3500mV to 4500mV
+- qcom,recharge-mv		Recharge threshold in mV - the offset from the float-voltage
+				at which the charger restarts charging. Possible
+				values are 50mV and 100mV.
+- qcom,recharge-disabled	Boolean value which disables the auto-recharge.
+- qcom,bms-controlled-charging	This property enables BMS to control EOC and
+				recharge. BMS and charger communicates with each
+				other via power_supply framework. This
+				property should be used with 'qcom,iterm-disabled'
+				to ensure EOC detection in charger is disabled.
+- qcom,force-hvdcp-2p0		Boolean value which forces hvdcp to operate in 2.0 mode.
+- qcom,parallel-charger		Boolean value which enables the parallel charger.
+- qcom,chg-vadc			Corresponding VADC device's phandle.
+- qcom,chg-adc_tm		phandle to the corresponding VADC device to read the ADC channels.
+- qcom,batt-cold-decidegc	Cold battery temperature in decidegC.
+- qcom,batt-hot-decidegc	Hot battery temperature in decidegC.
+- qcom,batt-missing-decidegc	Battery-missing detection temperature in decidegC; if the
+				measured temperature is higher than this, the battery is
+				considered present.
+- qcom,batt-warm-decidegc:	Warm battery temperature in decidegC. After hitting this threshold,
+				"qcom,batt-warm-ma" defines the maximum charging current and
+				"qcom,batt-warm-mv" defines the maximum target voltage.
+- qcom,batt-cool-decidegc:	Cool battery temperature in decidegC. After hitting this threshold,
+				"qcom,batt-cool-ma" defines the maximum charging current and
+				"qcom,batt-cool-mv" defines the maximum target voltage.
+- qcom,batt-warm-ma:		Maximum warm battery charge current in milli-amps.
+- qcom,batt-cool-ma:		Maximum cool battery charge current in milli-amps.
+- qcom,batt-warm-mv:		Maximum warm battery target voltage in milli-volts.
+- qcom,batt-cool-mv:		Maximum cool battery target voltage in milli-volts.
+- qcom,parallel-en-pin-polarity Specify the polarity of enable signal controlled
+				via pin in a parallel-charger configuration.
+				0 - Active low and 1  - Active high.
+				If not specified the default value is active-low.
+- qcom,parallel-external-current-sense If present specifies external rsense is
+				used for charge current sensing.
+
+Example for standalone charger:
+
+&i2c_4 {
+	smb1351_otg_supply: smb1351-charger@57 {
+		compatible = "qcom,smb1351-charger";
+		reg = <0x57>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <62 2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&smb_int_default>;
+		qcom,float-voltage-mv = <4350>;
+		qcom,iterm-ma = <100>;
+		qcom,recharge-mv = <100>;
+		qcom,bms-psy-name = "bms";
+		regulator-name = "smb1351_otg_vreg";
+		qcom,using-pmic-therm;
+		qcom,chg-adc_tm = <&pm8916_adc_tm>;
+		qcom,chg-vadc = <&pm8916_vadc>;
+		qcom,batt-hot-decidegc = <550>;
+		qcom,batt-cold-decidegc = <0>;
+		qcom,batt-missing-decidegc = <(-200)>;
+		qcom,batt-warm-decidegc = <500>;
+		qcom,batt-cool-decidegc = <50>;
+		qcom,batt-warm-ma = <350>;
+		qcom,batt-cool-ma = <350>;
+		qcom,batt-warm-mv = <4200>;
+		qcom,batt-cool-mv = <4200>;
+	};
+};
+
+Example for parallel charger:
+
+&i2c_11 {
+	smb1351-charger@1d {
+		compatible = "qcom,smb1351-charger";
+		reg = <0x1d>;
+		qcom,parallel-charger;
+		qcom,float-voltage-mv = <4400>;
+		qcom,recharge-mv = <100>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb135x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb135x-charger.txt
new file mode 100644
index 0000000..90527f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb135x-charger.txt
@@ -0,0 +1,101 @@
+SMB135x battery charger
+
+SMB135x is a single-cell switching mode battery charger. It can charge
+the battery and power the system via the USB and AC adapter input.
+
+The smb135x interface is via I2C bus.
+
+Required Properties:
+- compatible:			Must be "qcom,smb1356-charger", "qcom,smb1357-charger",
+				"qcom,smb1358-charger" or "qcom,smb1359-charger".
+- reg:				The device 7-bit I2C address.
+
+Optional Properties:
+
+- interrupts			This indicates the IRQ number of the GPIO
+				connected to the STAT pin.
+- qcom,bms-psy-name	 	the psy name to use for reporting battery capacity. If left
+				unspecified it uses a preprogrammed default value.
+- qcom,float-voltage-mv	 	Float Voltage in mV - the maximum voltage up to which
+				the battery is charged. Supported range 3600mV to 4500mV
+- qcom,charging-timeout		Maximum duration in minutes that a single charge
+				cycle may last.  Supported values are: 0, 192, 384,
+				768, and 1536.  A value of 0 means that no
+				charge cycle timeout is used and charging can
+				continue indefinitely.
+- qcom,dc-psy-type		The type of charger connected to the DC path.
+				Can be "Mains" or "Wireless"
+- qcom,dc-psy-ma		The current in mA that the DC path can support. Must be specified if
+				dc-psy-type is specified. Valid range 300mA to 2000mA.
+- qcom,charging-disabled	Set this if charging should be disabled in the build
+				by default. Useful in usecases where battery current
+				needs to be profiled even when USB is present.
+- qcom,recharge-thresh-mv	Specifies the minimum voltage drop in millivolts
+				below the float voltage that is required in
+				order to initiate a new charging cycle.
+				Supported values are: 50, 100, 200 and 300mV.
+- qcom,bmd-algo-disabled	Indicates if the battery missing detection algorithm
+				is disabled. If this node is present SMB uses
+				the THERM pin for battery missing detection.
+- qcom,iterm-ma			Specifies the termination current to indicate end-of-charge.
+				Possible values in mA - 50, 100, 150, 200, 250, 300, 500, 600.
+- qcom,iterm-disabled		Disables the termination current feature. This is a bool
+				property.
+- qcom,soft-vfloat-comp-disabled	Set this property when the battery is powered via an external
+					source and could go above the float voltage.  smb135x chips
+					go into unintentional reverse boost in such a situation and
+					the float voltage compensation needs to be disabled to avoid
+					that reverse boost.
+- qcom,soft-current-comp-disabled	Set this property to disable charging current compensation
+					if battery temperature exceeds soft JEITA thresholds.
+- qcom,gamma-setting			Array of gamma values for JEITA. The sequence is
+					<"Cold Hard" "Hot Hard" "Cold Soft" "Hot Soft">. The gamma value
+					indicates the ratio of the pull-up resistors and the NTC
+					resistor in the battery pack. There are 4 options, referring to
+					the graphical user interface.
+- qcom,thermal-mitigation:		Array of input current limit values for different
+					system thermal mitigation levels.
+- regulator-name			A string used as a descriptive name for the OTG regulator.
+- therm-bias-supply			The supply that provides bias voltage to the battery
+					thermistor. This is useful in designs that do not use the SYSON
+					pin to bias the thermistor.
+- usb-pullup-supply			The supply regulator that acts as a pull-up for the USB data lines.
+- qcom,parallel-charger:		A flag to indicate if the charger merely assists with USB
+					charging. In this case the input current from USB is split
+					between a main charger and smb135x to reduce the thermal impact
+					of high-current charging from the USB path.
+- qcom,inhibit-disabled:	Disables the charger-inhibit function.
+- qcom,bms-controlled-charging: This property enables BMS to control EOC and
+				recharge. BMS and the charger communicate with each
+				other via the power_supply framework. This
+				property should be used with 'qcom,iterm-disabled'
+				to ensure EOC detection in the charger is
+				disabled.
+- qcom,fastchg-ma:		Specifies the maximum fastcharge current.
+				The possible range for fastcharge current is
+				from 300mA to 3000mA.
+- qcom,id-line-not-connected:	Specifies if smb135x charger is not monitoring the USB_ID line.
+- qcom,parallel-en-pin-polarity: Specifies the polarity of the enable signal controlled
+				via a pin in a parallel-charger configuration.
+				0 - active low, 1 - active high.
+				If not specified, the default is active low.
+
+Example:
+	i2c@f9967000 {
+		smb1357-charger@1b {
+			compatible = "qcom,smb1357-charger";
+			reg = <0x1b>;
+			interrupt-parent = <&spmi_bus>;
+			interrupts = <0x00 0xCD 0>;
+			qcom,float-voltage-mv = <4200>;
+			qcom,iterm-ma = <100>;
+			qcom,dc-psy-type = <8>;
+			qcom,dc-psy-ma = <800>;
+			qcom,charging-disabled;
+			qcom,recharge-thresh-mv = <100>;
+			regulator-name = "smb1357-otg";
+			qcom,thermal-mitigation = <1500 700 600 325>;
+			qcom,gamma-setting = <3 2 0 2>;
+			qcom,fastchg-ma = <3000>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
new file mode 100644
index 0000000..c8f2a5a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -0,0 +1,234 @@
+Qualcomm Technologies, Inc. SMB138X Charger Specific Bindings
+
+SMB138X Charger is an efficient programmable battery charger capable of
+fast-charging a high-capacity lithium-ion battery over micro-USB or USB Type-C,
+with Quick Charge 2.0 and Quick Charge 3.0 support. Wireless charging features
+full A4WP Rezence 1.2, WPC 1.2, and PMA support.
+
+=======================
+Required Node Structure
+=======================
+
+SMB138X Charger must be described in two levels of device nodes.
+
+==================================
+First Level Node - SMB138X Charger
+==================================
+
+Charger specific properties:
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: String which indicates the charging mode. Can be one of the
+	      following:
+	      Standalone/Parallel Master	- "qcom,smb138x-charger"
+	      Parallel Slave			- "qcom,smb138x-parallel-slave"
+
+- qcom,pmic-revid
+  Usage:      required
+  Value type: phandle
+  Definition: Should specify the phandle of SMB's revid module. This is used
+	      to identify the SMB subtype.
+
+- qcom,suspend-input
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates that the charger should not draw
+	      current from any of its input sources (USB, DC).
+
+- qcom,fcc-max-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum fast charge current in micro-amps.
+
+- qcom,usb-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the USB input current limit in micro-amps.
+
+- qcom,dc-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the DC input current limit in micro-amps.
+
+- qcom,charger-temp-max-mdegc
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum charger temperature in milli-degrees
+	      Celsius. If unspecified a default of 80000 will be used.
+
+- qcom,connector-temp-max-mdegc
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum connector temperature in milli-degrees
+	      Celsius. If unspecified a default value of 105000 will be used.
+
+- io-channels
+  Usage:      optional
+  Value type: List of <phandle u32>
+  Definition: List of phandle and IIO specifier pairs, one pair
+		for each IIO input to the device. Note: if the
+		IIO provider specifies '0' for #io-channel-cells,
+		then only the phandle portion of the pair will appear.
+
+- io-channel-names
+  Usage:      optional
+  Value type: List of <string>
+  Definition: List of IIO input name strings sorted in the same
+		order as the io-channels property. Consumer drivers
+		will use io-channel-names to match IIO input names
+		with IIO specifiers.
+
+================================================
+Second Level Nodes - SMB138X Charger Peripherals
+================================================
+
+Peripheral specific properties:
+- reg
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Address and size of the peripheral's register block.
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: Interrupt names.  This list must match up 1-to-1 with the
+	      interrupts specified in the 'interrupts' property.
+
+=======================================
+Second Level Nodes - SMB138X Regulators
+=======================================
+
+The following regulator nodes are supported:
+"qcom,smb138x-vbus"	- Regulator for enabling VBUS
+"qcom,smb138x-vconn"	- Regulator for enabling VCONN
+
+- regulator-name
+  Usage:      required
+  Value type: <string>
+  Definition: Specifies the name for this regulator.
+
+=======
+Example
+=======
+
+smb138x_charger: qcom,smb138x-charger {
+	compatible = "qcom,smb138x-charger";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	qcom,suspend-input;
+	dpdm-supply = <&qusb_phy0>;
+
+	qcom,chgr@1000 {
+		reg = <0x1000 0x100>;
+		interrupts =    <0x10 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x4 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "chg-error",
+					"chg-state-change",
+					"step-chg-state-change",
+					"step-chg-soc-update-fail",
+					"step-chg-soc-update-request";
+	};
+
+	qcom,otg@1100 {
+		reg = <0x1100 0x100>;
+		interrupts =    <0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "otg-fail",
+					"otg-overcurrent",
+					"otg-oc-dis-sw-sts",
+					"testmode-change-detect";
+	};
+
+	qcom,bat-if@1200 {
+		reg = <0x1200 0x100>;
+		interrupts =    <0x12 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "bat-temp",
+					"bat-ocp",
+					"bat-ov",
+					"bat-low",
+					"bat-therm-or-id-missing",
+					"bat-terminal-missing";
+	};
+
+	qcom,usb-chgpth@1300 {
+		reg = <0x1300 0x100>;
+		interrupts =    <0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x5 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x6 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "usbin-collapse",
+					"usbin-lt-3p6v",
+					"usbin-uv",
+					"usbin-ov",
+					"usbin-plugin",
+					"usbin-src-change",
+					"usbin-icl-change",
+					"type-c-change";
+	};
+
+	qcom,dc-chgpth@1400 {
+		reg = <0x1400 0x100>;
+		interrupts =    <0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x6 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "dcin-collapse",
+					"dcin-lt-3p6v",
+					"dcin-uv",
+					"dcin-ov",
+					"dcin-plugin",
+					"div2-en-dg",
+					"dcin-icl-change";
+	};
+
+	qcom,chgr-misc@1600 {
+		reg = <0x1600 0x100>;
+		interrupts =    <0x16 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x6 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "wdog-snarl",
+					"wdog-bark",
+					"aicl-fail",
+					"aicl-done",
+					"high-duty-cycle",
+					"input-current-limiting",
+					"temperature-change",
+					"switcher-power-ok";
+	};
+};
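+
+The VBUS and VCONN regulator child nodes described above are not shown in the
+example. A minimal, hypothetical sketch of how they could appear inside the
+first level node is given below (the node labels and regulator-name strings
+are assumptions, not taken from this binding):
+
+	smb138x_vbus: qcom,smb138x-vbus {
+		regulator-name = "smb138x_vbus";
+	};
+
+	smb138x_vconn: qcom,smb138x-vconn {
+		regulator-name = "smb138x_vconn";
+	};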
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 6122f6e..8efa85d 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -185,6 +185,40 @@
 	Definition: The initial temp band considering 0-based index at which
 		    the baseline target quotients are derived and fused.
 
+- qcom,cpr-acd-adj-down-step-limit
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: The maximum number of PMIC steps to go down within a given
+		    corner due to all ACD adjustment recommendations. Valid
+		    values are 0 through 31.
+
+- qcom,cpr-acd-adj-up-step-limit
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: The maximum number of PMIC steps to go up within a given
+		    corner due to all ACD adjustment recommendations. Valid
+		    values are 0 through 7.
+
+- qcom,cpr-acd-adj-down-step-size
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: Defines the step size in units of PMIC steps used for
+		    target quotient adjustment due to an ACD down recommendation.
+		    Valid values are 0 through 3.
+
+- qcom,cpr-acd-adj-up-step-size
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: Defines the step size in units of PMIC steps used for
+		    target quotient adjustment due to an ACD up recommendation.
+		    Valid values are 0 through 3.
+
+- qcom,cpr-acd-avg-enable
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the CPRh controller
+		    should enable the ACD AVG feature.
+
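+A hypothetical controller fragment combining the ACD properties above (values
+chosen from the documented ranges; the node label/name and all other required
+controller properties are assumed or omitted for brevity):
+
+	apc_cpr: cprh-ctrl {
+		/* ... other required controller properties ... */
+		qcom,cpr-acd-avg-enable;
+		qcom,cpr-acd-adj-down-step-limit = <31>;
+		qcom,cpr-acd-adj-up-step-limit = <7>;
+		qcom,cpr-acd-adj-down-step-size = <2>;
+		qcom,cpr-acd-adj-up-step-size = <2>;
+	};
+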
 =================================================
 Second Level Nodes - CPR Threads for a Controller
 =================================================
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
index d08ca95..c9cfc88 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
@@ -149,6 +149,8 @@
 					already. If it it not specified, then
 					output voltage can be configured to
 					any value in the allowed limit.
+- qcom,notify-lab-vreg-ok-sts:		A boolean property which, when set, will
+					poll and notify the lab_vreg_ok status.
 
 Following properties are available only for PM660A:
 
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
index 8b3a38da0..63da8ec 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
@@ -26,6 +26,13 @@
 	Value type: <prop-encoded-array>
 	Definition:  Base address of the LCDB SPMI peripheral.
 
+- qcom,force-module-reenable
+	Usage:      required if using SW mode for module enable
+	Value type:  <bool>
+	Definition: This enables the workaround to force enable
+		    the vph_pwr_2p5_ok signal required for
+		    turning on the LCDB module.
+
 Touch-to-wake (TTW) properties:
 
 TTW supports 2 modes of operation - HW and SW. In the HW mode the enable/disable
@@ -59,7 +66,6 @@
 	Definition: ON time (in mS) for the VDISP/VDISN signals.
 		    Possible values are 4, 8, 16, 32.
 
-
 ========================================
 Second Level Nodes - LDO/NCP/BOOST block
 ========================================
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
index 5d80a04..38f599b 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
@@ -44,12 +44,12 @@
 	Value type: <bool>
 	Definition: Enables the voltage programming through SWIRE signal.
 
- qcom,ext-pin-control
+- qcom,ext-pin-control
 	Usage:      optional
 	Value type: <bool>
 	Definition: Configures the OLED module to be enabled by a external pin.
 
- qcom,dynamic-ext-pinctl-config
+- qcom,dynamic-ext-pinctl-config
 	Usage:      optional
 	Value type: <bool>
 	Definition:  Used to dynamically enable/disable the OLEDB module
@@ -57,13 +57,27 @@
 		     rail.  This property is applicable only if qcom,ext-pin-ctl
 		     property is specified and it is specific to PM660A.
 
- qcom,pbs-control
+- qcom,force-pd-control
+	Usage:      optional
+	Value type: <bool>
+	Definition:  Used to forcibly enable the pull down control via SPMI by
+		     disabling the pull down configuration that is done by
+		     hardware automatically through SWIRE pulses.
+
+- qcom,pbs-client
+	Usage:      optional
+	Value type: <phandle>
+	Definition:  Used to send the PBS trigger to the specified PBS client
+		     (see the sketch after the PFM properties below). This
+		     property is applicable only if the qcom,force-pd-control
+		     property is specified.
+
+- qcom,pbs-control
 	Usage:      optional
 	Value type: <bool>
 	Definition: PMIC PBS logic directly configures the output voltage update
 		    and pull down control.
 
- qcom,oledb-init-voltage-mv
+- qcom,oledb-init-voltage-mv
 	Usage:      optional
 	Value type: <u32>
 	Definition: Sets the AVDD bias voltage (in mV) when the module is
@@ -71,53 +85,53 @@
 		    property is not specified. Supported values are from 5.0V
 		    to 8.1V with a step of 100mV.
 
-qcom,oledb-default-voltage-mv
+- qcom,oledb-default-voltage-mv
 	Usage:      optional
 	Value type: <u32>
 	Definition: Sets the default AVDD bias voltage (in mV) before module
 		    enable. Supported values are from 5.0V to 8.1V with the
 		    step of 100mV.
 
-qcom,bias-gen-warmup-delay-ns
+- qcom,bias-gen-warmup-delay-ns
 	Usage:      optional
 	Value type: <u32>
 	Definition: Bias generator warm-up time (ns). Supported values are
 		    6700, 13300, 267000, 534000.
 
-qcom,peak-curr-limit-ma
+- qcom,peak-curr-limit-ma
 	Usage:      optional
 	Value type: <u32>
 	Definition: Peak current limit (in mA). Supported values are 115, 265,
 		    415, 570, 720, 870, 1020, 1170.
 
-qcom,pull-down-enable
+- qcom,pull-down-enable
 	Usage:      optional
 	Value type: <u32>
 	Definition: Pull down configuration of OLEDB.
 		    1 - Enable pull-down
 		    0 - Disable pull-down
 
-qcom,negative-curr-limit-enable
+- qcom,negative-curr-limit-enable
 	Usage:      optional
 	Value type: <u32>
 	Definition: negative current limit enable/disable.
 			1 = enable negative current limit
 			0 = disable negative current limit
 
-qcom,negative-curr-limit-ma
+- qcom,negative-curr-limit-ma
 	Usage:      optional
 	Value type: <u32>
 	Definition: Negative current limit (in mA). Supported values are
 		    170, 300, 420, 550.
 
-qcom,enable-short-circuit
+- qcom,enable-short-circuit
 	Usage:      optional
 	Value type: <u32>
 	Definition: Short circuit protection enable/disable.
 			1 = enable short circuit protection
 			0 = disable short circuit protection
 
-qcom,short-circuit-dbnc-time
+- qcom,short-circuit-dbnc-time
 	usage:      optional
 	Value type: <u32>
 	Definitioan: Short circuit debounce time (in Fsw). Supported
@@ -126,26 +140,26 @@
 Fast precharge properties:
 -------------------------
 
-qcom,fast-precharge-ppulse-enable
+- qcom,fast-precharge-ppulse-enable
 	usage:      optional
 	Value type: <u32>
 	Definitioan: Fast precharge pfet pulsing enable/disable.
 			1 = enable fast precharge pfet pulsing
 			0 = disable fast precharge pfet pulsing
 
-qcom,precharge-debounce-time-ms
+- qcom,precharge-debounce-time-ms
 	usage:      optional
 	Value type: <u32>
 	Definitioan: Fast precharge debounce time (in ms). Supported
 		     values are 1, 2, 4, 8.
 
-qcom,precharge-pulse-period-us
+- qcom,precharge-pulse-period-us
 	usage:      optional
 	Value type: <u32>
 	Definitioan: Fast precharge pulse period (in us). Supported
 		     values are 3, 6, 9, 12.
 
-qcom,precharge-pulse-on-time-us
+- qcom,precharge-pulse-on-time-us
 	usage:      optional
 	Value type: <u32>
 	Definitioan: Fast precharge pulse on time (in ns). Supported
@@ -154,20 +168,20 @@
 Pulse Skip Modulation (PSM) properties:
 --------------------------------------
 
-qcom,psm-enable
+- qcom,psm-enable
 	Usage:      optional
 	Value type: <u32>
 	Definition: Pulse Skip Modulation mode.
 		    1 - Enable PSM mode
 		    0 - Disable PSM mode
 
-qcom,psm-hys-mv
+- qcom,psm-hys-mv
 	Usage:      optional
 	Value type: <u32>
 	Definition: PSM hysterysis voltage (in mV).
 		    Supported values are 13mV and 26mV.
 
-qcom,psm-vref-mv
+- qcom,psm-vref-mv
 	Usage:      optional
 	Value type: <u32>
 	Definition: Reference voltage(in mV) control for PSM comparator.
@@ -177,26 +191,26 @@
 Pulse Frequency Modulation (PFM) properties:
 -------------------------------------------
 
-qcom,pfm-enable
+- qcom,pfm-enable
 	Usage:      optional
 	Value type: <u32>
 	Definition: Pulse Frequency Modulation mode.
 		    1 - Enable PFM mode
 		    0 - Disable PFM mode
 
-qcom,pfm-hys-mv
+- qcom,pfm-hys-mv
 	Usage:      optional
 	Value type: <u32>
 	Definition: PFM hysterysis voltage (in mV).
 		    Supported values are 13mV and 26mV.
 
-qcom,pfm-curr-limit-ma
+- qcom,pfm-curr-limit-ma
 	Usage:      optional
 	Value type: <u32>
 	Definition: PFM current limit (in mA).
 		    Supported values are 130, 200, 270, 340.
 
-qcom,pfm-off-time-ns
+- qcom,pfm-off-time-ns
 	Usage:      optional
 	Value type: <u32>
 	Definition: NFET off time at PFM (in ns).
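+
+Forced pull-down control sketch (hypothetical fragment; the node label/name
+is assumed, and &pm660l_pbs refers to a QPNP PBS node such as the one
+documented in qpnp-pbs.txt):
+
+	pm660a_oledb: qcom,oledb {
+		/* ... other required OLEDB properties ... */
+		qcom,force-pd-control;
+		qcom,pbs-client = <&pm660l_pbs>;
+	};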
diff --git a/Documentation/devicetree/bindings/soc/qcom/avtimer.txt b/Documentation/devicetree/bindings/soc/qcom/avtimer.txt
index 157f79b..7c70b1e 100644
--- a/Documentation/devicetree/bindings/soc/qcom/avtimer.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/avtimer.txt
@@ -13,8 +13,16 @@
 - compatible : Must be "qcom,avtimer"
 
 Optional properties:
-- clk-div : The clk is at 27MHz and hence ticks are to be
- divided by 27 to achive the msec value.
+- clk-div : Divisor to divide the ticks value to get msec value.
+ If the clock is at 27MHz, the ticks value read from AVTimer
+ registers will have to be divided by 27, to achieve the msec value.
+- clk-mult : Multiplier applied to the ticks value in order to avoid
+ a floating point operation when the clock frequency is not an integer
+ MHz value. E.g. to get msec out of ticks from a 19.2MHz clock source,
+ the ticks value would have to be divided by 19.2, which is a floating
+ point operation. To avoid this, the msec value can instead be
+ calculated by multiplying the ticks by 10 and dividing the result
+ by 192, i.e. msec = (ticks * 10) / 192;
 
 Example:
 	qcom,avtimer@90f7000 {
@@ -23,4 +31,5 @@
 		      <0x90f7010 0x4>;
 		reg-names = "avtimer_lsb_addr", "avtimer_msb_addr";
 		qcom,clk-div = <27>;
+		qcom,clk-mult = <10>;
 	};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
new file mode 100644
index 0000000..d7aefbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
@@ -0,0 +1,30 @@
+QPNP PBS
+
+QPNP (Qualcomm Technologies, Inc. Plug N Play) PBS is a programmable boot
+sequence. This driver helps client drivers trigger such sequences, which are
+configured in the PMIC.
+
+This document describes the bindings for the QPNP PBS driver.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: should be "qcom,qpnp-pbs".
+
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition:  Base address of the PBS registers.
+
+
+=======
+Example
+=======
+	pm660l_pbs: qcom,pbs@7300 {
+		compatible = "qcom,qpnp-pbs";
+		reg = <0x7300 0x100>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 3ff3b2f..e0ab31f 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -181,6 +181,11 @@
 
  - compatible : "qcom,msm-pcm-loopback"
 
+Optional properties:
+
+ - qcom,msm-pcm-loopback-low-latency : Flag indicating whether
+   the device node is of type low latency.
+
 * msm-dai-q6
 
 [First Level Nodes]
@@ -365,6 +370,10 @@
 
  - compatible : "qcom,msm-cdc-pinctrl"
 
+Optional properties:
+ - qcom,lpi-gpios : This boolean property is added if GPIOs are under
+		    LPI TLMM.
+
 * msm-dai-slim
 
 Required properties:
@@ -421,6 +430,11 @@
 		qcom,msm-pcm-low-latency;
 	};
 
+	qcom,msm-pcm-loopback-low-latency {
+		compatible = "qcom,msm-pcm-loopback";
+		qcom,msm-pcm-loopback-low-latency;
+	};
+
         qcom,msm-pcm-routing {
                 compatible = "qcom,msm-pcm-routing";
         };
@@ -829,17 +843,6 @@
 Required properties:
 - compatible : "qcom,msm8974-audio-taiko"
 - qcom,model : The user-visible name of this sound card.
-- reg : Offset and length of the register region(s) for MI2S/PCM MUX
-- reg-names : Register region name(s) referenced in reg above
-	 Required register resource entries are:
-	 "lpaif_pri_mode_muxsel": Physical address of MUX to select between
-				  Primary PCM and Primary MI2S
-	 "lpaif_sec_mode_muxsel": Physical address of MUX to select between
-				  Secondary PCM and Secondary MI2S
-	 "lpaif_tert_mode_muxsel": Physical address of MUX to select between
-				   Primary PCM and Tertiary MI2S
-	 "lpaif_quat_mode_muxsel": Physical address of MUX to select between
-				   Secondary PCM and Quarternary MI2S
 - qcom,audio-routing : A list of the connections between audio components.
   Each entry is a pair of strings, the first being the connection's sink,
   the second being the connection's source.
@@ -877,6 +880,19 @@
 		    codec dai names should match to that of the phandle order given
 		    in "asoc-codec".
 Optional properties:
+- reg : Offset and length of the register region(s) for MI2S/PCM MUX.
+	Not applicable for all targets.
+- reg-names : Register region name(s) referenced in reg above.
+	      Not applicable for all targets.
+	 Required register resource entries are:
+	 "lpaif_pri_mode_muxsel": Physical address of MUX to select between
+				  Primary PCM and Primary MI2S
+	 "lpaif_sec_mode_muxsel": Physical address of MUX to select between
+				  Secondary PCM and Secondary MI2S
+	 "lpaif_tert_mode_muxsel": Physical address of MUX to select between
+				   Primary PCM and Tertiary MI2S
+	 "lpaif_quat_mode_muxsel": Physical address of MUX to select between
+				   Secondary PCM and Quarternary MI2S
 - qcom,hdmi-audio-rx: specifies if HDMI audio support is enabled or not.
 - qcom,ext-ult-spk-amp-gpio : GPIO for enabling of speaker path amplifier.
 
@@ -1510,10 +1526,10 @@
 		asoc-wsa-codec-prefixes = "SpkrMono";
 	};
 
-* MSMFALCON ASoC Machine driver
+* SDM660 ASoC Machine driver
 
 Required properties:
-- compatible : "qcom,msmfalcon-asoc-snd"
+- compatible : "qcom,sdm660-asoc-snd"
 - qcom,model : The user-visible name of this sound card.
 - qcom,msm-hs-micbias-type : This property is used to recognize the headset
   micbias type, internal or external.
@@ -1539,6 +1555,7 @@
 capacitor mode.
 - qcom,msm-micbias2-ext-cap : Boolean. Enable micbias2 external
 capacitor mode.
+- qcom,wsa-disable : Boolean. Disables WSA speaker dailinks from sound node.
 - qcom,msm-spk-ext-pa : GPIO which enables external speaker pa.
 - qcom,msm-mclk-freq : This property is used to inform machine driver about
 mclk frequency needs to be configured for internal and external PA.
@@ -1559,11 +1576,19 @@
 - qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
 - qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
 - qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
+- qcom,cdc-pdm-gpios : phandle for pdm gpios.
+- qcom,cdc-comp-gpios : phandle for compander gpios.
+- qcom,cdc-dmic-gpios : phandle for Digital mic clk and data gpios.
+- qcom,cdc-sdw-gpios : phandle for soundwire clk and data gpios.
+- qcom,msm-mbhc-moist-cfg: This property is used to set moisture detection
+		threshold values for different codecs. The first parameter is V (voltage),
+		the second is I (current), and the third is R (resistance). Depending on
+		the codec, set the corresponding element in the array and set the others to 0.
 
 Example:
 	 sound {
-		compatible = "qcom,msmfalcon-asoc-snd";
-		qcom,model = "msmfalcon-snd-card";
+		compatible = "qcom,sdm660-asoc-snd";
+		qcom,model = "sdm660-snd-card";
 		qcom,msm-mclk-freq = <9600000>;
 		qcom,msm-mbhc-hphl-swh = <0>;
 		qcom,msm-mbhc-gnd-swh = <0>;
@@ -1579,24 +1604,11 @@
 			"AMIC1", "MIC BIAS External",
 			"AMIC2", "MIC BIAS Internal2",
 			"AMIC3", "MIC BIAS External";
-		qcom,msm-gpios =
-			"int_pdm",
-			"us_eu_gpio";
-		qcom,pinctrl-names =
-			"all_off",
-			"int_pdm_act",
-			"us_eu_gpio_act",
-			"int_pdm_us_eu_gpio_act";
-		pinctrl-names =
-			"all_off",
-			"int_pdm_act",
-			"us_eu_gpio_act",
-			"int_pdm_us_eu_gpio_act";
-		pinctrl-0 = <&cdc_pdm_lines_sus &cdc_pdm_lines_2_sus &cross_conn_det_sus>;
-		pinctrl-1 = <&cdc_pdm_lines_act &cdc_pdm_lines_2_act &cross_conn_det_sus>;
-		pinctrl-2 = <&cdc_pdm_lines_sus &cdc_pdm_lines_2_sus &cross_conn_det_act>;
-		pinctrl-3 = <&cdc_pdm_lines_act &cdc_pdm_lines_2_act &cross_conn_det_act>;
 		qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+		qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+		qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+		qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+		qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
 		asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
 				<&loopback>, <&compress>, <&hostless>,
 				<&afe>, <&lsm>, <&routing>, <&lpa>;
@@ -1681,6 +1693,10 @@
 - qcom,msm-mi2s-master: This property is used to inform machine driver
   if MSM is the clock master of mi2s. 1 means master and 0 means slave. The
   first entry is primary mi2s; the second entry is secondary mi2s, and so on.
+- qcom,msm-mi2s-ext-mclk: This property is used to inform the machine driver
+  if MCLK from MSM is used for any external audio connections. 1 means used
+  as an external mclk source and 0 indicates not used. The first entry is
+  the primary mclk; the second entry is the secondary mclk, and so on.
 - reg: This property provides the AUX PCM/MI2S mux select register addresses
   and size.
 - reg_names: This property provides the name of the AUX PCM/MI2S mux select
@@ -1721,6 +1737,7 @@
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
 		qcom,msm-mi2s-master = <1>, <0>, <1>, <1>;
+		qcom,msm-mi2s-ext-mclk = <1>, <1>, <0>, <1>;
 		reg = <0x1711a000 0x4>,
 		      <0x1711b000 0x4>,
 		      <0x1711c000 0x4>,
@@ -1732,6 +1749,8 @@
 		qcom,msm-mclk-freq = <9600000>;
 		qcom,msm-mbhc-hphl-swh = <0>;
 		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,wsa-disable;
+		qcom,msm-mbhc-moist-cfg = <1>, <3>, <0>;
 		qcom,msm-hs-micbias-type = "internal";
 		qcom,msm-micbias1-ext-cap;
 		qcom,audio-routing =
@@ -2020,13 +2039,15 @@
 
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 				<&loopback>, <&compress>, <&hostless>,
-				<&afe>, <&lsm>, <&routing>, <&compr>;
+				<&afe>, <&lsm>, <&routing>, <&compr>,
+				<&loopback1>;
 		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
 				"msm-pcm-dsp.2", "msm-voip-dsp",
 				"msm-pcm-voice", "msm-pcm-loopback",
 				"msm-compress-dsp", "msm-pcm-hostless",
 				"msm-pcm-afe", "msm-lsm-client",
-				"msm-pcm-routing", "msm-compr-dsp";
+				"msm-pcm-routing", "msm-compr-dsp",
+				"msm-pcm-loopback.1";
 		asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
 				<&dai_mi2s>, <&dai_mi2s_quat>,
 				<&afe_pcm_rx>, <&afe_pcm_tx>,
@@ -2060,11 +2081,11 @@
 		asoc-codec-names = "msm-stub-codec.1";
 	};
 
-* MSMFALCON ASoC Slimbus Machine driver
+* SDM660 ASoC Slimbus Machine driver
 
 Required properties:
-- compatible : "qcom,msmfalcon-asoc-snd-tasha" for tasha codec,
-		"qcom,msmfalcon-asoc-snd-tavil" for tavil codec.
+- compatible : "qcom,sdm660-asoc-snd-tasha" for tasha codec,
+		"qcom,sdm660-asoc-snd-tavil" for tavil codec.
 - qcom,model : The user-visible name of this sound card.
 - qcom,msm-mclk-freq : MCLK frequency value for external codec
 - qcom,msm-gpios : Lists down all the gpio sets that are supported.
@@ -2104,8 +2125,8 @@
 Example:
 
 	sound-9335 {
-	compatible = "qcom,msmfalcon-asoc-snd-tasha";
-	qcom,model = "msmfalcon-tasha-snd-card";
+	compatible = "qcom,sdm660-asoc-snd-tasha";
+	qcom,model = "sdm660-tasha-snd-card";
 
 	qcom,audio-routing =
 		"RX_BIAS", "MCLK",
@@ -2233,6 +2254,11 @@
 - qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
 - qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
 - qcom,wcn-btfm : Property to specify if WCN BT/FM chip is used for the target
+- qcom,msm-mbhc-usbc-audio-supported : Property to specify if analog audio feature is
+				       enabled or not.
+- qcom,usbc-analog-en1_gpio : EN1 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-en2_n_gpio : EN2 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-force_detect_gpio : Force detect GPIO to enable USB type-C analog audio
 
 Example:
 
@@ -2306,6 +2332,10 @@
 				<&wsa881x_213>, <&wsa881x_214>;
 		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
 					  "SpkrRight", "SpkrLeft";
+		qcom,msm-mbhc-usbc-audio-supported = <1>;
+		qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
+		qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
+		qcom,usbc-analog-force_detect_gpio = <&wcd_usbc_analog_f_gpio>;
 	};
 
 * MSMSTUB ASoC Machine driver
diff --git a/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt b/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt
new file mode 100644
index 0000000..9d3fb78
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt
@@ -0,0 +1,26 @@
+QTI USB Audio QMI Device
+
+USB Audio QMI device is used to attach to remote processor IOMMU and
+map USB Audio driver specific memory to iova to share with remote
+processor.
+
+Required Properties:
+
+- compatible : "qcom,usb-audio-qmi-dev"
+
+- iommus : A list of phandle and IOMMU specifier pairs that describe the
+  IOMMU master interfaces of the device.
+
+- qcom,usb-audio-stream-id : Stream id is prepended to iova before passing
+  iova to remote processor. This allows remote processor to access iova.
+
+- qcom,usb-audio-intr-num : Interrupter number for external sub system
+  destination.
+
+Example:
+	usb_audio_qmi_dev {
+		compatible = "qcom,usb-audio-qmi-dev";
+		iommus = <&lpass_q6_smmu 12>;
+		qcom,usb-audio-stream-id = <12>;
+		qcom,usb-audio-intr-num = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index c0a7a24..0df9417 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -384,8 +384,8 @@
 Tombak audio CODEC in SPMI mode
 
  - compatible = "qcom,msm-codec-core",
- - compatible = "qcom,pmic-codec-digital"
- - compatible = "qcom,pmic-codec-analog"
+ - compatible = "qcom,msm-digital-codec"
+ - compatible = "qcom,pmic-analog-codec"
  - reg: represents the slave base address provided to the peripheral.
  - interrupt-parent : The parent interrupt controller.
  - interrupts: List of interrupts in given SPMI peripheral.
@@ -438,19 +438,14 @@
 
 Example:
 
-msm_dig_codec: qcom,msm-int-codec {
-	compatible = "qcom,msm_int_core_codec";
-	qcom,dig-cdc-base-addr = <0xc0f0000>;
+msm_digital_codec: msm-dig-codec@c0f0000 {
+	compatible = "qcom,msm-digital-codec";
+	reg = <0xc0f0000 0x0>;
 };
 
-msm8x16_wcd_codec@f100 {
-	compatible = "qcom,msm_int_pmic_analog_codec";
-	reg = <0xf100 0x100>;
-};
-
-msm8x16_wcd_codec@f000{
-	compatible = "qcom,msm_int_pmic_digital_codec";
-	reg = <0xf000 0x100>;
+pmic_analog_codec: analog-codec@f000 {
+	compatible = "qcom,pmic-analog-codec";
+	reg = <0xf000 0x200>;
 	interrupt-parent = <&spmi_bus>;
 	interrupts = <0x1 0xf0 0x0>,
 		     <0x1 0xf0 0x1>,
@@ -501,7 +496,41 @@
 				   "cdc-vdda-cp";
 
 	qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
-	qcom,dig-cdc-base-addr = <0xc0f0000>;
+};
+
+MSM based Soundwire audio codec
+
+Required properties:
+ - compatible = "qcom,msm-sdw-codec";
+ - reg: Specifies the soundwire codec base address for MSM digital
+	soundwire core registers.
+ - interrupts: Specifies the soundwire master interrupt number to Apps processor.
+ - interrupt-names: Specifies the interrupt name from the soundwire master.
+ - swr_master: This node is added as a child of the MSM soundwire codec
+	       and uses the already existing soundwire master driver.
+	       Subchild node(s) under the soundwire master are handled by the
+	       existing WSA881x driver and represent the soundwire slave
+	       devices.
+
+Example:
+
+msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
+	compatible = "qcom,msm-sdw-codec";
+	reg = <0x152c1000 0x0>;
+	interrupts = <0 161 0>;
+	interrupt-names = "swr_master_irq";
+
+	swr_master {
+		compatible = "qcom,swr-wcd";
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		wsa881x_1: wsa881x@20170212 {
+			compatible = "qcom,wsa881x";
+			reg = <0x00 0x20170212>;
+			qcom,spkr-sd-n-gpio = <&tlmm 80 0>;
+		};
+	};
 };
 
 Tasha audio CODEC in I2C mode
diff --git a/Documentation/devicetree/bindings/thermal/qcom-bcl.txt b/Documentation/devicetree/bindings/thermal/qcom-bcl.txt
new file mode 100644
index 0000000..449cbad
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-bcl.txt
@@ -0,0 +1,44 @@
+===============================================================================
+BCL PMIC Peripheral driver:
+===============================================================================
+Qualcomm Technologies, Inc.'s PMIC has a battery current limiting peripheral,
+which can monitor for high battery current and low battery voltage in hardware.
+The BCL peripheral driver interacts with the PMIC peripheral using the SPMI
+driver interface. The hardware can take thresholds for notifying on high
+battery current or low battery voltage events.
+
+Required Parameters:
+- compatible: must be
+	'qcom,msm-bcl-lmh' for bcl peripheral with LMH DCVSh interface.
+- reg: <a b> where 'a' is the starting register address of the PMIC
+	peripheral and 'b' is the size of the peripheral address space.
+	If the BCL inhibit current derating feature is enabled, this must also
+	contain the PON spare registers. Example: <a b c d> where
+	'c' is the first PON spare register that will be written and 'd' is the
+	size of the register space that needs to be written. Certain versions
+	of the PMIC can send the interrupt to the LMH hardware driver directly.
+	In that case the shadow peripheral address space should be mentioned
+	along with the BCL peripheral's address.
+- interrupts: <a b c> Where 'a' is the SLAVE ID of the PMIC, 'b' is
+		the peripheral ID and 'c' is the interrupt number in PMIC.
+- interrupt-names: user defined names for the interrupts. These
+		interrupt names will be used by the drivers to identify the
+		interrupts, instead of specifying the IDs. The BCL driver will
+		accept these five standard interrupts:
+		"bcl-low-vbat"
+		"bcl-very-low-vbat"
+		"bcl-crit-low-vbat"
+		"bcl-high-ibat"
+		"bcl-very-high-ibat"
+
+
+Optional Parameters: none.
+
+Example:
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl-lmh";
+			reg = <0x4200 0xFF 0x88e 0x2>;
+			interrupts = <0x2 0x42 0x0>,
+					<0x2 0x42 0x1>;
+			interrupt-names = "bcl-high-ibat",
+						"bcl-low-vbat";
+		};
diff --git a/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
new file mode 100644
index 0000000..080d4da
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
@@ -0,0 +1,41 @@
+Limits Management Hardware - DCVS
+
+The LMH-DCVS block is a hardware IP for every CPU cluster, to handle quick
+changes in thermal limits. The hardware responds to thermal variation amongst
+the CPUs in the cluster by requesting limits on the clock frequency and
+voltage on the OSM hardware.
+
+The LMH DCVS driver exports a virtual sensor that can be used to set the
+thermal limits on the hardware. LMH DCVS driver can be a platform CPU Cooling
+device, which registers with the CPU cooling device interface. All CPU device
+nodes should reference the corresponding LMH DCVS hardware in device tree.
+CPUs referencing the same LMH DCVS node will be associated with the
+corresponding cooling device as related CPUs.
+
+Properties:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: shall be "qcom,msm-hw-limits"
+- interrupts:
+	Usage: required
+	Value type: <interrupt_type interrupt_number interrupt_trigger_type>
+	Definition: Should specify interrupt information about the debug
+			interrupt generated by the LMH DCVSh hardware. LMH
+			DCVSh hardware will generate this interrupt whenever
+			it makes a new cpu DCVS decision.
+
+Example:
+
+	lmh_dcvs0: qcom,limits-dcvs@0 {
+		compatible = "qcom,msm-hw-limits";
+		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	CPU0: cpu@0 {
+		device_type = "cpu";
+		compatible = "arm,armv8";
+		reg = <0x0 0x0>;
+		qcom,lmh-dcvs = <&lmh_dcvs0>;
+	};
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 88b6ea1..123a65b 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -168,6 +168,18 @@
 			by means of sensor ID. Additional coefficients are
 			interpreted as constant offset.
 
+- thermal-governor:     Thermal governor to be used for this thermal zone.
+			Expected values are:
+			"step_wise": Use step wise governor.
+			"fair_share": Use fair share governor.
+			"user_space": Use user space governor.
+			"power_allocator": Use power allocator governor.
+			"low_limits_floor": Use low limits floor
+						mitigation governor.
+			"low_limits_cap": Use a low limits cap mitigation
+						governor.
+  Type: string
+
 - sustainable-power:	An estimate of the sustainable power (in mW) that the
   Type: unsigned	thermal zone can dissipate at the desired
   Size: one cell	control temperature.  For reference, the
@@ -175,6 +187,11 @@
 			2000mW, while on a 10'' tablet is around
 			4500mW.
 
+- tracks-low:		Indicates that the temperature sensor tracks the low
+  Type: bool		thresholds, so that the governors may mitigate for
+			timing closure and other low temperature operating
+			issues.
+
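+A hypothetical thermal zone fragment using the thermal-governor and tracks-low
+properties described above (the zone name, sensor phandle and delay values are
+assumptions for illustration only):
+
+	thermal-zones {
+		cpu-lowtemp-zone {
+			polling-delay-passive = <1000>;
+			polling-delay = <5000>;
+			thermal-sensor = <&tsens0 1>;
+			thermal-governor = "low_limits_floor";
+			tracks-low;
+		};
+	};
+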
 Note: The delay properties are bound to the maximum dT/dt (temperature
 derivative over time) in two situations for a thermal zone:
 (i)  - when passive cooling is activated (polling-delay-passive); and
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
new file mode 100644
index 0000000..1065456
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -0,0 +1,54 @@
+Qualcomm Technologies, Inc. TSENS driver
+
+Temperature sensor (TSENS) driver supports reading temperature from sensors
+across the MSM SoC. The driver defaults to supporting a 12-bit ADC.
+
+The driver uses the Thermal sysfs framework to provide thermal
+clients the ability to read from supported on-die temperature sensors,
+set cool/warm temperature thresholds, and receive notifications
+on temperature threshold events.
+
+TSENS node
+
+Required properties:
+- compatible : should be "qcom,msm8996-tsens" for 8996 TSENS driver.
+	       should be "qcom,msm8953-tsens" for 8953 TSENS driver.
+	       should be "qcom,msm8998-tsens" for 8998 TSENS driver.
+	       should be "qcom,msmhamster-tsens" for hamster TSENS driver.
+	       should be "qcom,sdm660-tsens" for 660 TSENS driver.
+	       should be "qcom,sdm630-tsens" for 630 TSENS driver.
+	       should be "qcom,sdm845-tsens" for SDM845 TSENS driver.
+	       The compatible property is used to identify the respective controller to use
+	       for the corresponding SoC.
+- reg : offset and length of the TSENS registers with associated property in reg-names
+	as "tsens_physical" for TSENS TM physical address region.
+- reg-names : resource names used for the physical address of the TSENS
+	      registers. Should be "tsens_physical" for physical address of the TSENS.
+- interrupts : TSENS interrupt to notify Upper/Lower and Critical temperature thresholds.
+- interrupt-names: Should be "tsens-upper-lower" for temperature threshold.
+		   Add "tsens-critical" for Critical temperature threshold notification
+		   in addition to "tsens-upper-lower" for 8996 TSENS since
+		   8996 supports Upper/Lower and Critical temperature threshold.
+- qcom,sensors : Total number of available Temperature sensors for TSENS.
+
+Optional properties:
+- qcom,sensor-id : If this property is present, map the TSENS sensors based on
+		the remote sensors that are enabled in HW. Ensure the mapping is
+		not more than the number of supported sensors.
+- qcom,client-id : If this property is present, use it to identify the SW ID
+		mapping used to associate it with the controller and the physical
+		sensor mapping within the controller. The physical sensor mapping
+		within each controller is done using the qcom,sensor-id property.
+		If the property is not present, the SW ID mapping defaults to 0
+		through the total number of supported sensors for each controller
+		instance. A hypothetical sketch using these properties follows the
+		example below.
+
+Example:
+
+tsens@fc4a8000 {
+	compatible = "qcom,msm-tsens";
+	reg = <0xfc4a8000 0x2000>;
+	reg-names = "tsens_physical";
+	interrupts = <0 184 0>;
+	interrupt-names = "tsens-upper-lower";
+	qcom,sensors = <11>;
+};
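+
+A second, hypothetical fragment showing the optional qcom,sensor-id and
+qcom,client-id properties (the register address, interrupt number and the
+array-of-IDs value layout are assumptions for illustration, not taken from
+this binding):
+
+tsens1@fc4b8000 {
+	compatible = "qcom,msm8953-tsens";
+	reg = <0xfc4b8000 0x2000>;
+	reg-names = "tsens_physical";
+	interrupts = <0 184 0>;
+	interrupt-names = "tsens-upper-lower";
+	qcom,sensors = <8>;
+	qcom,sensor-id = <0 1 2 3 5 6 7 8>;
+	qcom,client-id = <14 15 16 17 18 19 20 21>;
+};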
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index af1ba92..af754fe 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -7,11 +7,12 @@
 contain a phandle reference to UFS PHY node.
 
 Required properties:
-- compatible        : compatible list, contains one of the following:
+- compatible        : compatible list, contains one of the following
+		      according to the relevant phy in use:
 		      "qcom,ufs-phy-qmp-14nm"
 		      "qcom,ufs-phy-qmp-v3"
 		      "qcom,ufs-phy-qrbtc-sdm845"
-according to the relevant phy in use.
+		      "qcom,ufs-phy-qmp-v3-660"
 - reg               : should contain PHY register address space (mandatory),
 - reg-names         : indicates various resources passed to driver (via reg proptery) by name.
                       Required "reg-names" is "phy_mem".
@@ -27,11 +28,12 @@
 Optional properties:
 - vdda-phy-max-microamp : specifies max. load that can be drawn from phy supply
 - vdda-pll-max-microamp : specifies max. load that can be drawn from pll supply
-- vddp-ref-clk-supply   : phandle to UFS device ref_clk pad power supply
-- vddp-ref-clk-max-microamp : specifies max. load that can be drawn from this supply
-- vddp-ref-clk-always-on : specifies if this supply needs to be kept always on
 - qcom,disable-lpm : disable various LPM mechanisms in UFS for platform compatibility
   (limit link to PWM Gear-1, 1-lane slow mode; disable hibernate, and avoid suspend/resume)
+- lanes-per-direction:	number of lanes available per direction - either 1 or 2.
+			Note that it is assumed that the same number of lanes
+			is used in both directions at once.
+			If not specified, default is 2 lanes per direction.
 
 Example:
 
@@ -40,6 +42,7 @@
 		reg = <0xfc597000 0x800>;
 		reg-names = "phy_mem";
 		#phy-cells = <0>;
+		lanes-per-direction = <1>;
 		vdda-phy-supply = <&pma8084_l4>;
 		vdda-pll-supply = <&pma8084_l12>;
 		vdda-phy-max-microamp = <50000>;
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 81c74c5..958194b 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -13,6 +13,9 @@
 - reg               : <registers mapping>
 		      first entry should contain UFS host controller register address space (mandatory),
                       second entry is the device ref. clock control register map (optional).
+- resets            : reset specifier pair consisting of a phandle for the reset provider
+                      and reset lines used by this controller.
+- reset-names       : reset signal name strings sorted in the same order as the resets property.
 
 Optional properties:
 - phys                  : phandle to UFS PHY node
@@ -52,6 +55,8 @@
 - lanes-per-direction:	number of lanes available per direction - either 1 or 2.
 			Note that it is assume same number of lanes is used both directions at once.
 			If not specified, default is 2 lanes per direction.
+- pinctrl-names, pinctrl-0, pinctrl-1,.. pinctrl-n: Refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+			for these optional properties
 - limit-tx-hs-gear	: Specify the max. limit on the TX HS gear.
 			  Valid range: 1-3. 1 => HS-G1, 2 => HS-G2, 3 => HS-G3
 - limit-rx-hs-gear	: Specify the max. limit on the RX HS gear. Refer "limit-tx-hs-gear" for expected values.
@@ -89,6 +94,8 @@
 		clocks = <&core 0>, <&ref 0>, <&iface 0>;
 		clock-names = "core_clk", "ref_clk", "iface_clk";
 		freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
+		resets = <clock_gcc GCC_UFS_BCR>;
+		reset-names = "core_reset";
 		phys = <&ufsphy1>;
 		phy-names = "ufsphy";
 		rpm-level = <3>;
@@ -146,6 +153,9 @@
 - qcom,pm-qos-default-cpu:		PM QoS voting is based on the cpu associated with each IO request by the block layer.
 					This defined the default cpu used for PM QoS voting in case a specific cpu value is not available.
 
+- qcom,vddp-ref-clk-supply	 : reference clock supply to the UFS device. Controlled by the host driver.
+- qcom,vddp-ref-clk-max-microamp : specifies max. load that can be drawn from the
+				   ref-clk supply.
 Example:
 	ufshc@0xfc598000 {
 		...
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 4a81034..609d853 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -56,6 +56,8 @@
 	fladj_30mhz_sdbnd signal is invalid or incorrect.
  - snps,disable-clk-gating: If present, disable controller's internal clock
 	gating. Default it is enabled.
+ - snps,xhci-imod-value: Interrupt moderation interval for host mode
+	(in increments of 250nsec).
 
 This is usually a subnode to DWC3 glue to which it is connected.
 
@@ -65,4 +67,5 @@
 	interrupts = <0 92 4>
 	usb-phy = <&usb2_phy>, <&usb3,phy>;
 	tx-fifo-resize;
+	snps,xhci-imod-value = <4000>;
 };
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index ad4adf0..8e5782a 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -43,7 +43,7 @@
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
-   property. "cfg_ahb_clk" is an optional clock.
+   property. "cfg_ahb_clk" and "com_aux_clk" are optional clocks.
  - qcom,vbus-valid-override: If present, indicates VBUS pin is not connected to
    the USB PHY and the controller must rely on external VBUS notification in
    order to manually relay the notification to the SSPHY.
@@ -91,6 +91,10 @@
 	"vdd" : vdd supply for digital circuit operation
 	"vdda18" : 1.8v high-voltage analog supply
 	"vdda33" : 3.3v high-voltage analog supply
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. "ref_clk_src" is a mandatory clock.
  - qcom,vdd-voltage-level: This property must be a list of three integer
    values (no, min, max) where each value represents either a voltage in
    microvolts or a value corresponding to voltage corner
@@ -102,22 +106,21 @@
 
 Optional properties:
  - reg-names: Additional registers corresponding with the following:
-   "tune2_efuse_addr": EFUSE based register address to read TUNE2 parameter.
-   via the QSCRATCH interface.
+   "efuse_addr": EFUSE address to read and update analog tune parameter.
    "emu_phy_base" : phy base address used for programming emulation target phy.
    "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
-   property. "cfg_ahb_clk", "ref_clk_src" and "ref_clk" are optional clocks.
+   property. "cfg_ahb_clk" and "ref_clk" are optional clocks.
  - qcom,qusb-phy-init-seq: QUSB PHY initialization sequence with value,reg pair.
  - qcom,qusb-phy-host-init-seq: QUSB PHY initialization sequence for host mode
    with value,reg pair.
  - qcom,emu-init-seq : emulation initialization sequence with value,reg pair.
  - qcom,phy-pll-reset-seq : emulation PLL reset sequence with value,reg pair.
  - qcom,emu-dcm-reset-seq : emulation DCM reset sequence with value,reg pair.
- - qcom,tune2-efuse-bit-pos: TUNE2 parameter related start bit position with EFUSE register
- - qcom,tune2-efuse-num-bits: Number of bits based value to use for TUNE2 high nibble
+ - qcom,efuse-bit-pos: Start bit position within the EFUSE register
+ - qcom,efuse-num-bits: Number of bits to read from the EFUSE register
  - qcom,emulation: Indicates that we are running on emulation platform.
  - qcom,hold-reset: Indicates that hold QUSB PHY into reset state.
  - qcom,phy-clk-scheme: Should be one of "cml" or "cmos" if ref_clk_addr is provided.
@@ -132,8 +135,8 @@
 		vdda18-supply = <&pm8994_l6>;
 		vdda33-supply = <&pm8994_l24>;
 		qcom,vdd-voltage-level = <1 5 7>;
-		qcom,tune2-efuse-bit-pos = <21>;
-		qcom,tune2-efuse-num-bits = <3>;
+		qcom,efuse-bit-pos = <21>;
+		qcom,efuse-num-bits = <3>;
 
 		clocks = <&clock_rpm clk_ln_bb_clk>,
 			 <&clock_gcc clk_gcc_rx2_usb1_clkref_clk>,
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 18056ee..bc66690 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -66,6 +66,7 @@
   event buffers. 1 event buffer is needed per h/w accelerated endpoint.
 - qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
 	which is used as a vote by driver to get max performance in perf mode.
+- qcom,smmu-s1-bypass: If present, configure SMMU to bypass stage 1 translation.
 
 Sub nodes:
 - Sub node for "DWC3- USB3 controller".
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4d6cdcf..9877ebf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -848,6 +848,12 @@
 			seconds. Defaults to 10*60 = 10mins. A value of 0
 			disables the blank timer.
 
+	core_ctl_disable_cpumask= [SMP]
+			Exempt the CPUs from being managed by core_ctl.
+			core_ctl operates on a cluster basis. So all the
+			CPUs in a given cluster must be specified to disable
+			core_ctl for that cluster.
+
 	coredump_filter=
 			[KNL] Change the default value for
 			/proc/<pid>/coredump_filter.
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 404a0e9..379dc99 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -74,3 +74,13 @@
 	"raw_rpmb_size_mult" is a mutliple of 128kB block.
 	RPMB size in byte is calculated by using the following equation:
 	RPMB partition size = 128kB x raw_rpmb_size_mult
+
+SD/MMC/SDIO Clock Gating Attribute
+==================================
+
+Read and write access is provided to the following attribute.
+This attribute appears only if CONFIG_MMC_CLKGATE is enabled.
+
+	clkgate_delay	Tune the clock gating delay with desired value in milliseconds.
+
+echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index e206560..0f0fc7d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1462,11 +1462,20 @@
 	Functional default: enabled if accept_ra is enabled.
 			    disabled if accept_ra is disabled.
 
+accept_ra_rt_info_min_plen - INTEGER
+	Minimum prefix length of Route Information in RA.
+
+	Route Information w/ prefix smaller than this variable shall
+	be ignored.
+
+	Functional default: 0 if accept_ra_rtr_pref is enabled.
+			    -1 if accept_ra_rtr_pref is disabled.
+
 accept_ra_rt_info_max_plen - INTEGER
 	Maximum prefix length of Route Information in RA.
 
-	Route Information w/ prefix larger than or equal to this
-	variable shall be ignored.
+	Route Information w/ prefix larger than this variable shall
+	be ignored.
 
 	Functional default: 0 if accept_ra_rtr_pref is enabled.
 			    -1 if accept_ra_rtr_pref is disabled.
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index f34a8ee6..f40b965 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -87,6 +87,13 @@
 pages_unshared   - how many pages unique but repeatedly checked for merging
 pages_volatile   - how many pages changing too fast to be placed in a tree
 full_scans       - how many times all mergeable areas have been scanned
+deferred_timer   - whether to use deferred timers or not
+                 e.g. "echo 1 > /sys/kernel/mm/ksm/deferred_timer"
+                 Default: 0 (deferred timers are not used. Users might want
+                 to set the deferred_timer option if they do not want the
+                 ksm thread to wake up the CPU to carry out ksm activities,
+                 thus gaining on battery while compromising slightly on the
+                 memory that could have been saved.)
 
 A high ratio of pages_sharing to pages_shared indicates good sharing, but
 a high ratio of pages_unshared to pages_sharing indicates wasted effort.
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 8465241..7ca432e 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -252,6 +252,10 @@
 tells us that SLUB has restored the Redzone to its proper value and then
 system operations continue.
 
+If it is required to report the details of the issue and then panic immediately
+afterwards, in order to possibly catch any scribblers, one can set the
+CONFIG_DEBUG_SLUB_PANIC_ON option.
+
 Emergency operations:
 ---------------------
 
diff --git a/Makefile b/Makefile
index 9ec83a0..e70a1eb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 16
+SUBLEVEL = 20
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 7173ec9..8158c873 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
 		};
 
 		usb1: ohci@00400000 {
-			compatible = "atmel,sama5d2-ohci", "usb-ohci";
+			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00400000 0x100000>;
 			interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
 			clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 2ef282f..b4e74af 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -24,6 +24,8 @@
 	struct kref		kref;
 };
 
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
 struct dma_iommu_mapping *
 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
 
@@ -33,5 +35,29 @@
 					struct dma_iommu_mapping *mapping);
 void arm_iommu_detach_device(struct device *dev);
 
+#else  /* !CONFIG_ARM_DMA_USE_IOMMU */
+
+static inline struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+	return NULL;
+}
+
+static inline void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+}
+
+static inline int arm_iommu_attach_device(struct device *dev,
+			struct dma_iommu_mapping *mapping)
+{
+	return -ENODEV;
+}
+
+static inline void arm_iommu_detach_device(struct device *dev)
+{
+}
+
+#endif	/* CONFIG_ARM_DMA_USE_IOMMU */
+
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index b4332b7..31dde8b 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@
 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+	u32 lpr0;
+	u32 saved_lpr0;
+
+	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+	cpu_do_idle();
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-	{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+	{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
 	{ /*sentinel*/ }
 };
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e46907c..33f3cc6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1925,7 +1925,11 @@
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1933,11 +1937,12 @@
 
 	prot = __dma_direction_to_prot(dir);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6293973..ba0695b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -478,6 +478,16 @@
 
 	  If unsure, say Y.
 
+config QCOM_QDF2400_ERRATUM_0065
+	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+	default y
+	help
+	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+	  If unsure, say Y.
+
 endmenu
 
 
@@ -1069,6 +1079,26 @@
 	  DTBs to be built by default (instead of a standalone Image.gz.)
 	  The image will be built in arch/arm64/boot/Image.gz-dtb
 
+choice
+	prompt "Appended DTB Kernel Image name"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Enabling this option will cause a specific kernel image, Image or
+	  Image.gz, to be used for final image creation.
+	  The image will be built in arch/arm64/boot/IMAGE-NAME-dtb
+
+	config IMG_GZ_DTB
+		bool "Image.gz-dtb"
+	config IMG_DTB
+		bool "Image-dtb"
+endchoice
+
+config BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME
+	string
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	default "Image.gz-dtb" if IMG_GZ_DTB
+	default "Image-dtb" if IMG_DTB
+
 config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
 	string "Default dtb names"
 	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index f7a21a6..445aeb6 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -120,7 +120,7 @@
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
 config ARCH_SDM845
-	bool "Enable Support for Qualcomm SDM845"
+	bool "Enable Support for Qualcomm Technologies Inc. SDM845"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bba8d2c..13a64c9 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -96,7 +96,7 @@
 
 # Default target when executing plain make
 ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
-KBUILD_IMAGE	:= Image.gz-dtb
+KBUILD_IMAGE	:= $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME))
 else
 KBUILD_IMAGE	:= Image.gz
 endif
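
For illustration only: with CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y and
CONFIG_IMG_DTB=y, CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME defaults to
"Image-dtb", and the default build target follows it:

	make ARCH=arm64        # CROSS_COMPILE set as appropriate
	# => arch/arm64/boot/Image-dtb
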
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 30ee6e7..c32324f 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -8,7 +8,10 @@
 	sdm845-cdp.dtb \
 	sdm845-v2-rumi.dtb \
 	sdm845-v2-mtp.dtb \
-	sdm845-v2-cdp.dtb
+	sdm845-v2-cdp.dtb \
+	sdm845-qrd.dtb \
+	sdm845-4k-panel-mtp.dtb \
+	sdm845-4k-panel-cdp.dtb
 
 dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
 	sdm830-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
new file mode 100644
index 0000000..c6dfc8d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -0,0 +1,261 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_nt35597_truly_dsc_cmd: qcom,mdss_dsi_nt35597_dsc_cmd_truly {
+		qcom,mdss-dsi-panel-name =
+			"nt35597 cmd mode dsi truly panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1440>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <8>;
+		qcom,mdss-dsi-v-front-porch = <10>;
+		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1e
+			15 01 00 00 00 00 02 0b 73
+			15 01 00 00 00 00 02 0c 73
+			15 01 00 00 00 00 02 0e b0
+			15 01 00 00 00 00 02 0f ae
+			15 01 00 00 00 00 02 11 b8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5a 00
+			15 01 00 00 00 00 02 5b 01
+			15 01 00 00 00 00 02 5c 80
+			15 01 00 00 00 00 02 5d 81
+			15 01 00 00 00 00 02 5e 00
+			15 01 00 00 00 00 02 5f 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 1c
+			15 01 00 00 00 00 02 01 0b
+			15 01 00 00 00 00 02 02 0c
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0f
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8a
+			15 01 00 00 00 00 02 0a 13
+			15 01 00 00 00 00 02 0b 13
+			15 01 00 00 00 00 02 0c 15
+			15 01 00 00 00 00 02 0d 15
+			15 01 00 00 00 00 02 0e 17
+			15 01 00 00 00 00 02 0f 17
+			15 01 00 00 00 00 02 10 1c
+			15 01 00 00 00 00 02 11 0b
+			15 01 00 00 00 00 02 12 0c
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0f
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8a
+			15 01 00 00 00 00 02 1a 13
+			15 01 00 00 00 00 02 1b 13
+			15 01 00 00 00 00 02 1c 15
+			15 01 00 00 00 00 02 1d 15
+			15 01 00 00 00 00 02 1e 17
+			15 01 00 00 00 00 02 1f 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6d
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 e0 00
+			15 01 00 00 00 00 02 dc 21
+			15 01 00 00 00 00 02 dd 22
+			15 01 00 00 00 00 02 de 07
+			15 01 00 00 00 00 02 df 07
+			15 01 00 00 00 00 02 e3 6D
+			15 01 00 00 00 00 02 e1 07
+			15 01 00 00 00 00 02 e2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 d8
+			15 01 00 00 00 00 02 2a 2a
+			/* CLK */
+			15 01 00 00 00 00 02 4b 03
+			15 01 00 00 00 00 02 4c 11
+			15 01 00 00 00 00 02 4d 10
+			15 01 00 00 00 00 02 4e 01
+			15 01 00 00 00 00 02 4f 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5b 43
+			15 01 00 00 00 00 02 5c 00
+			15 01 00 00 00 00 02 5f 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution:1440x2560*/
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7a 80
+			15 01 00 00 00 00 02 7b 91
+			15 01 00 00 00 00 02 7c D8
+			15 01 00 00 00 00 02 7d 60
+			15 01 00 00 00 00 02 7f 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 b3 C0
+			15 01 00 00 00 00 02 b4 00
+			15 01 00 00 00 00 02 b5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0a
+			15 01 00 00 00 00 02 94 0a
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8a 00
+			15 01 00 00 00 00 02 9b ff
+			/* IMGSWAP =1 @PortSwap=1 */
+			15 01 00 00 00 00 02 9d b0
+			15 01 00 00 00 00 02 9f 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 ec 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 ff 10
+			/* VESA DSC PPS settings(1440x2560 slide 16H) */
+			39 01 00 00 00 00 11 c1 09 20 00 10 02 00 02 68
+					01 bb 00 0a 06 67 04 c5
+			39 01 00 00 00 00 03 c2 10 f0
+			/* C0h = 0x00(2 Port SDC); 0x01(1 PortA FBC);
+			 * 0x02(MTK); 0x03(1 PortA VESA)
+			 */
+			15 01 00 00 00 00 02 c0 03
+			/* VBP+VSA=,VFP = 10H */
+			15 01 00 00 00 00 04 3b 03 0a 0a
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 e5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 bb 10
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 fb 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+			05 01 00 00 78 00 02 10 00];
+
+		qcom,mdss-dsi-on-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_nt35597_truly_dsc_cmd_config0>;
+
+		dsi_nt35597_truly_dsc_cmd_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_cmd_config1: config1 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_cmd_config2: config2 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
new file mode 100644
index 0000000..334120a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -0,0 +1,248 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_nt35597_truly_dsc_video: qcom,mdss_dsi_nt35597_dsc_video_truly {
+		qcom,mdss-dsi-panel-name =
+			"nt35597 video mode dsi truly panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1440>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <8>;
+		qcom,mdss-dsi-v-front-porch = <10>;
+		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1e
+			15 01 00 00 00 00 02 0b 73
+			15 01 00 00 00 00 02 0c 73
+			15 01 00 00 00 00 02 0e b0
+			15 01 00 00 00 00 02 0f aE
+			15 01 00 00 00 00 02 11 b8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5a 00
+			15 01 00 00 00 00 02 5b 01
+			15 01 00 00 00 00 02 5c 80
+			15 01 00 00 00 00 02 5d 81
+			15 01 00 00 00 00 02 5e 00
+			15 01 00 00 00 00 02 5f 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 1c
+			15 01 00 00 00 00 02 01 0b
+			15 01 00 00 00 00 02 02 0c
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0f
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8a
+			15 01 00 00 00 00 02 0a 13
+			15 01 00 00 00 00 02 0b 13
+			15 01 00 00 00 00 02 0c 15
+			15 01 00 00 00 00 02 0d 15
+			15 01 00 00 00 00 02 0e 17
+			15 01 00 00 00 00 02 0f 17
+			15 01 00 00 00 00 02 10 1c
+			15 01 00 00 00 00 02 11 0b
+			15 01 00 00 00 00 02 12 0c
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0f
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8a
+			15 01 00 00 00 00 02 1a 13
+			15 01 00 00 00 00 02 1b 13
+			15 01 00 00 00 00 02 1c 15
+			15 01 00 00 00 00 02 1d 15
+			15 01 00 00 00 00 02 1e 17
+			15 01 00 00 00 00 02 1f 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6d
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 e0 00
+			15 01 00 00 00 00 02 dc 21
+			15 01 00 00 00 00 02 dd 22
+			15 01 00 00 00 00 02 de 07
+			15 01 00 00 00 00 02 df 07
+			15 01 00 00 00 00 02 e3 6d
+			15 01 00 00 00 00 02 e1 07
+			15 01 00 00 00 00 02 e2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 d8
+			15 01 00 00 00 00 02 2a 2a
+			/* CLK */
+			15 01 00 00 00 00 02 4b 03
+			15 01 00 00 00 00 02 4c 11
+			15 01 00 00 00 00 02 4d 10
+			15 01 00 00 00 00 02 4e 01
+			15 01 00 00 00 00 02 4f 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5b 43
+			15 01 00 00 00 00 02 5c 00
+			15 01 00 00 00 00 02 5f 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution:1440x2560*/
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7a 80
+			15 01 00 00 00 00 02 7b 91
+			15 01 00 00 00 00 02 7c d8
+			15 01 00 00 00 00 02 7d 60
+			15 01 00 00 00 00 02 7f 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 b3 c0
+			15 01 00 00 00 00 02 b4 00
+			15 01 00 00 00 00 02 b5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0a
+			15 01 00 00 00 00 02 94 0a
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8a 00
+			15 01 00 00 00 00 02 9b ff
+			/* IMGSWAP =1 @PortSwap=1 */
+			15 01 00 00 00 00 02 9d b0
+			15 01 00 00 00 00 02 9f 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 ec 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 ff 10
+			/* VESA DSC PPS settings(1440x2560 slide 16H) */
+			39 01 00 00 00 00 11 c1 09 20 00 10 02 00 02 68 01
+				bb 00 0a 06 67 04 c5
+			39 01 00 00 00 00 03 c2 10 f0
+			/* C0h = 0x00(2 Port SDC); 0x01(1 PortA FBC);
+			 * 0x02(MTK); 0x03(1 PortA VESA)
+			 */
+			15 01 00 00 00 00 02 c0 03
+			/* VBP+VSA=,VFP = 10H */
+			39 01 00 00 00 00 04 3b 03 0a 0a
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 e5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 bb 03
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 fb 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_nt35597_truly_dsc_video_config0>;
+
+		dsi_nt35597_truly_dsc_video_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_video_config1: config1 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_video_config2: config2 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
new file mode 100644
index 0000000..e4a0370
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -0,0 +1,220 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt35597_truly_cmd: qcom,mdss_dsi_nt35597_truly_wqxga_cmd{
+		qcom,mdss-dsi-panel-name =
+			"Dual nt35597 cmd mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 FF 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1E
+			15 01 00 00 00 00 02 0B 73
+			15 01 00 00 00 00 02 0C 73
+			15 01 00 00 00 00 02 0E B0
+			15 01 00 00 00 00 02 0F AE
+			15 01 00 00 00 00 02 11 B8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5A 00
+			15 01 00 00 00 00 02 5B 01
+			15 01 00 00 00 00 02 5C 80
+			15 01 00 00 00 00 02 5D 81
+			15 01 00 00 00 00 02 5E 00
+			15 01 00 00 00 00 02 5F 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 1C
+			15 01 00 00 00 00 02 01 0B
+			15 01 00 00 00 00 02 02 0C
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0F
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8A
+			15 01 00 00 00 00 02 0A 13
+			15 01 00 00 00 00 02 0B 13
+			15 01 00 00 00 00 02 0C 15
+			15 01 00 00 00 00 02 0D 15
+			15 01 00 00 00 00 02 0E 17
+			15 01 00 00 00 00 02 0F 17
+			15 01 00 00 00 00 02 10 1C
+			15 01 00 00 00 00 02 11 0B
+			15 01 00 00 00 00 02 12 0C
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0F
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8A
+			15 01 00 00 00 00 02 1A 13
+			15 01 00 00 00 00 02 1B 13
+			15 01 00 00 00 00 02 1C 15
+			15 01 00 00 00 00 02 1D 15
+			15 01 00 00 00 00 02 1E 17
+			15 01 00 00 00 00 02 1F 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6D
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 E0 00
+			15 01 00 00 00 00 02 DC 21
+			15 01 00 00 00 00 02 DD 22
+			15 01 00 00 00 00 02 DE 07
+			15 01 00 00 00 00 02 DF 07
+			15 01 00 00 00 00 02 E3 6D
+			15 01 00 00 00 00 02 E1 07
+			15 01 00 00 00 00 02 E2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 D8
+			15 01 00 00 00 00 02 2A 2A
+			/* CLK */
+			15 01 00 00 00 00 02 4B 03
+			15 01 00 00 00 00 02 4C 11
+			15 01 00 00 00 00 02 4D 10
+			15 01 00 00 00 00 02 4E 01
+			15 01 00 00 00 00 02 4F 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5B 43
+			15 01 00 00 00 00 02 5C 00
+			15 01 00 00 00 00 02 5F 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution:1440x2560*/
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7A 80
+			15 01 00 00 00 00 02 7B 91
+			15 01 00 00 00 00 02 7C D8
+			15 01 00 00 00 00 02 7D 60
+			15 01 00 00 00 00 02 7F 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 B3 C0
+			15 01 00 00 00 00 02 B4 00
+			15 01 00 00 00 00 02 B5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0A
+			15 01 00 00 00 00 02 94 0A
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8A 00
+			15 01 00 00 00 00 02 9B FF
+			/* IMGSWAP =1 @PortSwap=1 */
+			15 01 00 00 00 00 02 9D B0
+			15 01 00 00 00 00 02 9F 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 EC 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 ff 10
+			/* VBP+VSA=,VFP = 10H */
+			15 01 00 00 00 00 04 3B 03 0A 0A
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 E5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 BB 10
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 FB 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+			05 01 00 00 78 00 02 10 00];
+
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+		qcom,config-select = <&dsi_dual_nt35597_truly_cmd_config0>;
+
+		dsi_dual_nt35597_truly_cmd_config0: config0 {
+			qcom,split-mode = "dualctl-split";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
new file mode 100644
index 0000000..d6ef3d8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -0,0 +1,210 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt35597_truly_video: qcom,mdss_dsi_nt35597_wqxga_video_truly {
+		qcom,mdss-dsi-panel-name =
+			"Dual nt35597 video mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0x3ff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 FF 20
+			15 01 00 00 00 00 02 FB 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1E
+			15 01 00 00 00 00 02 0B 73
+			15 01 00 00 00 00 02 0C 73
+			15 01 00 00 00 00 02 0E B0
+			15 01 00 00 00 00 02 0F AE
+			15 01 00 00 00 00 02 11 B8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5A 00
+			15 01 00 00 00 00 02 5B 01
+			15 01 00 00 00 00 02 5C 80
+			15 01 00 00 00 00 02 5D 81
+			15 01 00 00 00 00 02 5E 00
+			15 01 00 00 00 00 02 5F 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 FF 24
+			15 01 00 00 00 00 02 FB 01
+			15 01 00 00 00 00 02 00 1C
+			15 01 00 00 00 00 02 01 0B
+			15 01 00 00 00 00 02 02 0C
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0F
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8A
+			15 01 00 00 00 00 02 0A 13
+			15 01 00 00 00 00 02 0B 13
+			15 01 00 00 00 00 02 0C 15
+			15 01 00 00 00 00 02 0D 15
+			15 01 00 00 00 00 02 0E 17
+			15 01 00 00 00 00 02 0F 17
+			15 01 00 00 00 00 02 10 1C
+			15 01 00 00 00 00 02 11 0B
+			15 01 00 00 00 00 02 12 0C
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0F
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8A
+			15 01 00 00 00 00 02 1A 13
+			15 01 00 00 00 00 02 1B 13
+			15 01 00 00 00 00 02 1C 15
+			15 01 00 00 00 00 02 1D 15
+			15 01 00 00 00 00 02 1E 17
+			15 01 00 00 00 00 02 1F 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6D
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 E0 00
+			15 01 00 00 00 00 02 DC 21
+			15 01 00 00 00 00 02 DD 22
+			15 01 00 00 00 00 02 DE 07
+			15 01 00 00 00 00 02 DF 07
+			15 01 00 00 00 00 02 E3 6D
+			15 01 00 00 00 00 02 E1 07
+			15 01 00 00 00 00 02 E2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 D8
+			15 01 00 00 00 00 02 2A 2A
+			/* CLK */
+			15 01 00 00 00 00 02 4B 03
+			15 01 00 00 00 00 02 4C 11
+			15 01 00 00 00 00 02 4D 10
+			15 01 00 00 00 00 02 4E 01
+			15 01 00 00 00 00 02 4F 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5B 43
+			15 01 00 00 00 00 02 5C 00
+			15 01 00 00 00 00 02 5F 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution:1440x2560*/
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7A 80
+			15 01 00 00 00 00 02 7B 91
+			15 01 00 00 00 00 02 7C D8
+			15 01 00 00 00 00 02 7D 60
+			15 01 00 00 00 00 02 7F 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 B3 C0
+			15 01 00 00 00 00 02 B4 00
+			15 01 00 00 00 00 02 B5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0A
+			15 01 00 00 00 00 02 94 0A
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8A 00
+			15 01 00 00 00 00 02 9B FF
+			/* IMGSWAP =1 @PortSwap=1 */
+			15 01 00 00 00 00 02 9D B0
+			15 01 00 00 00 00 02 9F 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 EC 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 FF 10
+			/* VBP+VSA=,VFP = 10H */
+			15 01 00 00 00 00 04 3B 03 0A 0A
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 E5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 BB 03
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 FB 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+		qcom,mdss-dsi-tx-eot-append;
+
+		qcom,config-select = <&dsi_dual_nt35597_truly_video_config0>;
+
+		dsi_dual_nt35597_truly_video_config0: config0 {
+			qcom,split-mode = "dualctl-split";
+		};
+
+
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 0000000..834a08fd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,141 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_s6e3ha3_amoled_cmd: qcom,mdss_dsi_s6e3ha3_amoled_wqhd_cmd{
+		qcom,mdss-dsi-panel-name =
+			"Dual s6e3ha3 amoled cmd mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <40>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <31>;
+		qcom,mdss-dsi-v-front-porch = <30>;
+		qcom,mdss-dsi-v-pulse-width = <8>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-on-command = [05 01 00 00 05 00 02 11 00
+					39 01 00 00 00 00 05 2a 00 00 05 9f
+					39 01 00 00 00 00 05 2b 00 00 09 ff
+					39 01 00 00 00 00 03 f0 5a 5a
+					39 01 00 00 00 00 02 b0 10
+					39 01 00 00 00 00 02 b5 a0
+					39 01 00 00 00 00 02 c4 03
+					39 01 00 00 00 00 0a
+						f6 42 57 37 00 aa cc d0 00 00
+					39 01 00 00 00 00 02 f9 03
+					39 01 00 00 00 00 14
+						c2 00 00 d8 d8 00 80 2b 05 08
+						0e 07 0b 05 0d 0a 15 13 20 1e
+					39 01 00 00 78 00 03 f0 a5 a5
+					39 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 02 53 20
+					39 01 00 00 00 00 02 51 60
+					05 01 00 00 05 00 02 29 00];
+		qcom,mdss-dsi-off-command = [05 01 00 00 3c 00 02 28 00
+					05 01 00 00 b4 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-lp-mode-on = [39 00 00 00 05 00 03 f0 5a 5a
+					39 00 00 00 05 00 03 f1 5a 5a
+					39 00 00 00 05 00 03 fc 5a 5a
+					39 00 00 00 05 00 02 b0 17
+					39 00 00 00 05 00 02 cb 10
+					39 00 00 00 05 00 02 b0 2d
+					39 00 00 00 05 00 02 cb cd
+					39 00 00 00 05 00 02 b0 0e
+					39 00 00 00 05 00 02 cb 02
+					39 00 00 00 05 00 02 b0 0f
+					39 00 00 00 05 00 02 cb 09
+					39 00 00 00 05 00 02 b0 02
+					39 00 00 00 05 00 02 f2 c9
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f2 c0
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f4 aa
+					39 00 00 00 05 00 02 b0 08
+					39 00 00 00 05 00 02 b1 30
+					39 00 00 00 05 00 02 b0 09
+					39 00 00 00 05 00 02 b1 0a
+					39 00 00 00 05 00 02 b0 0d
+					39 00 00 00 05 00 02 b1 10
+					39 00 00 00 05 00 02 b0 00
+					39 00 00 00 05 00 02 f7 03
+					39 00 00 00 05 00 02 fe 30
+					39 01 00 00 05 00 02 fe b0];
+		qcom,mdss-dsi-lp-mode-off = [39 00 00 00 05 00 03 f0 5a 5a
+					39 00 00 00 05 00 03 f1 5a 5a
+					39 00 00 00 05 00 03 fc 5a 5a
+					39 00 00 00 05 00 02 b0 2d
+					39 00 00 00 05 00 02 cb 4d
+					39 00 00 00 05 00 02 b0 17
+					39 00 00 00 05 00 02 cb 04
+					39 00 00 00 05 00 02 b0 0e
+					39 00 00 00 05 00 02 cb 06
+					39 00 00 00 05 00 02 b0 0f
+					39 00 00 00 05 00 02 cb 05
+					39 00 00 00 05 00 02 b0 02
+					39 00 00 00 05 00 02 f2 b8
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f2 80
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f4 8a
+					39 00 00 00 05 00 02 b0 08
+					39 00 00 00 05 00 02 b1 10
+					39 00 00 00 05 00 02 b0 09
+					39 00 00 00 05 00 02 b1 0a
+					39 00 00 00 05 00 02 b0 0d
+					39 00 00 00 05 00 02 b1 80
+					39 00 00 00 05 00 02 b0 00
+					39 00 00 00 05 00 02 f7 03
+					39 00 00 00 05 00 02 fe 30
+					39 01 00 00 05 00 02 fe b0];
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,dcs-cmd-by-left;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <255>;
+		qcom,mdss-pan-physical-width-dimension = <68>;
+		qcom,mdss-pan-physical-height-dimension = <122>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
new file mode 100644
index 0000000..aa52083
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -0,0 +1,77 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_sharp_1080_cmd: qcom,mdss_dsi_sharp_1080p_cmd {
+		qcom,mdss-dsi-panel-name = "sharp 1080p cmd mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-destination = "display_1";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-panel-clockrate = <850000000>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1080>;
+		qcom,mdss-dsi-panel-height = <1920>;
+		qcom,mdss-dsi-h-front-porch = <0>;
+		qcom,mdss-dsi-h-back-porch = <0>;
+		qcom,mdss-dsi-h-pulse-width = <0>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <0>;
+		qcom,mdss-dsi-v-front-porch = <0>;
+		qcom,mdss-dsi-v-pulse-width = <0>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-on-command = [
+			15 01 00 00 00 00 02 bb 10
+			15 01 00 00 00 00 02 b0 03
+			05 01 00 00 78 00 01 11
+			15 01 00 00 00 00 02 51 ff
+			15 01 00 00 00 00 02 53 24
+			15 01 00 00 00 00 02 ff 23
+			15 01 00 00 00 00 02 08 05
+			15 01 00 00 00 00 02 46 90
+			15 01 00 00 00 00 02 ff 10
+			15 01 00 00 00 00 02 ff f0
+			15 01 00 00 00 00 02 92 01
+			15 01 00 00 00 00 02 ff 10
+			05 01 00 00 28 00 01 29];
+		qcom,mdss-dsi-off-command = [
+			05 01 00 00 10 00 01 28
+			05 01 00 00 40 00 01 10];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
new file mode 100644
index 0000000..25c949c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -0,0 +1,93 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_sharp_4k_dsc_cmd: qcom,mdss_dsi_sharp_4k_dsc_cmd {
+		qcom,mdss-dsi-panel-name = "Sharp 4k cmd mode dsc dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1080>;
+		qcom,mdss-dsi-panel-height = <3840>;
+		qcom,mdss-dsi-h-front-porch = <30>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <4>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,dcs-cmd-by-left;
+		qcom,mdss-dsi-tx-eot-append;
+
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-on-command = [
+			39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
+			0f 03 19 01 97
+			39 01 00 00 00 00 03 92 10 f0
+			15 01 00 00 00 00 02 90 03
+			15 01 00 00 00 00 02 03 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 04
+			15 01 00 00 00 00 02 c0 03
+			39 01 00 00 00 00 06 f0 55 aa 52 08 07
+			15 01 00 00 00 00 02 ef 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 00
+			15 01 00 00 00 00 02 b4 01
+			15 01 00 00 00 00 02 35 00
+			39 01 00 00 00 00 06 f0 55 aa 52 08 01
+			39 01 00 00 00 00 05 ff aa 55 a5 80
+			15 01 00 00 00 00 02 6f 01
+			15 01 00 00 00 00 02 f3 10
+			39 01 00 00 00 00 05 ff aa 55 a5 00
+			05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
+			05 01 00 00 78 00 01 29 /* display on + delay 120ms */
+			];
+
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_sharp_dsc_cmd_config0>;
+
+		dsi_sharp_dsc_cmd_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <32>;
+			qcom,mdss-dsc-slice-width = <1080>;
+			qcom,mdss-dsc-slice-per-pkt = <1>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
new file mode 100644
index 0000000..cc093d6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -0,0 +1,86 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_sharp_4k_dsc_video: qcom,mdss_dsi_sharp_4k_dsc_video {
+		qcom,mdss-dsi-panel-name = "Sharp 4k video mode dsc dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1080>;
+		qcom,mdss-dsi-panel-height = <3840>;
+		qcom,mdss-dsi-h-front-porch = <30>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <4>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+		qcom,mdss-dsi-tx-eot-append;
+
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-on-command = [
+			39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
+			0f 03 19 01 97
+			39 01 00 00 00 00 03 92 10 f0
+			15 01 00 00 00 00 02 90 03
+			15 01 00 00 00 00 02 03 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 04
+			15 01 00 00 00 00 02 c0 03
+			39 01 00 00 00 00 06 f0 55 aa 52 08 07
+			15 01 00 00 00 00 02 ef 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 00
+			15 01 00 00 00 00 02 b4 10
+			15 01 00 00 00 00 02 35 00
+			39 01 00 00 00 00 06 f0 55 aa 52 08 01
+			39 01 00 00 00 00 05 ff aa 55 a5 80
+			15 01 00 00 00 00 02 6f 01
+			15 01 00 00 00 00 02 f3 10
+			39 01 00 00 00 00 05 ff aa 55 a5 00
+			05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
+			05 01 00 00 78 00 01 29 /* display on + delay 120ms */
+			];
+
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_sharp_dsc_video_config0>;
+
+		dsi_sharp_dsc_video_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <32>;
+			qcom,mdss-dsc-slice-width = <1080>;
+			qcom,mdss-dsc-slice-per-pkt = <1>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
new file mode 100644
index 0000000..2071649
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
@@ -0,0 +1,632 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_sharp_1080_120hz_cmd: qcom,mdss_dual_sharp_1080p_120hz_cmd {
+		qcom,mdss-dsi-panel-name =
+			"sharp 1080p 120hz dual dsi cmd mode panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <120>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <540>;
+		qcom,mdss-dsi-panel-height = <1920>;
+		qcom,mdss-dsi-h-front-porch = <28>;
+		qcom,mdss-dsi-h-back-porch = <4>;
+		qcom,mdss-dsi-h-pulse-width = <4>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <12>;
+		qcom,mdss-dsi-v-front-porch = <12>;
+		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 10>;
+		qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 ba 07
+			15 01 00 00 00 00 02 c0 00
+			15 01 00 00 00 00 02 bb 10
+			15 01 00 00 00 00 02 d9 00
+			15 01 00 00 00 00 02 ef 70
+			15 01 00 00 00 00 02 f7 80
+			39 01 00 00 00 00 06 3b 03 0e 0c 08 1c
+			15 01 00 00 00 00 02 e9 0e
+			15 01 00 00 00 00 02 ea 0c
+			15 01 00 00 00 00 02 35 00
+			15 01 00 00 00 00 02 c0 00
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 59 6a
+			15 01 00 00 00 00 02 0b 1b
+			15 01 00 00 00 00 02 61 f7
+			15 01 00 00 00 00 02 62 6c
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 04 c8
+			15 01 00 00 00 00 02 05 1a
+			15 01 00 00 00 00 02 0d 93
+			15 01 00 00 00 00 02 0e 93
+			15 01 00 00 00 00 02 0f 7e
+			15 01 00 00 00 00 02 06 69
+			15 01 00 00 00 00 02 07 bc
+			15 01 00 00 00 00 02 10 03
+			15 01 00 00 00 00 02 11 64
+			15 01 00 00 00 00 02 12 5a
+			15 01 00 00 00 00 02 13 40
+			15 01 00 00 00 00 02 14 40
+			15 01 00 00 00 00 02 15 00
+			15 01 00 00 00 00 02 33 13
+			15 01 00 00 00 00 02 5a 40
+			15 01 00 00 00 00 02 5b 40
+			15 01 00 00 00 00 02 5e 80
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 80
+			15 01 00 00 00 00 02 14 80
+			15 01 00 00 00 00 02 01 80
+			15 01 00 00 00 00 02 15 80
+			15 01 00 00 00 00 02 02 80
+			15 01 00 00 00 00 02 16 80
+			15 01 00 00 00 00 02 03 0a
+			15 01 00 00 00 00 02 17 0c
+			15 01 00 00 00 00 02 04 06
+			15 01 00 00 00 00 02 18 08
+			15 01 00 00 00 00 02 05 80
+			15 01 00 00 00 00 02 19 80
+			15 01 00 00 00 00 02 06 80
+			15 01 00 00 00 00 02 1a 80
+			15 01 00 00 00 00 02 07 80
+			15 01 00 00 00 00 02 1b 80
+			15 01 00 00 00 00 02 08 80
+			15 01 00 00 00 00 02 1c 80
+			15 01 00 00 00 00 02 09 80
+			15 01 00 00 00 00 02 1d 80
+			15 01 00 00 00 00 02 0a 80
+			15 01 00 00 00 00 02 1e 80
+			15 01 00 00 00 00 02 0b 1a
+			15 01 00 00 00 00 02 1f 1b
+			15 01 00 00 00 00 02 0c 16
+			15 01 00 00 00 00 02 20 17
+			15 01 00 00 00 00 02 0d 1c
+			15 01 00 00 00 00 02 21 1d
+			15 01 00 00 00 00 02 0e 18
+			15 01 00 00 00 00 02 22 19
+			15 01 00 00 00 00 02 0f 0e
+			15 01 00 00 00 00 02 23 10
+			15 01 00 00 00 00 02 10 80
+			15 01 00 00 00 00 02 24 80
+			15 01 00 00 00 00 02 11 80
+			15 01 00 00 00 00 02 25 80
+			15 01 00 00 00 00 02 12 80
+			15 01 00 00 00 00 02 26 80
+			15 01 00 00 00 00 02 13 80
+			15 01 00 00 00 00 02 27 80
+			15 01 00 00 00 00 02 74 ff
+			15 01 00 00 00 00 02 75 ff
+			15 01 00 00 00 00 02 8d 00
+			15 01 00 00 00 00 02 8e 00
+			15 01 00 00 00 00 02 8f 9c
+			15 01 00 00 00 00 02 90 0c
+			15 01 00 00 00 00 02 91 0e
+			15 01 00 00 00 00 02 d6 00
+			15 01 00 00 00 00 02 d7 20
+			15 01 00 00 00 00 02 d8 00
+			15 01 00 00 00 00 02 d9 88
+			15 01 00 00 00 00 02 e5 05
+			15 01 00 00 00 00 02 e6 10
+			15 01 00 00 00 00 02 54 06
+			15 01 00 00 00 00 02 55 05
+			15 01 00 00 00 00 02 56 04
+			15 01 00 00 00 00 02 58 03
+			15 01 00 00 00 00 02 59 33
+			15 01 00 00 00 00 02 5a 33
+			15 01 00 00 00 00 02 5b 01
+			15 01 00 00 00 00 02 5c 00
+			15 01 00 00 00 00 02 5d 01
+			15 01 00 00 00 00 02 5e 0a
+			15 01 00 00 00 00 02 5f 0a
+			15 01 00 00 00 00 02 60 0a
+			15 01 00 00 00 00 02 61 0a
+			15 01 00 00 00 00 02 62 10
+			15 01 00 00 00 00 02 63 01
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 65 00
+			15 01 00 00 00 00 02 ef 00
+			15 01 00 00 00 00 02 f0 00
+			15 01 00 00 00 00 02 6d 20
+			15 01 00 00 00 00 02 66 44
+			15 01 00 00 00 00 02 68 01
+			15 01 00 00 00 00 02 69 00
+			15 01 00 00 00 00 02 67 11
+			15 01 00 00 00 00 02 6a 06
+			15 01 00 00 00 00 02 6b 31
+			15 01 00 00 00 00 02 6c 90
+			15 01 00 00 00 00 02 ab c3
+			15 01 00 00 00 00 02 b1 49
+			15 01 00 00 00 00 02 aa 80
+			15 01 00 00 00 00 02 b0 90
+			15 01 00 00 00 00 02 b2 a4
+			15 01 00 00 00 00 02 b3 00
+			15 01 00 00 00 00 02 b4 23
+			15 01 00 00 00 00 02 b5 00
+			15 01 00 00 00 00 02 b6 00
+			15 01 00 00 00 00 02 b7 00
+			15 01 00 00 00 00 02 b8 00
+			15 01 00 00 00 00 02 b9 00
+			15 01 00 00 00 00 02 ba 00
+			15 01 00 00 00 00 02 bb 00
+			15 01 00 00 00 00 02 bc 00
+			15 01 00 00 00 00 02 bd 00
+			15 01 00 00 00 00 02 be 00
+			15 01 00 00 00 00 02 bf 00
+			15 01 00 00 00 00 02 c0 00
+			15 01 00 00 00 00 02 c7 40
+			15 01 00 00 00 00 02 c9 00
+			15 01 00 00 00 00 02 c1 2a
+			15 01 00 00 00 00 02 c2 2a
+			15 01 00 00 00 00 02 c3 00
+			15 01 00 00 00 00 02 c4 00
+			15 01 00 00 00 00 02 c5 00
+			15 01 00 00 00 00 02 c6 00
+			15 01 00 00 00 00 02 c8 ab
+			15 01 00 00 00 00 02 ca 00
+			15 01 00 00 00 00 02 cb 00
+			15 01 00 00 00 00 02 cc 20
+			15 01 00 00 00 00 02 cd 40
+			15 01 00 00 00 00 02 ce a8
+			15 01 00 00 00 00 02 cf a8
+			15 01 00 00 00 00 02 d0 00
+			15 01 00 00 00 00 02 d1 00
+			15 01 00 00 00 00 02 d2 00
+			15 01 00 00 00 00 02 d3 00
+			15 01 00 00 00 00 02 af 01
+			15 01 00 00 00 00 02 a4 1e
+			15 01 00 00 00 00 02 95 41
+			15 01 00 00 00 00 02 96 03
+			15 01 00 00 00 00 02 98 00
+			15 01 00 00 00 00 02 9a 9a
+			15 01 00 00 00 00 02 9b 03
+			15 01 00 00 00 00 02 9d 80
+			15 01 00 00 00 00 02 ff 26
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 fa d0
+			15 01 00 00 00 00 02 6b 80
+			15 01 00 00 00 00 02 6c 5c
+			15 01 00 00 00 00 02 6d 0c
+			15 01 00 00 00 00 02 6e 0e
+			15 01 00 00 00 00 02 58 01
+			15 01 00 00 00 00 02 59 15
+			15 01 00 00 00 00 02 5a 01
+			15 01 00 00 00 00 02 5b 00
+			15 01 00 00 00 00 02 5c 01
+			15 01 00 00 00 00 02 5d 2b
+			15 01 00 00 00 00 02 74 00
+			15 01 00 00 00 00 02 75 ba
+			15 01 00 00 00 00 02 81 0a
+			15 01 00 00 00 00 02 4e 81
+			15 01 00 00 00 00 02 4f 83
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 53 4d
+			15 01 00 00 00 00 02 54 03
+			15 01 00 00 00 00 02 ff e0
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 b2 81
+			15 01 00 00 00 00 02 62 28
+			15 01 00 00 00 00 02 a2 09
+			15 01 00 00 00 00 02 b3 01
+			15 01 00 00 00 00 02 ed 00
+			15 01 00 00 00 00 02 ff 10
+			05 01 00 00 78 00 01 11
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 75 00
+			15 01 00 00 00 00 02 76 71
+			15 01 00 00 00 00 02 77 00
+			15 01 00 00 00 00 02 78 84
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 7a a5
+			15 01 00 00 00 00 02 7b 00
+			15 01 00 00 00 00 02 7c bb
+			15 01 00 00 00 00 02 7d 00
+			15 01 00 00 00 00 02 7e ce
+			15 01 00 00 00 00 02 7f 00
+			15 01 00 00 00 00 02 80 e0
+			15 01 00 00 00 00 02 81 00
+			15 01 00 00 00 00 02 82 ef
+			15 01 00 00 00 00 02 83 00
+			15 01 00 00 00 00 02 84 ff
+			15 01 00 00 00 00 02 85 01
+			15 01 00 00 00 00 02 86 0b
+			15 01 00 00 00 00 02 87 01
+			15 01 00 00 00 00 02 88 38
+			15 01 00 00 00 00 02 89 01
+			15 01 00 00 00 00 02 8a 5b
+			15 01 00 00 00 00 02 8b 01
+			15 01 00 00 00 00 02 8c 95
+			15 01 00 00 00 00 02 8d 01
+			15 01 00 00 00 00 02 8e c4
+			15 01 00 00 00 00 02 8f 02
+			15 01 00 00 00 00 02 90 0d
+			15 01 00 00 00 00 02 91 02
+			15 01 00 00 00 00 02 92 4a
+			15 01 00 00 00 00 02 93 02
+			15 01 00 00 00 00 02 94 4c
+			15 01 00 00 00 00 02 95 02
+			15 01 00 00 00 00 02 96 85
+			15 01 00 00 00 00 02 97 02
+			15 01 00 00 00 00 02 98 c3
+			15 01 00 00 00 00 02 99 02
+			15 01 00 00 00 00 02 9a e9
+			15 01 00 00 00 00 02 9b 03
+			15 01 00 00 00 00 02 9c 16
+			15 01 00 00 00 00 02 9d 03
+			15 01 00 00 00 00 02 9e 34
+			15 01 00 00 00 00 02 9f 03
+			15 01 00 00 00 00 02 a0 56
+			15 01 00 00 00 00 02 a2 03
+			15 01 00 00 00 00 02 a3 62
+			15 01 00 00 00 00 02 a4 03
+			15 01 00 00 00 00 02 a5 6c
+			15 01 00 00 00 00 02 a6 03
+			15 01 00 00 00 00 02 a7 74
+			15 01 00 00 00 00 02 a9 03
+			15 01 00 00 00 00 02 aa 80
+			15 01 00 00 00 00 02 ab 03
+			15 01 00 00 00 00 02 ac 89
+			15 01 00 00 00 00 02 ad 03
+			15 01 00 00 00 00 02 ae 8b
+			15 01 00 00 00 00 02 af 03
+			15 01 00 00 00 00 02 b0 8d
+			15 01 00 00 00 00 02 b1 03
+			15 01 00 00 00 00 02 b2 8e
+			15 01 00 00 00 00 02 b3 00
+			15 01 00 00 00 00 02 b4 71
+			15 01 00 00 00 00 02 b5 00
+			15 01 00 00 00 00 02 b6 84
+			15 01 00 00 00 00 02 b7 00
+			15 01 00 00 00 00 02 b8 a5
+			15 01 00 00 00 00 02 b9 00
+			15 01 00 00 00 00 02 ba bb
+			15 01 00 00 00 00 02 bb 00
+			15 01 00 00 00 00 02 bc ce
+			15 01 00 00 00 00 02 bd 00
+			15 01 00 00 00 00 02 be e0
+			15 01 00 00 00 00 02 bf 00
+			15 01 00 00 00 00 02 c0 ef
+			15 01 00 00 00 00 02 c1 00
+			15 01 00 00 00 00 02 c2 ff
+			15 01 00 00 00 00 02 c3 01
+			15 01 00 00 00 00 02 c4 0b
+			15 01 00 00 00 00 02 c5 01
+			15 01 00 00 00 00 02 c6 38
+			15 01 00 00 00 00 02 c7 01
+			15 01 00 00 00 00 02 c8 5b
+			15 01 00 00 00 00 02 c9 01
+			15 01 00 00 00 00 02 ca 95
+			15 01 00 00 00 00 02 cb 01
+			15 01 00 00 00 00 02 cc c4
+			15 01 00 00 00 00 02 cd 02
+			15 01 00 00 00 00 02 ce 0d
+			15 01 00 00 00 00 02 cf 02
+			15 01 00 00 00 00 02 d0 4a
+			15 01 00 00 00 00 02 d1 02
+			15 01 00 00 00 00 02 d2 4c
+			15 01 00 00 00 00 02 d3 02
+			15 01 00 00 00 00 02 d4 85
+			15 01 00 00 00 00 02 d5 02
+			15 01 00 00 00 00 02 d6 c3
+			15 01 00 00 00 00 02 d7 02
+			15 01 00 00 00 00 02 d8 e9
+			15 01 00 00 00 00 02 d9 03
+			15 01 00 00 00 00 02 da 16
+			15 01 00 00 00 00 02 db 03
+			15 01 00 00 00 00 02 dc 34
+			15 01 00 00 00 00 02 dd 03
+			15 01 00 00 00 00 02 de 56
+			15 01 00 00 00 00 02 df 03
+			15 01 00 00 00 00 02 e0 62
+			15 01 00 00 00 00 02 e1 03
+			15 01 00 00 00 00 02 e2 6c
+			15 01 00 00 00 00 02 e3 03
+			15 01 00 00 00 00 02 e4 74
+			15 01 00 00 00 00 02 e5 03
+			15 01 00 00 00 00 02 e6 80
+			15 01 00 00 00 00 02 e7 03
+			15 01 00 00 00 00 02 e8 89
+			15 01 00 00 00 00 02 e9 03
+			15 01 00 00 00 00 02 ea 8b
+			15 01 00 00 00 00 02 eb 03
+			15 01 00 00 00 00 02 ec 8d
+			15 01 00 00 00 00 02 ed 03
+			15 01 00 00 00 00 02 ee 8e
+			15 01 00 00 00 00 02 ef 00
+			15 01 00 00 00 00 02 f0 71
+			15 01 00 00 00 00 02 f1 00
+			15 01 00 00 00 00 02 f2 84
+			15 01 00 00 00 00 02 f3 00
+			15 01 00 00 00 00 02 f4 a5
+			15 01 00 00 00 00 02 f5 00
+			15 01 00 00 00 00 02 f6 bb
+			15 01 00 00 00 00 02 f7 00
+			15 01 00 00 00 00 02 f8 ce
+			15 01 00 00 00 00 02 f9 00
+			15 01 00 00 00 00 02 fa e0
+			15 01 00 00 00 00 02 ff 21
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 00
+			15 01 00 00 00 00 02 01 ef
+			15 01 00 00 00 00 02 02 00
+			15 01 00 00 00 00 02 03 ff
+			15 01 00 00 00 00 02 04 01
+			15 01 00 00 00 00 02 05 0b
+			15 01 00 00 00 00 02 06 01
+			15 01 00 00 00 00 02 07 38
+			15 01 00 00 00 00 02 08 01
+			15 01 00 00 00 00 02 09 5b
+			15 01 00 00 00 00 02 0a 01
+			15 01 00 00 00 00 02 0b 95
+			15 01 00 00 00 00 02 0c 01
+			15 01 00 00 00 00 02 0d c4
+			15 01 00 00 00 00 02 0e 02
+			15 01 00 00 00 00 02 0f 0d
+			15 01 00 00 00 00 02 10 02
+			15 01 00 00 00 00 02 11 4a
+			15 01 00 00 00 00 02 12 02
+			15 01 00 00 00 00 02 13 4c
+			15 01 00 00 00 00 02 14 02
+			15 01 00 00 00 00 02 15 85
+			15 01 00 00 00 00 02 16 02
+			15 01 00 00 00 00 02 17 c3
+			15 01 00 00 00 00 02 18 02
+			15 01 00 00 00 00 02 19 e9
+			15 01 00 00 00 00 02 1a 03
+			15 01 00 00 00 00 02 1b 16
+			15 01 00 00 00 00 02 1c 03
+			15 01 00 00 00 00 02 1d 34
+			15 01 00 00 00 00 02 1e 03
+			15 01 00 00 00 00 02 1f 56
+			15 01 00 00 00 00 02 20 03
+			15 01 00 00 00 00 02 21 62
+			15 01 00 00 00 00 02 22 03
+			15 01 00 00 00 00 02 23 6c
+			15 01 00 00 00 00 02 24 03
+			15 01 00 00 00 00 02 25 74
+			15 01 00 00 00 00 02 26 03
+			15 01 00 00 00 00 02 27 80
+			15 01 00 00 00 00 02 28 03
+			15 01 00 00 00 00 02 29 89
+			15 01 00 00 00 00 02 2a 03
+			15 01 00 00 00 00 02 2b 8b
+			15 01 00 00 00 00 02 2d 03
+			15 01 00 00 00 00 02 2f 8d
+			15 01 00 00 00 00 02 30 03
+			15 01 00 00 00 00 02 31 8e
+			15 01 00 00 00 00 02 32 00
+			15 01 00 00 00 00 02 33 71
+			15 01 00 00 00 00 02 34 00
+			15 01 00 00 00 00 02 35 84
+			15 01 00 00 00 00 02 36 00
+			15 01 00 00 00 00 02 37 a5
+			15 01 00 00 00 00 02 38 00
+			15 01 00 00 00 00 02 39 bb
+			15 01 00 00 00 00 02 3a 00
+			15 01 00 00 00 00 02 3b ce
+			15 01 00 00 00 00 02 3d 00
+			15 01 00 00 00 00 02 3f e0
+			15 01 00 00 00 00 02 40 00
+			15 01 00 00 00 00 02 41 ef
+			15 01 00 00 00 00 02 42 00
+			15 01 00 00 00 00 02 43 ff
+			15 01 00 00 00 00 02 44 01
+			15 01 00 00 00 00 02 45 0b
+			15 01 00 00 00 00 02 46 01
+			15 01 00 00 00 00 02 47 38
+			15 01 00 00 00 00 02 48 01
+			15 01 00 00 00 00 02 49 5b
+			15 01 00 00 00 00 02 4a 01
+			15 01 00 00 00 00 02 4b 95
+			15 01 00 00 00 00 02 4c 01
+			15 01 00 00 00 00 02 4d c4
+			15 01 00 00 00 00 02 4e 02
+			15 01 00 00 00 00 02 4f 0d
+			15 01 00 00 00 00 02 50 02
+			15 01 00 00 00 00 02 51 4a
+			15 01 00 00 00 00 02 52 02
+			15 01 00 00 00 00 02 53 4c
+			15 01 00 00 00 00 02 54 02
+			15 01 00 00 00 00 02 55 85
+			15 01 00 00 00 00 02 56 02
+			15 01 00 00 00 00 02 58 c3
+			15 01 00 00 00 00 02 59 02
+			15 01 00 00 00 00 02 5a e9
+			15 01 00 00 00 00 02 5b 03
+			15 01 00 00 00 00 02 5c 16
+			15 01 00 00 00 00 02 5d 03
+			15 01 00 00 00 00 02 5e 34
+			15 01 00 00 00 00 02 5f 03
+			15 01 00 00 00 00 02 60 56
+			15 01 00 00 00 00 02 61 03
+			15 01 00 00 00 00 02 62 62
+			15 01 00 00 00 00 02 63 03
+			15 01 00 00 00 00 02 64 6c
+			15 01 00 00 00 00 02 65 03
+			15 01 00 00 00 00 02 66 74
+			15 01 00 00 00 00 02 67 03
+			15 01 00 00 00 00 02 68 80
+			15 01 00 00 00 00 02 69 03
+			15 01 00 00 00 00 02 6a 89
+			15 01 00 00 00 00 02 6b 03
+			15 01 00 00 00 00 02 6c 8b
+			15 01 00 00 00 00 02 6d 03
+			15 01 00 00 00 00 02 6e 8d
+			15 01 00 00 00 00 02 6f 03
+			15 01 00 00 00 00 02 70 8e
+			15 01 00 00 00 00 02 71 00
+			15 01 00 00 00 00 02 72 71
+			15 01 00 00 00 00 02 73 00
+			15 01 00 00 00 00 02 74 84
+			15 01 00 00 00 00 02 75 00
+			15 01 00 00 00 00 02 76 a5
+			15 01 00 00 00 00 02 77 00
+			15 01 00 00 00 00 02 78 bb
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 7a ce
+			15 01 00 00 00 00 02 7b 00
+			15 01 00 00 00 00 02 7c e0
+			15 01 00 00 00 00 02 7d 00
+			15 01 00 00 00 00 02 7e ef
+			15 01 00 00 00 00 02 7f 00
+			15 01 00 00 00 00 02 80 ff
+			15 01 00 00 00 00 02 81 01
+			15 01 00 00 00 00 02 82 0b
+			15 01 00 00 00 00 02 83 01
+			15 01 00 00 00 00 02 84 38
+			15 01 00 00 00 00 02 85 01
+			15 01 00 00 00 00 02 86 5b
+			15 01 00 00 00 00 02 87 01
+			15 01 00 00 00 00 02 88 95
+			15 01 00 00 00 00 02 89 01
+			15 01 00 00 00 00 02 8a c4
+			15 01 00 00 00 00 02 8b 02
+			15 01 00 00 00 00 02 8c 0d
+			15 01 00 00 00 00 02 8d 02
+			15 01 00 00 00 00 02 8e 4a
+			15 01 00 00 00 00 02 8f 02
+			15 01 00 00 00 00 02 90 4c
+			15 01 00 00 00 00 02 91 02
+			15 01 00 00 00 00 02 92 85
+			15 01 00 00 00 00 02 93 02
+			15 01 00 00 00 00 02 94 c3
+			15 01 00 00 00 00 02 95 02
+			15 01 00 00 00 00 02 96 e9
+			15 01 00 00 00 00 02 97 03
+			15 01 00 00 00 00 02 98 16
+			15 01 00 00 00 00 02 99 03
+			15 01 00 00 00 00 02 9a 34
+			15 01 00 00 00 00 02 9b 03
+			15 01 00 00 00 00 02 9c 56
+			15 01 00 00 00 00 02 9d 03
+			15 01 00 00 00 00 02 9e 62
+			15 01 00 00 00 00 02 9f 03
+			15 01 00 00 00 00 02 a0 6c
+			15 01 00 00 00 00 02 a2 03
+			15 01 00 00 00 00 02 a3 74
+			15 01 00 00 00 00 02 a4 03
+			15 01 00 00 00 00 02 a5 80
+			15 01 00 00 00 00 02 a6 03
+			15 01 00 00 00 00 02 a7 89
+			15 01 00 00 00 00 02 a9 03
+			15 01 00 00 00 00 02 aa 8b
+			15 01 00 00 00 00 02 ab 03
+			15 01 00 00 00 00 02 ac 8d
+			15 01 00 00 00 00 02 ad 03
+			15 01 00 00 00 00 02 ae 8e
+			15 01 00 00 00 00 02 af 00
+			15 01 00 00 00 00 02 b0 71
+			15 01 00 00 00 00 02 b1 00
+			15 01 00 00 00 00 02 b2 84
+			15 01 00 00 00 00 02 b3 00
+			15 01 00 00 00 00 02 b4 a5
+			15 01 00 00 00 00 02 b5 00
+			15 01 00 00 00 00 02 b6 bb
+			15 01 00 00 00 00 02 b7 00
+			15 01 00 00 00 00 02 b8 ce
+			15 01 00 00 00 00 02 b9 00
+			15 01 00 00 00 00 02 ba e0
+			15 01 00 00 00 00 02 bb 00
+			15 01 00 00 00 00 02 bc ef
+			15 01 00 00 00 00 02 bd 00
+			15 01 00 00 00 00 02 be ff
+			15 01 00 00 00 00 02 bf 01
+			15 01 00 00 00 00 02 c0 0b
+			15 01 00 00 00 00 02 c1 01
+			15 01 00 00 00 00 02 c2 38
+			15 01 00 00 00 00 02 c3 01
+			15 01 00 00 00 00 02 c4 5b
+			15 01 00 00 00 00 02 c5 01
+			15 01 00 00 00 00 02 c6 95
+			15 01 00 00 00 00 02 c7 01
+			15 01 00 00 00 00 02 c8 c4
+			15 01 00 00 00 00 02 c9 02
+			15 01 00 00 00 00 02 ca 0d
+			15 01 00 00 00 00 02 cb 02
+			15 01 00 00 00 00 02 cc 4a
+			15 01 00 00 00 00 02 cd 02
+			15 01 00 00 00 00 02 ce 4c
+			15 01 00 00 00 00 02 cf 02
+			15 01 00 00 00 00 02 d0 85
+			15 01 00 00 00 00 02 d1 02
+			15 01 00 00 00 00 02 d2 c3
+			15 01 00 00 00 00 02 d3 02
+			15 01 00 00 00 00 02 d4 e9
+			15 01 00 00 00 00 02 d5 03
+			15 01 00 00 00 00 02 d6 16
+			15 01 00 00 00 00 02 d7 03
+			15 01 00 00 00 00 02 d8 34
+			15 01 00 00 00 00 02 d9 03
+			15 01 00 00 00 00 02 da 56
+			15 01 00 00 00 00 02 db 03
+			15 01 00 00 00 00 02 dc 62
+			15 01 00 00 00 00 02 dd 03
+			15 01 00 00 00 00 02 de 6c
+			15 01 00 00 00 00 02 df 03
+			15 01 00 00 00 00 02 e0 74
+			15 01 00 00 00 00 02 e1 03
+			15 01 00 00 00 00 02 e2 80
+			15 01 00 00 00 00 02 e3 03
+			15 01 00 00 00 00 02 e4 89
+			15 01 00 00 00 00 02 e5 03
+			15 01 00 00 00 00 02 e6 8b
+			15 01 00 00 00 00 02 e7 03
+			15 01 00 00 00 00 02 e8 8d
+			15 01 00 00 00 00 02 e9 03
+			15 01 00 00 00 00 02 ea 8e
+			15 01 00 00 00 00 02 FF 10
+			05 01 00 00 00 00 01 29];
+		qcom,mdss-dsi-off-command = [15 01 00 00 00 00 02 ff 10
+			05 01 00 00 10 00 01 28
+			15 01 00 00 00 00 02 b0 00
+			05 01 00 00 40 00 01 10
+			15 01 00 00 00 00 02 4f 01];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,cmd-sync-wait-trigger;
+		qcom,mdss-tear-check-frame-rate = <12000>;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,config-select = <&dsi_dual_sharp_cmd_config0>;
+
+		dsi_dual_sharp_cmd_config0: config0 {
+			qcom,split-mode = "dualctl-split";
+		};
+	};
+};
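Each entry in the qcom,mdss-dsi-on-command / off-command arrays above is a seven-byte header (data type, last, virtual channel, ack, wait in ms, 16-bit payload length) followed by the payload; the "00 02" length bytes on the two-byte writes and the 0x78 (120 ms) wait on the sleep-out command fit that reading. A minimal decode sketch in Python, assuming that header layout (the function name and the sample stream are illustrative, not part of the patch):

    # Sketch only: decode a qcom,mdss-dsi-*-command byte stream, assuming the
    # header layout implied by the entries above: dtype, last, vc, ack,
    # wait (ms), 16-bit big-endian payload length, then the payload itself.
    def decode_dsi_cmds(raw):
        cmds, i = [], 0
        while i + 7 <= len(raw):
            dtype, last, vc, ack, wait = raw[i:i + 5]
            dlen = (raw[i + 5] << 8) | raw[i + 6]   # "00 02" -> 2-byte payload
            cmds.append({"dtype": dtype, "wait_ms": wait,
                         "payload": raw[i + 7:i + 7 + dlen].hex(" ")})
            i += 7 + dlen
        return cmds

    # First two entries of the sharp panel on-command above:
    stream = bytes.fromhex("15 01 00 00 00 00 02 ff 10"
                           " 15 01 00 00 00 00 02 fb 01")
    for cmd in decode_dsi_cmds(stream):
        print(cmd)   # dtype 0x15 = DCS short write, 1 parameter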
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
new file mode 100644
index 0000000..509547f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -0,0 +1,88 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_sim_cmd: qcom,mdss_dsi_dual_sim_cmd {
+		qcom,mdss-dsi-panel-name = "Sim dual cmd mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1280>;
+		qcom,mdss-dsi-panel-height = <1440>;
+		qcom,mdss-dsi-h-front-porch = <120>;
+		qcom,mdss-dsi-h-back-porch = <44>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <4>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <4>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-hor-line-idle = <0 40 256>,
+						<40 120 128>,
+						<120 240 64>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
+			05 01 00 00 0a 00 01 00
+			/* Soft reset, wait 10ms */
+			15 01 00 00 0a 00 02 3a 77
+			/* Set Pixel format (24 bpp) */
+			39 01 00 00 0a 00 05 2a 00 00 04 ff
+			/* Set Column address */
+			39 01 00 00 0a 00 05 2b 00 00 05 9f
+			/* Set page address */
+			15 01 00 00 0a 00 02 35 00
+			/* Set tear on */
+			39 01 00 00 0a 00 03 44 00 00
+			/* Set tear scan line */
+			15 01 00 00 0a 00 02 51 ff
+			/* write display brightness */
+			15 01 00 00 0a 00 02 53 24
+			 /* write control brightness */
+			15 01 00 00 0a 00 02 55 00
+			/* CABC brightness */
+			05 01 00 00 78 00 01 11
+			/* exit sleep mode, wait 120ms */
+			05 01 00 00 10 00 01 29];
+			/* Set display on, wait 16ms */
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,panel-ack-disabled;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
new file mode 100644
index 0000000..cca28c7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
@@ -0,0 +1,55 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_sim_vid: qcom,mdss_dsi_dual_sim_video {
+		qcom,mdss-dsi-panel-name = "Sim dual video mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1280>;
+		qcom,mdss-dsi-panel-height = <1440>;
+		qcom,mdss-dsi-h-front-porch = <120>;
+		qcom,mdss-dsi-h-back-porch = <44>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <4>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <4>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-panel-broadcast-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
+		qcom,panel-ack-disabled;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index b589fe5..44d6f18 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -32,11 +32,26 @@
 				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+		attach-impl-defs =
+				<0x6000 0x2378>,
+				<0x6060 0x1055>,
+				<0x678c 0x8>,
+				<0x6794 0x28>,
+				<0x6800 0x6>,
+				<0x6900 0x3ff>,
+				<0x6924 0x204>,
+				<0x6928 0x11000>,
+				<0x6930 0x800>,
+				<0x6960 0xffffffff>,
+				<0x6b64 0x1a5551>,
+				<0x6b68 0x9a82a382>;
 	};
 
 	apps_smmu: apps-smmu@0x15000000 {
 		compatible = "qcom,qsmmu-v500";
-		reg = <0x15000000 0x80000>;
+		reg = <0x15000000 0x80000>,
+			<0x150c2000 0x20>;
+		reg-names = "base", "tcu-base";
 		#iommu-cells = <1>;
 		qcom,skip-init;
 		#global-interrupts = <1>;
@@ -115,6 +130,7 @@
 			reg = <0x150c5000 0x1000>,
 				<0x150c2200 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x0 0x400>;
 			qcom,regulator-names = "vdd";
 			vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>;
 		};
@@ -125,6 +141,7 @@
 			reg = <0x150c9000 0x1000>,
 				<0x150c2208 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x400 0x400>;
 			qcom,regulator-names = "vdd";
 			vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>;
 		};
@@ -135,6 +152,7 @@
 			reg = <0x150cd000 0x1000>,
 				<0x150c2210 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x800 0x400>;
 			qcom,regulator-names = "vdd";
 			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>;
 		};
@@ -145,6 +163,7 @@
 			reg = <0x150d1000 0x1000>,
 				<0x150c2218 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0xc00 0x400>;
 			qcom,regulator-names = "vdd";
 			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>;
 		};
@@ -155,6 +174,7 @@
 			reg = <0x150d5000 0x1000>,
 				<0x150c2220 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x1000 0x400>;
 			qcom,regulator-names = "vdd";
 			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
 		};
@@ -165,6 +185,7 @@
 			reg = <0x150d9000 0x1000>,
 				<0x150c2228 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x1400 0x400>;
 			/* No GDSC */
 		};
 
@@ -174,6 +195,7 @@
 			reg = <0x150dd000 0x1000>,
 				<0x150c2230 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x1800 0x400>;
 			qcom,regulator-names = "vdd";
 			vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>;
 		};
@@ -184,23 +206,24 @@
 			reg = <0x150e1000 0x1000>,
 				<0x150c2238 0x8>;
 			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x1c00 0x400>;
 			qcom,regulator-names = "vdd";
 			vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>;
 		};
 	};
 
-	iommu_test_device {
+	kgsl_iommu_test_device {
 		compatible = "iommu-debug-test";
 		/*
-		 * 42 shouldn't be used by anyone on the mmss_smmu.  We just
-		 * need _something_ here to get this node recognized by the
-		 * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * 0x7 isn't a valid sid, but should pass the sid sanity check.
+		 * We just need _something_ here to get this node recognized by
+		 * the SMMU driver. Our test uses ATOS, which doesn't use SIDs
 		 * anyways, so using a dummy value is ok.
 		 */
-		iommus = <&kgsl_smmu 0x3>;
+		iommus = <&kgsl_smmu 0x7>;
 	};
 
-	iommu_test_device2 {
+	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		/*
 		 * This SID belongs to PCIE. We can't use a fake SID for
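The qcom,stream-id-range properties added to the TBU nodes above are <start length> pairs that carve the stream-ID space into consecutive 0x400-wide windows (0x0, 0x400, ... 0x1c00). A small sketch of the lookup those values imply, labelling each window with the TBU "base" register visible above; whether the driver indexes exactly this way is an assumption:

    # Illustrative only: one 0x400-wide stream-id window per TBU, in the
    # order the TBU nodes appear above (labelled by their "base" register).
    TBU_BASES = [0x150c5000, 0x150c9000, 0x150cd000, 0x150d1000,
                 0x150d5000, 0x150d9000, 0x150dd000, 0x150e1000]

    def tbu_for_sid(sid):
        idx = sid >> 10                  # window start = idx * 0x400
        if not 0 <= idx < len(TBU_BASES):
            raise ValueError(f"SID {sid:#x} outside 0x0-0x1fff")
        return f"tbu@{TBU_BASES[idx]:x}"

    print(tbu_for_sid(0x7))      # -> tbu@150c5000, window <0x0 0x400>
    print(tbu_for_sid(0x1c05))   # -> tbu@150e1000, window <0x1c00 0x400>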
diff --git a/arch/arm64/boot/dts/qcom/pm8005.dtsi b/arch/arm64/boot/dts/qcom/pm8005.dtsi
index 241864f..1f8d20e 100644
--- a/arch/arm64/boot/dts/qcom/pm8005.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8005.dtsi
@@ -32,37 +32,15 @@
 			label = "pm8005_tz";
 		};
 
-		pm8005_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+		pm8005_gpios: pinctrl@c000 {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x400>;
+			interrupts = <0x4 0xc0 0 IRQ_TYPE_NONE>,
+					<0x4 0xc1 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8005_gpio1", "pm8005_gpio2";
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8005-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <3 4>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index ed4fdde..5290f46 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -72,169 +72,41 @@
 			label = "pm8998_tz";
 		};
 
-		pm8998_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+		pm8998_gpios: pinctrl@c000 {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x1a00>;
+			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+					<0x0 0xc1 0 IRQ_TYPE_NONE>,
+					<0x0 0xc3 0 IRQ_TYPE_NONE>,
+					<0x0 0xc4 0 IRQ_TYPE_NONE>,
+					<0x0 0xc5 0 IRQ_TYPE_NONE>,
+					<0x0 0xc6 0 IRQ_TYPE_NONE>,
+					<0x0 0xc7 0 IRQ_TYPE_NONE>,
+					<0x0 0xc8 0 IRQ_TYPE_NONE>,
+					<0x0 0xc9 0 IRQ_TYPE_NONE>,
+					<0x0 0xca 0 IRQ_TYPE_NONE>,
+					<0x0 0xcb 0 IRQ_TYPE_NONE>,
+					<0x0 0xcc 0 IRQ_TYPE_NONE>,
+					<0x0 0xcd 0 IRQ_TYPE_NONE>,
+					<0x0 0xcf 0 IRQ_TYPE_NONE>,
+					<0x0 0xd0 0 IRQ_TYPE_NONE>,
+					<0x0 0xd1 0 IRQ_TYPE_NONE>,
+					<0x0 0xd2 0 IRQ_TYPE_NONE>,
+					<0x0 0xd4 0 IRQ_TYPE_NONE>,
+					<0x0 0xd6 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8998_gpio1", "pm8998_gpio2",
+					"pm8998_gpio4", "pm8998_gpio5",
+					"pm8998_gpio6", "pm8998_gpio7",
+					"pm8998_gpio8", "pm8998_gpio9",
+					"pm8998_gpio10", "pm8998_gpio11",
+					"pm8998_gpio12", "pm8998_gpio13",
+					"pm8998_gpio14", "pm8998_gpio16",
+					"pm8998_gpio17", "pm8998_gpio18",
+					"pm8998_gpio19", "pm8998_gpio21",
+					"pm8998_gpio23";
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8998-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
-
-			gpio@c400 {
-				reg = <0xc400 0x100>;
-				qcom,pin-num = <5>;
-				status = "disabled";
-			};
-
-			gpio@c500 {
-				reg = <0xc500 0x100>;
-				qcom,pin-num = <6>;
-				status = "disabled";
-			};
-
-			gpio@c600 {
-				reg = <0xc600 0x100>;
-				qcom,pin-num = <7>;
-				status = "disabled";
-			};
-
-			gpio@c700 {
-				reg = <0xc700 0x100>;
-				qcom,pin-num = <8>;
-				status = "disabled";
-			};
-
-			gpio@c800 {
-				reg = <0xc800 0x100>;
-				qcom,pin-num = <9>;
-				status = "disabled";
-			};
-
-			gpio@c900 {
-				reg = <0xc900 0x100>;
-				qcom,pin-num = <10>;
-				status = "disabled";
-			};
-
-			gpio@ca00 {
-				reg = <0xca00 0x100>;
-				qcom,pin-num = <11>;
-				status = "disabled";
-			};
-
-			gpio@cb00 {
-				reg = <0xcb00 0x100>;
-				qcom,pin-num = <12>;
-				status = "disabled";
-			};
-
-			gpio@cc00 {
-				reg = <0xcc00 0x100>;
-				qcom,pin-num = <13>;
-				status = "disabled";
-			};
-
-			gpio@cd00 {
-				reg = <0xcd00 0x100>;
-				qcom,pin-num = <14>;
-				status = "disabled";
-			};
-
-			gpio@ce00 {
-				reg = <0xce00 0x100>;
-				qcom,pin-num = <15>;
-				status = "disabled";
-			};
-
-			gpio@cf00 {
-				reg = <0xcf00 0x100>;
-				qcom,pin-num = <16>;
-				status = "disabled";
-			};
-
-			gpio@d000 {
-				reg = <0xd000 0x100>;
-				qcom,pin-num = <17>;
-				status = "disabled";
-			};
-
-			gpio@d100 {
-				reg = <0xd100 0x100>;
-				qcom,pin-num = <18>;
-				status = "disabled";
-			};
-
-			gpio@d200 {
-				reg = <0xd200 0x100>;
-				qcom,pin-num = <19>;
-				status = "disabled";
-			};
-
-			gpio@d300 {
-				reg = <0xd300 0x100>;
-				qcom,pin-num = <20>;
-				status = "disabled";
-			};
-
-			gpio@d400 {
-				reg = <0xd400 0x100>;
-				qcom,pin-num = <21>;
-				status = "disabled";
-			};
-
-			gpio@d500 {
-				reg = <0xd500 0x100>;
-				qcom,pin-num = <22>;
-				status = "disabled";
-			};
-
-			gpio@d600 {
-				reg = <0xd600 0x100>;
-				qcom,pin-num = <23>;
-				status = "disabled";
-			};
-
-			gpio@d700 {
-				reg = <0xd700 0x100>;
-				qcom,pin-num = <24>;
-				status = "disabled";
-			};
-
-			gpio@d800 {
-				reg = <0xd800 0x100>;
-				qcom,pin-num = <25>;
-				status = "disabled";
-			};
-
-			gpio@d900 {
-				reg = <0xd900 0x100>;
-				qcom,pin-num = <26>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <3 15 20 22 24 25 26>;
 		};
 
 		pm8998_coincell: qcom,coincell@2800 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 1659706..1f27b21 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -38,97 +38,29 @@
 			label = "pmi8998_tz";
 		};
 
-		pmi8998_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+		pmi8998_gpios: pinctrl@c000 {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0xe00>;
+			interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+					<0x2 0xc1 0 IRQ_TYPE_NONE>,
+					<0x2 0xc2 0 IRQ_TYPE_NONE>,
+					<0x2 0xc4 0 IRQ_TYPE_NONE>,
+					<0x2 0xc5 0 IRQ_TYPE_NONE>,
+					<0x2 0xc7 0 IRQ_TYPE_NONE>,
+					<0x2 0xc8 0 IRQ_TYPE_NONE>,
+					<0x2 0xc9 0 IRQ_TYPE_NONE>,
+					<0x2 0xca 0 IRQ_TYPE_NONE>,
+					<0x2 0xcb 0 IRQ_TYPE_NONE>,
+					<0x2 0xcd 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8998_gpio1", "pmi8998_gpio2",
+					"pmi8998_gpio3", "pmi8998_gpio5",
+					"pmi8998_gpio6", "pmi8998_gpio8",
+					"pmi8998_gpio9", "pmi8998_gpio10",
+					"pmi8998_gpio11", "pmi8998_gpio12",
+					"pmi8998_gpio14";
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pmi8998-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
-
-			gpio@c400 {
-				reg = <0xc400 0x100>;
-				qcom,pin-num = <5>;
-				status = "disabled";
-			};
-
-			gpio@c500 {
-				reg = <0xc500 0x100>;
-				qcom,pin-num = <6>;
-				status = "disabled";
-			};
-
-			gpio@c600 {
-				reg = <0xc600 0x100>;
-				qcom,pin-num = <7>;
-				status = "disabled";
-			};
-
-			gpio@c700 {
-				reg = <0xc700 0x100>;
-				qcom,pin-num = <8>;
-				status = "disabled";
-			};
-
-			gpio@c800 {
-				reg = <0xc800 0x100>;
-				qcom,pin-num = <9>;
-				status = "disabled";
-			};
-
-			gpio@c900 {
-				reg = <0xc900 0x100>;
-				qcom,pin-num = <10>;
-				status = "disabled";
-			};
-
-			gpio@ca00 {
-				reg = <0xca00 0x100>;
-				qcom,pin-num = <11>;
-				status = "disabled";
-			};
-
-			gpio@cb00 {
-				reg = <0xcb00 0x100>;
-				qcom,pin-num = <12>;
-				status = "disabled";
-			};
-
-			gpio@cc00 {
-				reg = <0xcc00 0x100>;
-				qcom,pin-num = <13>;
-				status = "disabled";
-			};
-
-			gpio@cd00 {
-				reg = <0xcd00 0x100>;
-				qcom,pin-num = <14>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <4 7 13>;
 		};
 
 		pmi8998_rradc: rradc@4500 {
@@ -372,7 +304,7 @@
 			qcom,en-ext-pfet-sc-pro;
 			qcom,pmic-revid = <&pmi8998_revid>;
 			qcom,loop-auto-gm-en;
-			status = "okay";
+			status = "disabled";
 		};
 
 		flash_led: qcom,leds@d300 {
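The pm8005/pm8998/pmi8998 changes above replace per-pin qcom,qpnp-pin child nodes with a single qcom,spmi-gpio block: the removed nodes show GPIO n at SPMI offset 0xc000 + (n - 1) * 0x100, and the new interrupt lists enumerate peripheral IDs 0xc0 + (n - 1) on the PMIC's slave ID while skipping the pins named in qcom,gpios-disallowed. A short sketch of that relationship (the helper name is illustrative only):

    # Illustrative only: rebuild the interrupt list of a qcom,spmi-gpio node
    # from the pin count and the qcom,gpios-disallowed list.
    def spmi_gpio_interrupts(slave_id, ngpios, disallowed):
        return [(slave_id, 0xc0 + (n - 1), 0, "IRQ_TYPE_NONE")
                for n in range(1, ngpios + 1) if n not in disallowed]

    # pm8998: 26 GPIOs on slave 0x0, pins 3/15/20/22/24/25/26 disallowed
    irqs = spmi_gpio_interrupts(0x0, 26, {3, 15, 20, 22, 24, 25, 26})
    print(len(irqs))            # -> 19 entries, matching the dtsi above
    print(hex(irqs[2][1]))      # -> 0xc3, i.e. pm8998_gpio4 (gpio 3 skipped)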
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
index a8d559c..4b3fa93 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
@@ -11,9 +11,9 @@
  */
 
 &soc {
-	tlmm: pinctrl@03800000 {
+	tlmm: pinctrl@03400000 {
 		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03800000 0xc00000>;
+		reg = <0x03400000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
 		#gpio-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
new file mode 100644
index 0000000..d5646bf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel CDP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+	qcom,board-id = <1 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
new file mode 100644
index 0000000..d641276
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel MTP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+	qcom,board-id = <8 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index 115c7b8..b66ca94 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -27,7 +27,8 @@
 		reg = <0x170f700c 0x4>,
 		      <0x170f7010 0x4>;
 		reg-names = "avtimer_lsb_addr", "avtimer_msb_addr";
-		qcom,clk-div = <27>;
+		qcom,clk-div = <192>;
+		qcom,clk-mult = <10>;
 	};
 
 	sound-tavil {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index 2d2b264..a51f411 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -24,22 +24,38 @@
 			<0x1380000 0x40000>,
 			<0x1380000 0x40000>,
 			<0x1740000 0x40000>,
+			<0x1620000 0x40000>,
 			<0x1620000 0x40000>;
 
 		reg-names = "aggre1_noc-base", "aggre2_noc-base",
 			"config_noc-base", "dc_noc-base",
 			"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
-			"mmss_noc-base", "system_noc-base";
+			"mmss_noc-base", "system_noc-base", "ipa_virt-base";
 
 		mbox-names = "apps_rsc", "disp_rsc";
 		mboxes = <&apps_rsc 0 &disp_rsc 0>;
 
+	/*RSCs*/
+		rsc_apps: rsc-apps {
+			cell-id = <MSM_BUS_RSC_APPS>;
+			label = "apps_rsc";
+			qcom,rsc-dev;
+			qcom,req_state = <2>;
+		};
+
+		rsc_disp: rsc-disp {
+			cell-id = <MSM_BUS_RSC_DISP>;
+			label = "disp_rsc";
+			qcom,rsc-dev;
+			qcom,req_state = <3>;
+		};
+
 	/*BCMs*/
 		bcm_acv: bcm-acv {
 			cell-id = <MSM_BUS_BCM_ACV>;
 			label = "ACV";
 			qcom,bcm-name = "ACV";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -47,7 +63,7 @@
 			cell-id = <MSM_BUS_BCM_ALC>;
 			label = "ALC";
 			qcom,bcm-name = "ALC";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -55,7 +71,7 @@
 			cell-id = <MSM_BUS_BCM_MC0>;
 			label = "MC0";
 			qcom,bcm-name = "MC0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -63,7 +79,7 @@
 			cell-id = <MSM_BUS_BCM_SH0>;
 			label = "SH0";
 			qcom,bcm-name = "SH0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -71,7 +87,7 @@
 			cell-id = <MSM_BUS_BCM_MM0>;
 			label = "MM0";
 			qcom,bcm-name = "MM0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -79,7 +95,7 @@
 			cell-id = <MSM_BUS_BCM_SH1>;
 			label = "SH1";
 			qcom,bcm-name = "SH1";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -87,7 +103,7 @@
 			cell-id = <MSM_BUS_BCM_MM1>;
 			label = "MM1";
 			qcom,bcm-name = "MM1";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -95,7 +111,7 @@
 			cell-id = <MSM_BUS_BCM_SH2>;
 			label = "SH2";
 			qcom,bcm-name = "SH2";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -103,7 +119,7 @@
 			cell-id = <MSM_BUS_BCM_MM2>;
 			label = "MM2";
 			qcom,bcm-name = "MM2";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -111,7 +127,7 @@
 			cell-id = <MSM_BUS_BCM_SH3>;
 			label = "SH3";
 			qcom,bcm-name = "SH3";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -119,7 +135,7 @@
 			cell-id = <MSM_BUS_BCM_MM3>;
 			label = "MM3";
 			qcom,bcm-name = "MM3";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -127,7 +143,7 @@
 			cell-id = <MSM_BUS_BCM_SH4>;
 			label = "SH4";
 			qcom,bcm-name = "SH4";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -135,15 +151,7 @@
 			cell-id = <MSM_BUS_BCM_SH5>;
 			label = "SH5";
 			qcom,bcm-name = "SH5";
-			qcom,drv-id = <2>;
-			qcom,bcm-dev;
-		};
-
-		bcm_mm5: bcm-mm5 {
-			cell-id = <MSM_BUS_BCM_MM5>;
-			label = "MM5";
-			qcom,bcm-name = "MM5";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -151,7 +159,7 @@
 			cell-id = <MSM_BUS_BCM_SN0>;
 			label = "SN0";
 			qcom,bcm-name = "SN0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -159,7 +167,7 @@
 			cell-id = <MSM_BUS_BCM_CE0>;
 			label = "CE0";
 			qcom,bcm-name = "CE0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -167,7 +175,7 @@
 			cell-id = <MSM_BUS_BCM_IP0>;
 			label = "IP0";
 			qcom,bcm-name = "IP0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -175,7 +183,14 @@
 			cell-id = <MSM_BUS_BCM_CN0>;
 			label = "CN0";
 			qcom,bcm-name = "CN0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_qup0: bcm-qup0 {
+			cell-id = <MSM_BUS_BCM_QUP0>;
+			label = "QUP0";
+			qcom,bcm-name = "QUP0";
 			qcom,bcm-dev;
 		};
 
@@ -183,7 +198,7 @@
 			cell-id = <MSM_BUS_BCM_SN1>;
 			label = "SN1";
 			qcom,bcm-name = "SN1";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -191,7 +206,7 @@
 			cell-id = <MSM_BUS_BCM_SN2>;
 			label = "SN2";
 			qcom,bcm-name = "SN2";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -199,7 +214,7 @@
 			cell-id = <MSM_BUS_BCM_SN3>;
 			label = "SN3";
 			qcom,bcm-name = "SN3";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -207,7 +222,7 @@
 			cell-id = <MSM_BUS_BCM_SN4>;
 			label = "SN4";
 			qcom,bcm-name = "SN4";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -215,7 +230,7 @@
 			cell-id = <MSM_BUS_BCM_SN5>;
 			label = "SN5";
 			qcom,bcm-name = "SN5";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -223,7 +238,7 @@
 			cell-id = <MSM_BUS_BCM_SN6>;
 			label = "SN6";
 			qcom,bcm-name = "SN6";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -231,7 +246,7 @@
 			cell-id = <MSM_BUS_BCM_SN7>;
 			label = "SN7";
 			qcom,bcm-name = "SN7";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -239,7 +254,7 @@
 			cell-id = <MSM_BUS_BCM_SN8>;
 			label = "SN8";
 			qcom,bcm-name = "SN8";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -247,7 +262,7 @@
 			cell-id = <MSM_BUS_BCM_SN9>;
 			label = "SN9";
 			qcom,bcm-name = "SN9";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -255,7 +270,7 @@
 			cell-id = <MSM_BUS_BCM_SN11>;
 			label = "SN11";
 			qcom,bcm-name = "SN11";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -263,7 +278,7 @@
 			cell-id = <MSM_BUS_BCM_SN12>;
 			label = "SN12";
 			qcom,bcm-name = "SN12";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -271,7 +286,7 @@
 			cell-id = <MSM_BUS_BCM_SN14>;
 			label = "SN14";
 			qcom,bcm-name = "SN14";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -279,7 +294,7 @@
 			cell-id = <MSM_BUS_BCM_SN15>;
 			label = "SN15";
 			qcom,bcm-name = "SN15";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -287,7 +302,7 @@
 			cell-id = <MSM_BUS_BCM_MC0_DISPLAY>;
 			label = "MC0_DISPLAY";
 			qcom,bcm-name = "MC0";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -295,7 +310,7 @@
 			cell-id = <MSM_BUS_BCM_SH0_DISPLAY>;
 			label = "SH0_DISPLAY";
 			qcom,bcm-name = "SH0";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -303,7 +318,7 @@
 			cell-id = <MSM_BUS_BCM_MM0_DISPLAY>;
 			label = "MM0_DISPLAY";
 			qcom,bcm-name = "MM0";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -311,7 +326,7 @@
 			cell-id = <MSM_BUS_BCM_MM1_DISPLAY>;
 			label = "MM1_DISPLAY";
 			qcom,bcm-name = "MM1";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -319,7 +334,7 @@
 			cell-id = <MSM_BUS_BCM_MM2_DISPLAY>;
 			label = "MM2_DISPLAY";
 			qcom,bcm-name = "MM2";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -327,7 +342,7 @@
 			cell-id = <MSM_BUS_BCM_MM3_DISPLAY>;
 			label = "MM3_DISPLAY";
 			qcom,bcm-name = "MM3";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -382,6 +397,15 @@
 			clocks = <>;
 		};
 
+		fab_ipa_virt: fab-ipa_virt {
+			cell-id = <MSM_BUS_FAB_IPA_VIRT>;
+			label = "fab-ipa_virt";
+			qcom,fab-dev;
+			qcom,base-name = "ipa_virt-base";
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
 		fab_mc_virt: fab-mc_virt {
 			cell-id = <MSM_BUS_FAB_MC_VIRT>;
 			label = "fab-mc_virt";
@@ -468,6 +492,7 @@
 			qcom,agg-ports = <1>;
 			qcom,connections = <&slv_qns_a1noc_snoc>;
 			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,bcms = <&bcm_qup0>;
 		};
 
 		mas_qhm_tsif: mas-qhm-tsif {
@@ -544,6 +569,7 @@
 			qcom,agg-ports = <1>;
 			qcom,connections = <&slv_qns_a2noc_snoc>;
 			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,bcms = <&bcm_qup0>;
 		};
 
 		mas_qnm_cnoc: mas-qnm-cnoc {
@@ -575,7 +601,6 @@
 			qcom,qport = <2>;
 			qcom,connections = <&slv_qns_a2noc_snoc>;
 			qcom,bus-dev = <&fab_aggre2_noc>;
-			qcom,bcms = <&bcm_ip0>;
 		};
 
 		mas_xm_pcie3_1: mas-xm-pcie3-1 {
@@ -761,6 +786,15 @@
 			qcom,bus-dev = <&fab_gladiator_noc>;
 		};
 
+		mas_ipa_core: mas-ipa-core {
+			cell-id = <MSM_BUS_MASTER_IPA_CORE>;
+			label = "mas-ipa-core";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_ipa_core>;
+			qcom,bus-dev = <&fab_ipa_virt>;
+		};
+
 		mas_llcc_mc: mas-llcc-mc {
 			cell-id = <MSM_BUS_MASTER_LLCC>;
 			label = "mas-llcc-mc";
@@ -863,7 +897,6 @@
 			qcom,agg-ports = <1>;
 			qcom,connections = <&slv_srvc_mnoc>;
 			qcom,bus-dev = <&fab_mmss_noc>;
-			qcom,bcms = <&bcm_mm5>;
 		};
 
 		mas_qxm_camnoc_hf: mas-qxm-camnoc-hf {
@@ -1586,6 +1619,15 @@
 			qcom,bus-dev = <&fab_gladiator_noc>;
 		};
 
+		slv_ipa_core:slv-ipa-core {
+			cell-id = <MSM_BUS_SLAVE_IPA>;
+			label = "slv-ipa-core";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_ipa_virt>;
+			qcom,bcms = <&bcm_ip0>;
+		};
+
 		slv_ebi:slv-ebi {
 			cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
 			label = "slv-ebi";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index d47dd36..cd324e6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -10,8 +10,123 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 &soc {
 	sound-tavil {
 		qcom,us-euro-gpios = <&tavil_us_euro_sw>;
 	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_home_default
+			     &key_vol_up_default
+			     &key_cam_snapshot_default
+			     &key_cam_focus_default>;
+
+		home {
+			label = "home";
+			gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <766>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <528>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_mem {
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&ufsphy_card {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_card {
+	vdd-hba-supply = <&ufs_card_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l21>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <300000>;
+	vccq2-max-microamp = <300000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&pmi8998_flash2 {
+	pinctrl-names = "led_enable", "led_disable";
+	pinctrl-0 = <&flash_led3_front_en>;
+	pinctrl-1 = <&flash_led3_front_dis>;
+};
+
+&pmi8998_torch2 {
+	pinctrl-names = "led_enable", "led_disable";
+	pinctrl-0 = <&flash_led3_front_en>;
+	pinctrl-1 = <&flash_led3_front_dis>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index e7ff343..a3adcec 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -476,6 +476,16 @@
 			};
 
 			port@2 {
+				reg = <2>;
+				funnel_in2_in_funnel_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&funnel_modem_out_funnel_in2>;
+				};
+
+			};
+
+			port@3 {
 				reg = <5>;
 				funnel_in2_in_funnel_apss_merg: endpoint {
 					slave-mode;
@@ -495,12 +505,17 @@
 		coresight-name = "coresight-tpda";
 
 		qcom,tpda-atid = <65>;
-		qcom,bc-elem-size = <13 32>;
-		qcom,tc-elem-size = <7 32>,
+		qcom,bc-elem-size = <10 32>,
 				    <13 32>;
-		qcom,dsb-elem-size = <13 32>;
-		qcom,cmb-elem-size = <7 32>,
-				     <8 32>,
+		qcom,tc-elem-size = <13 32>;
+		qcom,dsb-elem-size = <0 32>,
+				     <2 32>,
+				     <3 32>,
+				     <10 32>,
+				     <11 32>,
+				     <13 32>;
+		qcom,cmb-elem-size = <3 64>,
+				     <7 64>,
 				     <13 64>;
 
 		clocks = <&clock_gcc RPMH_QDSS_CLK>,
@@ -520,6 +535,33 @@
 			};
 
 			port@1 {
+				reg = <0>;
+				tpda_in_tpdm_center: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_center_out_tpda>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				tpda_in_funnel_dl_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_dl_mm_out_tpda>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				tpda_in_funnel_ddr_0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_ddr_0_out_tpda>;
+				};
+			};
+
+			port@4 {
 				reg = <7>;
 				tpda_in_tpdm_vsense: endpoint {
 					slave-mode;
@@ -528,16 +570,25 @@
 				};
 			};
 
-			port@2 {
-				reg = <8>;
-				tpda_in_tpdm_dcc: endpoint {
+			port@5 {
+				reg = <10>;
+				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
 					remote-endpoint =
-						<&tpdm_dcc_out_tpda>;
+						<&tpdm_qm_out_tpda>;
 				};
 			};
 
-			port@3 {
+			port@6 {
+				reg = <11>;
+				tpda_in_tpdm_north: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_north_out_tpda>;
+				};
+			};
+
+			port@7 {
 				reg = <13>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -548,6 +599,423 @@
 		};
 	};
 
+	funnel_modem: funnel@6832000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6832000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-modem";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_modem_out_funnel_in2: endpoint {
+					remote-endpoint =
+					    <&funnel_in2_in_funnel_modem>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_modem_in_tpda_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_modem_out_funnel_modem>;
+				};
+			};
+		};
+	};
+
+	tpda_modem: tpda@6831000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x6831000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-modem";
+
+		qcom,tpda-atid = <67>;
+		qcom,dsb-elem-size = <0 32>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_modem_out_funnel_modem: endpoint {
+					remote-endpoint =
+						<&funnel_modem_in_tpda_modem>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_modem_in_tpdm_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_modem_out_tpda_modem>;
+				};
+			};
+		};
+	};
+
+	tpdm_modem: tpdm@6830000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6830000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-modem";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_modem_out_tpda_modem: endpoint {
+				remote-endpoint = <&tpda_modem_in_tpdm_modem>;
+			};
+		};
+	};
+
+	tpdm_center: tpdm@6c28000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6c28000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-center";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_center_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_center>;
+			};
+		};
+	};
+
+	tpdm_north: tpdm@6a24000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6a24000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-north";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_north_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_north>;
+			};
+		};
+	};
+
+	tpdm_qm: tpdm@69d0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x69d0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-qm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_qm_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_qm>;
+			};
+		};
+	};
+
+	tpda_apss: tpda@7862000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7862000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-apss";
+
+		qcom,tpda-atid = <66>;
+		qcom,dsb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_apss_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					       <&funnel_apss_merg_in_tpda_apss>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_apss_in_tpdm_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_apss_out_tpda_apss>;
+				};
+			};
+		};
+	};
+
+	tpdm_apss: tpdm@7860000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7860000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-apss";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_apss_out_tpda_apss: endpoint {
+				remote-endpoint = <&tpda_apss_in_tpdm_apss>;
+			};
+		};
+	};
+
+	tpda_llm_silver: tpda@78c0000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x78c0000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-llm-silver";
+
+		qcom,tpda-atid = <72>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_llm_silver_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					<&funnel_apss_merg_in_tpda_llm_silver>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_llm_silver_in_tpdm_llm_silver: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&tpdm_llm_silver_out_tpda_llm_silver>;
+				};
+			};
+		};
+	};
+
+	tpdm_llm_silver: tpdm@78a0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x78a0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-llm-silver";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_llm_silver_out_tpda_llm_silver: endpoint {
+				remote-endpoint =
+					<&tpda_llm_silver_in_tpdm_llm_silver>;
+			};
+		};
+	};
+
+	tpda_llm_gold: tpda@78d0000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x78d0000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-llm-gold";
+
+		qcom,tpda-atid = <73>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_llm_gold_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					  <&funnel_apss_merg_in_tpda_llm_gold>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_llm_gold_in_tpdm_llm_gold: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&tpdm_llm_gold_out_tpda_llm_gold>;
+				};
+			};
+		};
+	};
+
+	tpdm_llm_gold: tpdm@78b0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x78b0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-llm-gold";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_llm_gold_out_tpda_llm_gold: endpoint {
+				remote-endpoint =
+					<&tpda_llm_gold_in_tpdm_llm_gold>;
+			};
+		};
+	};
+
+	funnel_dl_mm: funnel@6c0b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6c0b000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-dl-mm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_dl_mm_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_dl_mm>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				funnel_dl_mm_in_tpdm_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_mm_out_funnel_dl_mm>;
+				};
+			};
+		};
+	};
+
+	tpdm_mm: tpdm@6c08000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6c08000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-mm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_mm_out_funnel_dl_mm: endpoint {
+				remote-endpoint = <&funnel_dl_mm_in_tpdm_mm>;
+			};
+		};
+	};
+
+	funnel_ddr_0: funnel@69e2000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x69e2000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-ddr-0";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_ddr_0_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_ddr_0>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_ddr_0_in_tpdm_ddr: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_ddr_out_funnel_ddr_0>;
+				};
+			};
+		};
+	};
+
+	tpdm_ddr: tpdm@69e0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x69e0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-ddr";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_ddr_out_funnel_ddr_0: endpoint {
+				remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
+			};
+		};
+	};
+
 	tpdm_pimem: tpdm@6850000 {
 		compatible = "qcom,coresight-tpdm";
 		reg = <0x6850000 0x1000>;
@@ -566,25 +1034,6 @@
 		};
 	};
 
-
-	tpdm_dcc: tpdm@6870000 {
-		compatible = "qcom,coresight-tpdm";
-		reg = <0x6870000 0x1000>;
-		reg-names = "tpdm-base";
-
-		coresight-name = "coresight-tpdm-dcc";
-
-		clocks = <&clock_gcc RPMH_QDSS_CLK>,
-			 <&clock_gcc RPMH_QDSS_A_CLK>;
-		clock-names = "core_clk", "core_a_clk";
-
-		port {
-			tpdm_dcc_out_tpda: endpoint {
-				remote-endpoint = <&tpda_in_tpdm_dcc>;
-			};
-		};
-	};
-
 	tpdm_vsense: tpdm@6840000 {
 		compatible = "qcom,coresight-tpdm";
 		reg = <0x6840000 0x1000>;
@@ -1129,13 +1578,40 @@
 			};
 
 			port@2 {
-				reg = <1>;
+				reg = <2>;
 				funnel_apss_merg_in_tpda_olc: endpoint {
 					slave-mode;
 					remote-endpoint =
 					    <&tpda_olc_out_funnel_apss_merg>;
 				};
 			};
+
+			port@3 {
+				reg = <4>;
+				funnel_apss_merg_in_tpda_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_apss_out_funnel_apss_merg>;
+				};
+			};
+
+			port@4 {
+				reg = <5>;
+				funnel_apss_merg_in_tpda_llm_silver: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&tpda_llm_silver_out_funnel_apss_merg>;
+				};
+			};
+
+			port@5 {
+				reg = <6>;
+				funnel_apss_merg_in_tpda_llm_gold: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&tpda_llm_gold_out_funnel_apss_merg>;
+				};
+			};
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
new file mode 100644
index 0000000..a6efb50
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -0,0 +1,287 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	msm_bus: qcom,kgsl-busmon {
+		label = "kgsl-busmon";
+		compatible = "qcom,kgsl-busmon";
+	};
+
+	gpubw: qcom,gpubw {
+		compatible = "qcom,devbw";
+		governor = "bw_vbif";
+		qcom,src-dst-ports = <26 512>;
+		/*
+		 * The active-only flag is used while registering the bus
+		 * governor. It helps release the bus vote when the CPU
+		 * subsystem is inactive.
+		 */
+		qcom,active-only;
+		qcom,bw-tbl =
+			<     0 /*  off     */ >,
+			<   762 /*  100 MHz */ >,
+			<  1144 /*  150 MHz */ >,
+			<  1525 /*  200 MHz */ >,
+			<  2288 /*  300 MHz */ >,
+			<  3143 /*  412 MHz */ >,
+			<  4173 /*  547 MHz */ >,
+			<  5195 /*  681 MHz */ >,
+			<  5859 /*  768 MHz */ >,
+			<  7759 /*  1017 MHz */ >,
+			<  9887 /*  1296 MHz */ >,
+			<  11863 /*  1555 MHz */ >,
+			<  13763 /*  1804 MHz */ >;
+	};
+
+	msm_gpu: qcom,kgsl-3d0@5000000 {
+		label = "kgsl-3d0";
+		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+		status = "ok";
+		reg = <0x5000000 0x40000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <0 300 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		qcom,id = <0>;
+
+		qcom,chipid = <0x06030000>;
+
+		qcom,initial-pwrlevel = <2>;
+
+		qcom,gpu-quirk-hfi-use-reg;
+		qcom,gpu-quirk-two-pass-use-wfi;
+
+		qcom,idle-timeout = <100000000>; //msecs
+		qcom,no-nap;
+
+		qcom,highest-bank-bit = <15>;
+
+		qcom,min-access-length = <32>;
+
+		qcom,ubwc-mode = <2>;
+
+		qcom,snapshot-size = <1048576>; //bytes
+
+		qcom,gpu-qdss-stm = <0x161c0000 0x40000>; // base addr, size
+
+		qcom,tsens-name = "tsens_tz_sensor12";
+
+		clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK>,
+			<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
+			<&clock_gpucc GPU_CC_CXO_CLK>,
+			<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
+
+		clock-names = "core_clk", "iface_clk", "rbbmtimer_clk",
+			"mem_clk", "mem_iface_clk";
+
+		qcom,isense-clk-on-level = <1>;
+
+		/* Bus Scale Settings */
+		qcom,gpubw-dev = <&gpubw>;
+		qcom,bus-control;
+		qcom,msm-bus,name = "grp3d";
+		qcom,msm-bus,num-cases = <13>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>,
+
+				<26 512 0 800000>,      // 1 bus=100
+				<26 512 0 1200000>,     // 2 bus=150
+				<26 512 0 1600000>,     // 3 bus=200
+				<26 512 0 2400000>,     // 4 bus=300
+				<26 512 0 3296000>,     // 5 bus=412
+				<26 512 0 4376000>,     // 6 bus=547
+				<26 512 0 5448000>,     // 7 bus=681
+				<26 512 0 6144000>,     // 8 bus=768
+				<26 512 0 8136000>,     // 9 bus=1017
+				<26 512 0 10368000>,    // 10 bus=1296
+				<26 512 0 12440000>,    // 11 bus=1555
+				<26 512 0 14432000>;    // 12 bus=1804
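+		/*
+		 * The qcom,bus-freq, qcom,bus-min and qcom,bus-max values in
+		 * the power levels below index into this 13-entry vector
+		 * table (0 = off ... 12 = bus level 1804 MHz).
+		 */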
+
+		/* GDSC regulator names */
+		regulator-names = "vddcx", "vdd";
+		/* GDSC oxili regulators */
+		vddcx-supply = <&gpu_cx_gdsc>;
+		vdd-supply = <&gpu_gx_gdsc>;
+
+		/* GPU related llc slices */
+		cache-slice-names = "gpu", "gpuhtw";
+		cache-slices = <&llcc 12>, <&llcc 11>;
+
+		/* GPU Mempools */
+		qcom,gpu-mempools {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "qcom,gpu-mempools";
+
+			/* 4K Page Pool configuration */
+			qcom,gpu-mempool@0 {
+				reg = <0>;
+				qcom,mempool-page-size = <4096>;
+				qcom,mempool-reserved = <2048>;
+				qcom,mempool-allocate;
+			};
+			/* 8K Page Pool configuration */
+			qcom,gpu-mempool@1 {
+				reg = <1>;
+				qcom,mempool-page-size = <8192>;
+				qcom,mempool-reserved = <1024>;
+				qcom,mempool-allocate;
+			};
+			/* 64K Page Pool configuration */
+			qcom,gpu-mempool@2 {
+				reg = <2>;
+				qcom,mempool-page-size = <65536>;
+				qcom,mempool-reserved = <256>;
+			};
+			/* 1M Page Pool configuration */
+			qcom,gpu-mempool@3 {
+				reg = <3>;
+				qcom,mempool-page-size = <1048576>;
+				qcom,mempool-reserved = <32>;
+			};
+		};
+
+		/* Power levels */
+		qcom,gpu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,gpu-pwrlevels";
+
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <548000000>;
+				qcom,bus-freq = <12>;
+				qcom,bus-min = <11>;
+				qcom,bus-max = <12>;
+			};
+
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <425000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <6>;
+				qcom,bus-max = <8>;
+			};
+
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <280000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <27000000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+	};
+
+	kgsl_msm_iommu: qcom,kgsl-iommu {
+		compatible = "qcom,kgsl-smmu-v2";
+
+		reg = <0x05040000 0x10000>;
+		qcom,protect = <0x40000 0x10000>;
+		qcom,micro-mmu-control = <0x6000>;
+
+		clocks =<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
+			<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
+
+		clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
+
+		qcom,secure_align_mask = <0xfff>;
+		qcom,global_pt;
+
+		gfx3d_user: gfx3d_user {
+			compatible = "qcom,smmu-kgsl-cb";
+			label = "gfx3d_user";
+			iommus = <&kgsl_smmu 0>;
+			qcom,gpu-offset = <0x48000>;
+		};
+
+		gfx3d_secure: gfx3d_secure {
+			compatible = "qcom,smmu-kgsl-cb";
+			iommus = <&kgsl_smmu 2>;
+		};
+	};
+
+	gmu: qcom,gmu {
+		label = "kgsl-gmu";
+		compatible = "qcom,gpu-gmu";
+
+		reg = <0x506a000 0x26000>, <0xb200000 0x300000>;
+		reg-names = "kgsl_gmu_reg", "kgsl_gmu_pdc_reg";
+
+		interrupts = <0 304 0>, <0 305 0>;
+		interrupt-names = "kgsl_hfi_irq", "kgsl_gmu_irq";
+
+		qcom,msm-bus,name = "cnoc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<26 10036 0 0>,		// CNOC off
+			<26 10036 0 100>;	// CNOC on
+
+		regulator-names = "vddcx", "vdd";
+		vddcx-supply = <&gpu_cx_gdsc>;
+		vdd-supply = <&gpu_gx_gdsc>;
+
+		clocks = <&clock_gpucc GPU_CC_CX_GMU_CLK>,
+				<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
+				<&clock_gpucc GPU_CC_CXO_CLK>,
+				<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+				<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
+
+		clock-names = "gmu_clk", "ahb_clk", "cxo_clk",
+				"axi_clk", "memnoc_clk";
+
+		qcom,gmu-pwrlevels {
+			compatible = "qcom,gmu-pwrlevels";
+
+			qcom,gmu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gmu-freq = <400000000>;
+			};
+
+			qcom,gmu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gmu-freq = <19200000>;
+			};
+
+			qcom,gmu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gmu-freq = <0>;
+			};
+		};
+
+		gmu_user: gmu_user {
+			compatible = "qcom,smmu-gmu-user-cb";
+			iommus = <&kgsl_smmu 4>;
+		};
+
+		gmu_kernel: gmu_kernel {
+			compatible = "qcom,smmu-gmu-kernel-cb";
+			iommus = <&kgsl_smmu 5>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index cfba6f4..6d61506 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -9,3 +9,109 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+#include <dt-bindings/gpio/gpio.h>
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default
+			     &key_cam_snapshot_default
+			     &key_cam_focus_default>;
+
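+		/*
+		 * linux,code values: 115 = KEY_VOLUMEUP, 528 = KEY_CAMERA_FOCUS;
+		 * 766 is assumed to be the downstream camera-snapshot keycode.
+		 */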
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <766>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <528>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_mem {
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&ufsphy_card {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_card {
+	vdd-hba-supply = <&ufs_card_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l21>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <300000>;
+	vccq2-max-microamp = <300000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&pmi8998_flash2 {
+	pinctrl-names = "led_enable", "led_disable";
+	pinctrl-0 = <&flash_led3_front_en>;
+	pinctrl-1 = <&flash_led3_front_dis>;
+};
+
+&pmi8998_torch2 {
+	pinctrl-names = "led_enable", "led_disable";
+	pinctrl-0 = <&flash_led3_front_en>;
+	pinctrl-1 = <&flash_led3_front_dis>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 6d6f775..f300684 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -11,15 +11,79 @@
  */
 
 &soc {
-	tlmm: pinctrl@03800000 {
+	tlmm: pinctrl@03400000 {
 		compatible = "qcom,sdm845-pinctrl";
-		reg = <0x03800000 0xc00000>;
+		reg = <0x03400000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 
+		ufs_dev_reset_assert: ufs_dev_reset_assert {
+			config {
+				pins = "ufs_reset";
+				bias-pull-down;		/* default: pull down */
+				/*
+				 * UFS_RESET drive strengths use different
+				 * values/steps than typical GPIO drive
+				 * strengths.
+				 *
+				 * The following table clarifies the mapping:
+				 *
+				 * HDRV value | UFS_RESET | Typical GPIO
+				 *   (dec)    |   (mA)    |    (mA)
+				 *     0      |   0.8     |    2
+				 *     1      |   1.55    |    4
+				 *     2      |   2.35    |    6
+				 *     3      |   3.1     |    8
+				 *     4      |   3.9     |    10
+				 *     5      |   4.65    |    12
+				 *     6      |   5.4     |    14
+				 *     7      |   6.15    |    16
+				 *
+				 * The POR value of the UFS_RESET HDRV field is
+				 * 3, i.e. 3.1 mA, which is what we want. Hence
+				 * specify 8 mA via the "drive-strength"
+				 * binding, which results in 3 being written to
+				 * the HDRV field.
+				 */
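+				/* i.e. HDRV = (typical mA / 2) - 1, so 8 mA -> 3 */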
+				drive-strength = <8>;	/* default: 3.1 mA */
+				output-low; /* assert the active-low reset */
+			};
+		};
+
+		ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+			config {
+				pins = "ufs_reset";
+				bias-pull-down;		/* default: pull down */
+				/*
+				 * default: 3.1 mA
+				 * check comments under ufs_dev_reset_assert
+				 */
+				drive-strength = <8>;
+				output-high; /* deassert the active-low reset */
+			};
+		};
+
+		flash_led3_front {
+			flash_led3_front_en: flash_led3_front_en {
+				mux {
+					pins = "gpio21";
+					drive-strength = <2>;
+					output-high;
+				};
+			};
+
+			flash_led3_front_dis: flash_led3_front_dis {
+				mux {
+					pins = "gpio21";
+					drive-strength = <2>;
+					output-low;
+				};
+			};
+		};
+
 		wcd9xxx_intr {
 			wcd_intr_default: wcd_intr_default{
 				mux {
@@ -238,6 +302,61 @@
 			};
 		};
 
+		pmx_sde: pmx_sde {
+			sde_dsi_active: sde_dsi_active {
+				mux {
+					pins = "gpio6", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio52";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;		/* no pull */
+				};
+			};
+			sde_dsi_suspend: sde_dsi_suspend {
+				mux {
+					pins = "gpio6", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio52";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
+		pmx_sde_te {
+			sde_te_active: sde_te_active {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+
+			sde_te_suspend: sde_te_suspend {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
 		sec_aux_pcm {
 			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
 				mux {
@@ -1056,5 +1175,1145 @@
 				};
 			};
 		};
+
+		/* QUPv3 South SE mappings */
+		/* SE 0 pin mappings */
+		qupv3_se0_i2c_pins: qupv3_se0_i2c_pins {
+			qupv3_se0_i2c_active: qupv3_se0_i2c_active {
+				mux {
+					pins = "gpio0", "gpio1";
+					function = "qup0";
+				};
+
+				config {
+					pins = "gpio0", "gpio1";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se0_i2c_sleep: qupv3_se0_i2c_sleep {
+				mux {
+					pins = "gpio0", "gpio1";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio0", "gpio1";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se0_spi_pins: qupv3_se0_spi_pins {
+			qupv3_se0_spi_active: qupv3_se0_spi_active {
+				mux {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					function = "qup0";
+				};
+
+				config {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se0_spi_sleep: qupv3_se0_spi_sleep {
+				mux {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 1 pin mappings */
+		qupv3_se1_i2c_pins: qupv3_se1_i2c_pins {
+			qupv3_se1_i2c_active: qupv3_se1_i2c_active {
+				mux {
+					pins = "gpio17", "gpio18";
+					function = "qup1";
+				};
+
+				config {
+					pins = "gpio17", "gpio18";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se1_i2c_sleep: qupv3_se1_i2c_sleep {
+				mux {
+					pins = "gpio17", "gpio18";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio17", "gpio18";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se1_spi_pins: qupv3_se1_spi_pins {
+			qupv3_se1_spi_active: qupv3_se1_spi_active {
+				mux {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					function = "qup1";
+				};
+
+				config {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se1_spi_sleep: qupv3_se1_spi_sleep {
+				mux {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 2 pin mappings */
+		qupv3_se2_i2c_pins: qupv3_se2_i2c_pins {
+			qupv3_se2_i2c_active: qupv3_se2_i2c_active {
+				mux {
+					pins = "gpio27", "gpio28";
+					function = "qup2";
+				};
+
+				config {
+					pins = "gpio27", "gpio28";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se2_i2c_sleep: qupv3_se2_i2c_sleep {
+				mux {
+					pins = "gpio27", "gpio28";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio27", "gpio28";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se2_spi_pins: qupv3_se2_spi_pins {
+			qupv3_se2_spi_active: qupv3_se2_spi_active {
+				mux {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					function = "qup2";
+				};
+
+				config {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se2_spi_sleep: qupv3_se2_spi_sleep {
+				mux {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 3 pin mappings */
+		qupv3_se3_i2c_pins: qupv3_se3_i2c_pins {
+			qupv3_se3_i2c_active: qupv3_se3_i2c_active {
+				mux {
+					pins = "gpio41", "gpio42";
+					function = "qup3";
+				};
+
+				config {
+					pins = "gpio41", "gpio42";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se3_i2c_sleep: qupv3_se3_i2c_sleep {
+				mux {
+					pins = "gpio41", "gpio42";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio41", "gpio42";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se3_spi_pins: qupv3_se3_spi_pins {
+			qupv3_se3_spi_active: qupv3_se3_spi_active {
+				mux {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					function = "qup3";
+				};
+
+				config {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se3_spi_sleep: qupv3_se3_spi_sleep {
+				mux {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 4 pin mappings */
+		qupv3_se4_i2c_pins: qupv3_se4_i2c_pins {
+			qupv3_se4_i2c_active: qupv3_se4_i2c_active {
+				mux {
+					pins = "gpio89", "gpio90";
+					function = "qup4";
+				};
+
+				config {
+					pins = "gpio89", "gpio90";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se4_i2c_sleep: qupv3_se4_i2c_sleep {
+				mux {
+					pins = "gpio89", "gpio90";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio89", "gpio90";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se4_spi_pins: qupv3_se4_spi_pins {
+			qupv3_se4_spi_active: qupv3_se4_spi_active {
+				mux {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					function = "qup4";
+				};
+
+				config {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se4_spi_sleep: qupv3_se4_spi_sleep {
+				mux {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 5 pin mappings */
+		qupv3_se5_i2c_pins: qupv3_se5_i2c_pins {
+			qupv3_se5_i2c_active: qupv3_se5_i2c_active {
+				mux {
+					pins = "gpio85", "gpio86";
+					function = "qup5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se5_i2c_sleep: qupv3_se5_i2c_sleep {
+				mux {
+					pins = "gpio85", "gpio86";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio85", "gpio86";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se5_spi_pins: qupv3_se5_spi_pins {
+			qupv3_se5_spi_active: qupv3_se5_spi_active {
+				mux {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					function = "qup5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se5_spi_sleep: qupv3_se5_spi_sleep {
+				mux {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 6 pin mappings */
+		qupv3_se6_i2c_pins: qupv3_se6_i2c_pins {
+			qupv3_se6_i2c_active: qupv3_se6_i2c_active {
+				mux {
+					pins = "gpio45", "gpio46";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio45", "gpio46";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se6_i2c_sleep: qupv3_se6_i2c_sleep {
+				mux {
+					pins = "gpio45", "gpio46";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45", "gpio46";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se6_4uart_pins: qupv3_se6_4uart_pins {
+			qupv3_se6_4uart_active: qupv3_se6_4uart_active {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se6_4uart_sleep: qupv3_se6_4uart_sleep {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se6_spi_pins: qupv3_se6_spi_pins {
+			qupv3_se6_spi_active: qupv3_se6_spi_active {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se6_spi_sleep: qupv3_se6_spi_sleep {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 7 pin mappings */
+		qupv3_se7_i2c_pins: qupv3_se7_i2c_pins {
+			qupv3_se7_i2c_active: qupv3_se7_i2c_active {
+				mux {
+					pins = "gpio93", "gpio94";
+					function = "qup7";
+				};
+
+				config {
+					pins = "gpio93", "gpio94";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se7_i2c_sleep: qupv3_se7_i2c_sleep {
+				mux {
+					pins = "gpio93", "gpio94";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio93", "gpio94";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se7_4uart_pins: qupv3_se7_4uart_pins {
+			qupv3_se7_4uart_active: qupv3_se7_4uart_active {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "qup7";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se7_4uart_sleep: qupv3_se7_4uart_sleep {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se7_spi_pins: qupv3_se7_spi_pins {
+			qupv3_se7_spi_active: qupv3_se7_spi_active {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "qup7";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se7_spi_sleep: qupv3_se7_spi_sleep {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* QUPv3 North instances */
+		/* SE 8 pin mappings */
+		qupv3_se8_i2c_pins: qupv3_se8_i2c_pins {
+			qupv3_se8_i2c_active: qupv3_se8_i2c_active {
+				mux {
+					pins = "gpio65", "gpio66";
+					function = "qup8";
+				};
+
+				config {
+					pins = "gpio65", "gpio66";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se8_i2c_sleep: qupv3_se8_i2c_sleep {
+				mux {
+					pins = "gpio65", "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65", "gpio66";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se8_spi_pins: qupv3_se8_spi_pins {
+			qupv3_se8_spi_active: qupv3_se8_spi_active {
+				mux {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					function = "qup8";
+				};
+
+				config {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se8_spi_sleep: qupv3_se8_spi_sleep {
+				mux {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 9 pin mappings */
+		qupv3_se9_i2c_pins: qupv3_se9_i2c_pins {
+			qupv3_se9_i2c_active: qupv3_se9_i2c_active {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "qup9";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se9_i2c_sleep: qupv3_se9_i2c_sleep {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se9_2uart_pins: qupv3_se9_2uart_pins {
+			qupv3_se9_2uart_active: qupv3_se9_2uart_active {
+				mux {
+					pins = "gpio4", "gpio5";
+					function = "qup9";
+				};
+
+				config {
+					pins = "gpio4", "gpio5";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se9_2uart_sleep: qupv3_se9_2uart_sleep {
+				mux {
+					pins = "gpio4", "gpio5";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio4", "gpio5";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se9_spi_pins: qupv3_se9_spi_pins {
+			qupv3_se9_spi_active: qupv3_se9_spi_active {
+				mux {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					function = "qup9";
+				};
+
+				config {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se9_spi_sleep: qupv3_se9_spi_sleep {
+				mux {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 10 pin mappings */
+		qupv3_se10_i2c_pins: qupv3_se10_i2c_pins {
+			qupv3_se10_i2c_active: qupv3_se10_i2c_active {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "qup10";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se10_i2c_sleep: qupv3_se10_i2c_sleep {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se10_2uart_pins: qupv3_se10_2uart_pins {
+			qupv3_se10_2uart_active: qupv3_se10_2uart_active {
+				mux {
+					pins = "gpio53", "gpio54";
+					function = "qup10";
+				};
+
+				config {
+					pins = "gpio53", "gpio54";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se10_2uart_sleep: qupv3_se10_2uart_sleep {
+				mux {
+					pins = "gpio53", "gpio54";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio53", "gpio54";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se10_spi_pins: qupv3_se10_spi_pins {
+			qupv3_se10_spi_active: qupv3_se10_spi_active {
+				mux {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					function = "qup10";
+				};
+
+				config {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se10_spi_sleep: qupv3_se10_spi_sleep {
+				mux {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 11 pin mappings */
+		qupv3_se11_i2c_pins: qupv3_se11_i2c_pins {
+			qupv3_se11_i2c_active: qupv3_se11_i2c_active {
+				mux {
+					pins = "gpio31", "gpio32";
+					function = "qup11";
+				};
+
+				config {
+					pins = "gpio31", "gpio32";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se11_i2c_sleep: qupv3_se11_i2c_sleep {
+				mux {
+					pins = "gpio31", "gpio32";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio31", "gpio32";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se11_spi_pins: qupv3_se11_spi_pins {
+			qupv3_se11_spi_active: qupv3_se11_spi_active {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "qup11";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se11_spi_sleep: qupv3_se11_spi_sleep {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 12 pin mappings */
+		qupv3_se12_i2c_pins: qupv3_se12_i2c_pins {
+			qupv3_se12_i2c_active: qupv3_se12_i2c_active {
+				mux {
+					pins = "gpio49", "gpio50";
+					function = "qup12";
+				};
+
+				config {
+					pins = "gpio49", "gpio50";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se12_i2c_sleep: qupv3_se12_i2c_sleep {
+				mux {
+					pins = "gpio49", "gpio50";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio49", "gpio50";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se12_spi_pins: qupv3_se12_spi_pins {
+			qupv3_se12_spi_active: qupv3_se12_spi_active {
+				mux {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					function = "qup12";
+				};
+
+				config {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se12_spi_sleep: qupv3_se12_spi_sleep {
+				mux {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 13 pin mappings */
+		qupv3_se13_i2c_pins: qupv3_se13_i2c_pins {
+			qupv3_se13_i2c_active: qupv3_se13_i2c_active {
+				mux {
+					pins = "gpio105", "gpio106";
+					function = "qup13";
+				};
+
+				config {
+					pins = "gpio105", "gpio106";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se13_i2c_sleep: qupv3_se13_i2c_sleep {
+				mux {
+					pins = "gpio105", "gpio106";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio105", "gpio106";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se13_spi_pins: qupv3_se13_spi_pins {
+			qupv3_se13_spi_active: qupv3_se13_spi_active {
+				mux {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					function = "qup13";
+				};
+
+				config {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se13_spi_sleep: qupv3_se13_spi_sleep {
+				mux {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 14 pin mappings */
+		qupv3_se14_i2c_pins: qupv3_se14_i2c_pins {
+			qupv3_se14_i2c_active: qupv3_se14_i2c_active {
+				mux {
+					pins = "gpio33", "gpio34";
+					function = "qup14";
+				};
+
+				config {
+					pins = "gpio33", "gpio34";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se14_i2c_sleep: qupv3_se14_i2c_sleep {
+				mux {
+					pins = "gpio33", "gpio34";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio33", "gpio34";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se14_spi_pins: qupv3_se14_spi_pins {
+			qupv3_se14_spi_active: qupv3_se14_spi_active {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "qup14";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se14_spi_sleep: qupv3_se14_spi_sleep {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 15 pin mappings */
+		qupv3_se15_i2c_pins: qupv3_se15_i2c_pins {
+			qupv3_se15_i2c_active: qupv3_se15_i2c_active {
+				mux {
+					pins = "gpio81", "gpio82";
+					function = "qup15";
+				};
+
+				config {
+					pins = "gpio81", "gpio82";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se15_i2c_sleep: qupv3_se15_i2c_sleep {
+				mux {
+					pins = "gpio81", "gpio82";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio81", "gpio82";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se15_spi_pins: qupv3_se15_spi_pins {
+			qupv3_se15_spi_active: qupv3_se15_spi_active {
+				mux {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					function = "qup15";
+				};
+
+				config {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se15_spi_sleep: qupv3_se15_spi_sleep {
+				mux {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+	};
+};
+
+&pm8998_gpios {
+	key_home {
+		key_home_default: key_home_default {
+			pins = "gpio5";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+
+	key_vol_up {
+		key_vol_up_default: key_vol_up_default {
+			pins = "gpio6";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+
+	key_cam_snapshot {
+		key_cam_snapshot_default: key_cam_snapshot_default {
+			pins = "gpio7";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+
+	key_cam_focus {
+		key_cam_focus_default: key_cam_focus_default {
+			pins = "gpio8";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
new file mode 100644
index 0000000..228b924
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 QRD";
+	compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+	qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
new file mode 100644
index 0000000..6ea92ee
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -0,0 +1,11 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
new file mode 100644
index 0000000..1c31a7a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -0,0 +1,632 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	/* QUPv3 South instances */
+
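+	/*
+	 * Each serial engine (SE) is described once per protocol (UART, I2C,
+	 * SPI) and every node defaults to "disabled"; since the protocol
+	 * nodes for an SE share the same register block, a board is expected
+	 * to enable at most one of them.
+	 */
+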
+	/*
+	 * HS UART instances. HS UART use cases are supported only on these
+	 * instances.
+	 */
+	qupv3_se6_4uart: qcom,qup_uart@898000 {
+		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		reg = <0x898000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_4uart_active>;
+		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
+		interrupts = <GIC_SPI 607 0>;
+		status = "disabled";
+	};
+
+	qupv3_se7_4uart: qcom,qup_uart@89c000 {
+		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		reg = <0x89c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_4uart_active>;
+		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
+		interrupts = <GIC_SPI 608 0>;
+		status = "disabled";
+	};
+
+	/* I2C */
+	qupv3_se0_i2c: i2c@880000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x880000 0x4000>;
+		interrupts = <GIC_SPI 601 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se0_i2c_active>;
+		pinctrl-1 = <&qupv3_se0_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se1_i2c: i2c@884000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x884000 0x4000>;
+		interrupts = <GIC_SPI 602 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se1_i2c_active>;
+		pinctrl-1 = <&qupv3_se1_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se2_i2c: i2c@888000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x888000 0x4000>;
+		interrupts = <GIC_SPI 603 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se2_i2c_active>;
+		pinctrl-1 = <&qupv3_se2_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se3_i2c: i2c@88c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x88c000 0x4000>;
+		interrupts = <GIC_SPI 604 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se3_i2c_active>;
+		pinctrl-1 = <&qupv3_se3_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se4_i2c: i2c@890000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x890000 0x4000>;
+		interrupts = <GIC_SPI 605 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se4_i2c_active>;
+		pinctrl-1 = <&qupv3_se4_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se5_i2c: i2c@894000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x894000 0x4000>;
+		interrupts = <GIC_SPI 606 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se5_i2c_active>;
+		pinctrl-1 = <&qupv3_se5_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se6_i2c: i2c@898000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x898000 0x4000>;
+		interrupts = <GIC_SPI 607 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_i2c_active>;
+		pinctrl-1 = <&qupv3_se6_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se7_i2c: i2c@89c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x89c000 0x4000>;
+		interrupts = <GIC_SPI 608 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_i2c_active>;
+		pinctrl-1 = <&qupv3_se7_i2c_sleep>;
+		status = "disabled";
+	};
+
+	/* SPI */
+	qupv3_se0_spi: spi@880000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x880000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se0_spi_active>;
+		pinctrl-1 = <&qupv3_se0_spi_sleep>;
+		interrupts = <GIC_SPI 601 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se1_spi: spi@884000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x884000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se1_spi_active>;
+		pinctrl-1 = <&qupv3_se1_spi_sleep>;
+		interrupts = <GIC_SPI 602 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se2_spi: spi@888000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x888000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se2_spi_active>;
+		pinctrl-1 = <&qupv3_se2_spi_sleep>;
+		interrupts = <GIC_SPI 603 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se3_spi: spi@88c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x88c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se3_spi_active>;
+		pinctrl-1 = <&qupv3_se3_spi_sleep>;
+		interrupts = <GIC_SPI 604 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se4_spi: spi@890000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x890000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se4_spi_active>;
+		pinctrl-1 = <&qupv3_se4_spi_sleep>;
+		interrupts = <GIC_SPI 605 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se5_spi: spi@894000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x894000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se5_spi_active>;
+		pinctrl-1 = <&qupv3_se5_spi_sleep>;
+		interrupts = <GIC_SPI 606 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se6_spi: spi@898000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x898000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_spi_active>;
+		pinctrl-1 = <&qupv3_se6_spi_sleep>;
+		interrupts = <GIC_SPI 607 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se7_spi: spi@89c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x89c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_spi_active>;
+		pinctrl-1 = <&qupv3_se7_spi_sleep>;
+		interrupts = <GIC_SPI 608 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	/* QUPv3 North Instances */
+	/* 2-wire UART */
+
+	/* Debug UART Instance for CDP/MTP platform */
+	qupv3_se9_2uart: qcom,qup_uart@a84000 {
+		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_2uart_active>;
+		pinctrl-1 = <&qupv3_se9_2uart_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		status = "disabled";
+	};
+
+	/* Debug UART Instance for RUMI platform */
+	qupv3_se10_2uart: qcom,qup_uart@a88000 {
+		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		reg = <0xa88000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_2uart_active>;
+		pinctrl-1 = <&qupv3_se10_2uart_sleep>;
+		interrupts = <GIC_SPI 355 0>;
+		status = "disabled";
+	};
+
+	/* I2C */
+	qupv3_se8_i2c: i2c@a80000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa80000 0x4000>;
+		interrupts = <GIC_SPI 353 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se8_i2c_active>;
+		pinctrl-1 = <&qupv3_se8_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se9_i2c: i2c@a84000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa84000 0x4000>;
+		interrupts = <GIC_SPI 354 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_i2c_active>;
+		pinctrl-1 = <&qupv3_se9_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se10_i2c: i2c@a88000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa88000 0x4000>;
+		interrupts = <GIC_SPI 355 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_i2c_active>;
+		pinctrl-1 = <&qupv3_se10_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se11_i2c: i2c@a8c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa8c000 0x4000>;
+		interrupts = <GIC_SPI 356 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se11_i2c_active>;
+		pinctrl-1 = <&qupv3_se11_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se12_i2c: i2c@a90000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa90000 0x4000>;
+		interrupts = <GIC_SPI 357 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se12_i2c_active>;
+		pinctrl-1 = <&qupv3_se12_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se13_i2c: i2c@a94000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa94000 0x4000>;
+		interrupts = <GIC_SPI 358 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se13_i2c_active>;
+		pinctrl-1 = <&qupv3_se13_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se14_i2c: i2c@a98000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa98000 0x4000>;
+		interrupts = <GIC_SPI 359 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se14_i2c_active>;
+		pinctrl-1 = <&qupv3_se14_i2c_sleep>;
+		status = "disabled";
+	};
+
+	qupv3_se15_i2c: i2c@a9c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa9c000 0x4000>;
+		interrupts = <GIC_SPI 360 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se15_i2c_active>;
+		pinctrl-1 = <&qupv3_se15_i2c_sleep>;
+		status = "disabled";
+	};
+
+	/* SPI */
+	qupv3_se8_spi: spi@a80000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa80000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se8_spi_active>;
+		pinctrl-1 = <&qupv3_se8_spi_sleep>;
+		interrupts = <GIC_SPI 353 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se9_spi: spi@a84000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_spi_active>;
+		pinctrl-1 = <&qupv3_se9_spi_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se10_spi: spi@a88000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa88000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_spi_active>;
+		pinctrl-1 = <&qupv3_se10_spi_sleep>;
+		interrupts = <GIC_SPI 355 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se11_spi: spi@a8c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa8c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se11_spi_active>;
+		pinctrl-1 = <&qupv3_se11_spi_sleep>;
+		interrupts = <GIC_SPI 356 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se12_spi: spi@a90000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa90000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se12_spi_active>;
+		pinctrl-1 = <&qupv3_se12_spi_sleep>;
+		interrupts = <GIC_SPI 357 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se13_spi: spi@a94000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa94000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se13_spi_active>;
+		pinctrl-1 = <&qupv3_se13_spi_sleep>;
+		interrupts = <GIC_SPI 358 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se14_spi: spi@a98000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa98000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se14_spi_active>;
+		pinctrl-1 = <&qupv3_se14_spi_sleep>;
+		interrupts = <GIC_SPI 359 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+
+	qupv3_se15_spi: spi@a9c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa9c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se15_spi_active>;
+		pinctrl-1 = <&qupv3_se15_spi_sleep>;
+		interrupts = <GIC_SPI 360 0>;
+		spi-max-frequency = <50000000>;
+		status = "disabled";
+	};
+};
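Note on the QUPv3 serial-engine nodes above: every SPI, I2C, and UART instance is added with status = "disabled", so a board-specific dts/dtsi is expected to enable only the instances that are actually wired up and to attach its slave devices there. A minimal, hypothetical board-level sketch (the &qupv3_se8_spi label is defined above; the child node, its compatible string, and the 19.2 MHz rate are illustrative placeholders):

	&qupv3_se8_spi {
		status = "ok";

		/* hypothetical SPI slave on chip select 0 */
		spi-dev@0 {
			compatible = "vendor,example-spi-dev";
			reg = <0>;
			/* must stay within the 50 MHz spi-max-frequency above */
			spi-max-frequency = <19200000>;
		};
	};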
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 228bbb3..540f82f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -26,30 +26,253 @@
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
-
-	apc0_pwrcl_vreg: regulator-pwrcl {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc0_pwrcl_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <23>;
-	};
-
-	apc0_l3_vreg: regulator-l3 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc0_l3_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <19>;
-	};
-
-	apc1_perfcl_vreg: regulator-perfcl {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc1_perfcl_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <26>;
-	};
 };
 
 &soc {
+	/* CPR controller regulators */
+	apc0_cpr: cprh-ctrl@17dc0000 {
+		compatible = "qcom,cprh-sdm845-v1-kbss-regulator";
+		reg =	<0x17dc0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17840000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc0";
+		qcom,cpr-controller-id = <0>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <11>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x3A00000>;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17dc3a84 0x17dc3a88 0x17840c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_SILVER_CPRH_STATUS_0",
+			"APSS_SILVER_CPRH_STATUS_1",
+			"SILVER_SAW4_PMIC_STS";
+
+		thread@1 {
+			qcom,cpr-thread-id = <1>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_pwrcl_vreg: regulator {
+				regulator-name = "apc0_pwrcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <17>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <17>;
+				qcom,cpr-corners = <17>;
+
+				qcom,cpr-corner-fmax-map = <6 12 17>;
+
+				qcom,cpr-voltage-ceiling =
+					<688000  688000  688000  688000  688000
+					 688000  756000  756000  756000  812000
+					 812000  812000  872000  872000  872000
+					 872000  928000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  584000
+					 584000  584000  632000  632000  632000
+					 632000  672000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_l3_vreg: regulator {
+				regulator-name = "apc0_l3_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <9>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <9>;
+				qcom,cpr-corners = <9>;
+
+				qcom,cpr-corner-fmax-map = <4 7 9>;
+
+				qcom,cpr-voltage-ceiling =
+					<688000  688000  688000  688000  756000
+					 812000  812000  872000  928000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 584000  584000  632000  672000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+	};
+
+	apc1_cpr: cprh-ctrl@17db0000 {
+		compatible = "qcom,cprh-sdm845-kbss-regulator";
+		reg =	<0x17db0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17830000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc1";
+		qcom,cpr-controller-id = <1>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <9>;
+		qcom,cpr-step-quot-init-max = <14>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,apm-threshold-voltage = <800000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,mem-acc-threshold-voltage = <852000>;
+		qcom,mem-acc-crossover-voltage = <852000>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x4200000>;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17db3a84 0x17830c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc1_perfcl_vreg: regulator {
+				regulator-name = "apc1_perfcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <24>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <22>;
+				qcom,cpr-corners = <22>;
+
+				qcom,cpr-corner-fmax-map =
+					<10 17 22>;
+
+				qcom,cpr-voltage-ceiling =
+					<756000  756000  756000  756000  756000
+					 756000  756000  756000  756000  756000
+					 812000  812000  828000  828000  828000
+					 828000  828000  884000  952000  952000
+					1056000 1056000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 584000  584000  632000  632000  632000
+					 632000  632000  672000  712000  712000
+					 772000  772000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1190400000
+					1267200000 1344000000 1420800000
+					1497600000 1574400000 1651200000
+					1728000000 1804800000 1881600000
+					1958400000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+	};
+
 	/* RPMh regulators: */
 
 	/* PM8998 S1 = VDD_EBI supply */
@@ -165,12 +388,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l1: regulator-l1 {
 			regulator-name = "pm8998_l1";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -178,12 +406,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 30000>;
 		pm8998_l2: regulator-l2 {
 			regulator-name = "pm8998_l2";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -191,12 +424,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l3: regulator-l3 {
 			regulator-name = "pm8998_l3";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -217,12 +455,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l5: regulator-l5 {
 			regulator-name = "pm8998_l5";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -230,12 +473,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l6: regulator-l6 {
 			regulator-name = "pm8998_l6";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1856000>;
 			regulator-max-microvolt = <1856000>;
 			qcom,init-voltage = <1856000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -243,12 +491,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l7: regulator-l7 {
 			regulator-name = "pm8998_l7";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -256,12 +509,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l8: regulator-l8 {
 			regulator-name = "pm8998_l8";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -269,12 +527,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l9: regulator-l9 {
 			regulator-name = "pm8998_l9";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1808000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1808000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -282,12 +545,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l10: regulator-l10 {
 			regulator-name = "pm8998_l10";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1808000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1808000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -295,12 +563,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l11: regulator-l11 {
 			regulator-name = "pm8998_l11";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -308,12 +581,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l12: regulator-l12 {
 			regulator-name = "pm8998_l12";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -321,12 +599,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l13: regulator-l13 {
 			regulator-name = "pm8998_l13";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1808000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1808000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -334,12 +617,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l14: regulator-l14 {
 			regulator-name = "pm8998_l14";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -347,12 +635,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l15: regulator-l15 {
 			regulator-name = "pm8998_l15";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -360,12 +653,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l16: regulator-l16 {
 			regulator-name = "pm8998_l16";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2704000>;
 			qcom,init-voltage = <2704000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -373,12 +671,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l17: regulator-l17 {
 			regulator-name = "pm8998_l17";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1304000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1304000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -386,12 +689,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa18";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l18: regulator-l18 {
 			regulator-name = "pm8998_l18";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2704000>;
 			qcom,init-voltage = <2704000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -399,12 +707,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l19: regulator-l19 {
 			regulator-name = "pm8998_l19";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3008000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <3008000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -412,12 +725,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa20";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l20: regulator-l20 {
 			regulator-name = "pm8998_l20";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -425,12 +743,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa21";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l21: regulator-l21 {
 			regulator-name = "pm8998_l21";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -438,12 +761,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa22";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l22: regulator-l22 {
 			regulator-name = "pm8998_l22";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2864000>;
 			regulator-max-microvolt = <2864000>;
 			qcom,init-voltage = <2864000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -451,12 +779,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa23";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l23: regulator-l23 {
 			regulator-name = "pm8998_l23";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3312000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3312000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -464,12 +797,18 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa24";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
+		pm8998_l24-parent-supply = <&pm8998_l12>;
 		pm8998_l24: regulator-l24 {
 			regulator-name = "pm8998_l24";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3088000>;
 			qcom,init-voltage = <3088000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -477,12 +816,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa25";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l25: regulator-l25 {
 			regulator-name = "pm8998_l25";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3104000>;
 			regulator-max-microvolt = <3104000>;
 			qcom,init-voltage = <3104000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -490,12 +834,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa26";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l26: regulator-l26 {
 			regulator-name = "pm8998_l26";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -516,14 +865,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa28";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l28: regulator-l28 {
 			regulator-name = "pm8998_l28";
-		mboxes = <&apps_rsc 0>;
-		qcom,resource-name = "";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3008000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <3008000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
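Two notes on the regulator changes above. First, the CPR corner regulators (apc0_pwrcl_corner, apc0_l3_corner and apc1_perfcl_corner) use the regulator-min/max-microvolt fields as corner indices rather than microvolts; for apc0_pwrcl_corner, corner 6 is the first entry of qcom,cpr-corner-fmax-map and maps to 748800000 Hz in qcom,corner-frequencies. Second, each qcom,mode-threshold-currents entry gives the minimum aggregate load current, in microamps, at which the matching entry of qcom,supported-modes is selected, so <0 10000> keeps an LDO in LPM until its consumers request 10 mA or more in total. A hypothetical consumer hook-up (only the &pm8998_l13 phandle comes from this patch; the node name and compatible string are placeholders):

	&soc {
		example_consumer {
			compatible = "vendor,example-consumer";
			vdd-supply = <&pm8998_l13>;
		};
	};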
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index 221eb38..0f31c0a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -29,7 +29,7 @@
 	};
 };
 
-&usb3 {
+&usb0 {
 	/delete-property/ qcom,usb-dbm;
 	qcom,charging-disabled;
 	dwc3@a600000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 663ff7e..80f34bf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -17,11 +17,8 @@
 
 	vdda-phy-supply = <&pm8998_l1>;
 	vdda-pll-supply = <&pm8998_l2>;
-	vddp-ref-clk-supply = <&pm8998_l26>;
 	vdda-phy-max-microamp = <44000>;
 	vdda-pll-max-microamp = <14600>;
-	vddp-ref-clk-max-microamp = <100>;
-	vddp-ref-clk-always-on;
 
 	status = "ok";
 };
@@ -38,6 +35,9 @@
 	vcc-max-microamp = <600000>;
 	vccq2-max-microamp = <600000>;
 
+	qcom,vddp-ref-clk-supply = <&pm8998_l26>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
 	qcom,disable-lpm;
 	rpm-level = <0>;
 	spm-level = <0>;
@@ -109,16 +109,21 @@
 	};
 };
 
+&apc0_cpr {
+	qcom,cpr-ignore-invalid-fuses;
+};
+
+&apc1_cpr {
+	qcom,cpr-ignore-invalid-fuses;
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qrbtc-sdm845";
 
 	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
 	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vddp-ref-clk-supply = <&pm8998_l2>;
 	vdda-phy-max-microamp = <62900>;
 	vdda-pll-max-microamp = <18300>;
-	vddp-ref-clk-max-microamp = <100>;
-	vddp-ref-clk-always-on;
 
 	status = "ok";
 };
@@ -134,6 +139,9 @@
 	vcc-max-microamp = <300000>;
 	vccq2-max-microamp = <300000>;
 
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
 	qcom,disable-lpm;
 	rpm-level = <0>;
 	spm-level = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 7144acd..2ff9b2f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -10,7 +10,279 @@
  * GNU General Public License for more details.
  */
 
+#include "dsi-panel-sim-video.dtsi"
+#include "dsi-panel-sim-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-video.dtsi"
+#include "dsi-panel-sim-dualmipi-cmd.dtsi"
+#include "dsi-panel-sharp-dsc-4k-video.dtsi"
+#include "dsi-panel-sharp-dsc-4k-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi"
+#include "dsi-panel-sharp-1080p-cmd.dtsi"
+#include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi"
+#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
+
 &soc {
+	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "lab";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+		};
+
+		qcom,panel-supply-entry@2 {
+			reg = <2>;
+			qcom,supply-name = "ibb";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+	};
+
+	dsi_panel_pwr_supply_no_labibb: dsi_panel_pwr_supply_no_labibb {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+	};
+
+	dsi_panel_pwr_supply_vdd_no_labibb: dsi_panel_pwr_supply_vdd_no_labibb {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "vdd";
+			qcom,supply-min-voltage = <3000000>;
+			qcom,supply-max-voltage = <3000000>;
+			qcom,supply-enable-load = <857000>;
+			qcom,supply-disable-load = <0>;
+			qcom,supply-post-on-sleep = <0>;
+		};
+	};
+
+	dsi_sharp_4k_dsc_video_display: qcom,dsi-display@0 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sharp_4k_dsc_video_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_sharp_4k_dsc_video>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_sharp_4k_dsc_cmd_display: qcom,dsi-display@1 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sharp_4k_dsc_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_sharp_4k_dsc_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_sharp_1080_cmd_display: qcom,dsi-display@2 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sharp_1080_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_sharp_1080_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_dual_sharp_1080_120hz_cmd_display: qcom,dsi-display@3 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sharp_1080_120hz_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_dual_sharp_1080_120hz_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_dual_nt35597_truly_video_display: qcom,dsi-display@4 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt35597_truly_video_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt35597_truly_video>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_dual_nt35597_truly_cmd_display: qcom,dsi-display@5 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt35597_truly_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt35597_truly_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_nt35597_truly_dsc_cmd_display: qcom,dsi-display@6 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_nt35597_truly_dsc_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_nt35597_truly_dsc_video_display: qcom,dsi-display@7 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_nt35597_truly_dsc_video_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_video>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -21,3 +293,45 @@
 &mdss_mdp {
 	connectors = <&sde_wb>;
 };
+
+&dsi_dual_nt35597_truly_video {
+	qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+	qcom,mdss-dsi-t-clk-post = <0x0D>;
+	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+	qcom,mdss-dsi-t-clk-post = <0x0D>;
+	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+	qcom,mdss-dsi-t-clk-post = <0x0b>;
+	qcom,mdss-dsi-t-clk-pre = <0x23>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+	qcom,mdss-dsi-t-clk-post = <0x0b>;
+	qcom,mdss-dsi-t-clk-pre = <0x23>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
+	qcom,mdss-dsi-t-clk-post = <0x0a>;
+	qcom,mdss-dsi-t-clk-pre = <0x1e>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
+	qcom,mdss-dsi-t-clk-post = <0x0a>;
+	qcom,mdss-dsi-t-clk-pre = <0x1e>;
+};
+
+&dsi_dual_sharp_1080_120hz_cmd {
+	qcom,mdss-dsi-panel-timings = [00 24 09 09 26 24 09 09 06 03 04];
+	qcom,mdss-dsi-t-clk-post = <0x0f>;
+	qcom,mdss-dsi-t-clk-pre = <0x36>;
+};
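Note: the dsi-display nodes above wire each display to vddio/lab/ibb supplies, but the panel nodes themselves are not tied to a panel power-supply list in this file. A board dtsi would typically do that by overriding the panel label; a hypothetical sketch (the &dsi_dual_nt35597_truly_cmd label and the &dsi_panel_pwr_supply node are defined in this file, while the qcom,panel-supply-entries property name is an assumption about the downstream dsi-panel binding):

	&dsi_dual_nt35597_truly_cmd {
		qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
	};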
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index ca3c4fa..d99e6de 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -13,7 +13,7 @@
 &soc {
 	mdss_mdp: qcom,mdss_mdp@ae00000 {
 		compatible = "qcom,sde-kms";
-		reg = <0x0ae00000 0x81a24>,
+		reg = <0x0ae00000 0x81d40>,
 		      <0x0aeb0000 0x2008>;
 		reg-names = "mdp_phys",
 			"vbif_phys";
@@ -77,6 +77,10 @@
 		qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>;
 		qcom,sde-dsc-size = <0x140>;
 
+		qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>;
+		qcom,sde-dither-version = <0x00010000>;
+		qcom,sde-dither-size = <0x20>;
+
 		qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
 
 		qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
@@ -108,6 +112,7 @@
 		qcom,sde-sspp-qseed-off = <0xa00>;
 		qcom,sde-mixer-linewidth = <2560>;
 		qcom,sde-sspp-linewidth = <2560>;
+		qcom,sde-wb-linewidth = <4096>;
 		qcom,sde-mixer-blendstages = <0xb>;
 		qcom,sde-highest-bank-bit = <0x2>;
 		qcom,sde-panic-per-pipe;
@@ -123,6 +128,8 @@
 		qcom,sde-vbif-size = <0x1040>;
 		qcom,sde-vbif-id = <0>;
 
+		qcom,sde-inline-rotator = <&mdss_rotator 0>;
+
 		qcom,sde-sspp-vig-blocks {
 			qcom,sde-vig-csc-off = <0x1a00>;
 			qcom,sde-vig-qseed-off = <0xa00>;
@@ -200,8 +207,12 @@
 		reg-names = "mdp_phys",
 			"rot_vbif_phys";
 
+		#list-cells = <1>;
+
 		qcom,mdss-rot-mode = <1>;
 		qcom,mdss-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-malsize = <1>;
+		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
@@ -234,6 +245,11 @@
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
 
+		qcom,mdss-sbuf-headroom = <20>;
+
+		cache-slice-names = "rotator";
+		cache-slices = <&llcc 4>;
+
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
 			iommus = <&apps_smmu 0x1090>;
@@ -247,4 +263,157 @@
 			gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
 		};
 	};
+
+	mdss_dsi0: qcom,mdss_dsi_ctrl0@ae94000 {
+		compatible = "qcom,dsi-ctrl-hw-v2.0";
+		label = "dsi-ctrl-0";
+		status = "disabled";
+		cell-index = <0>;
+		reg =   <0xae94000 0x400>;
+		reg-names = "dsi_ctrl";
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <4 0>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
+					"pixel_clk", "pixel_clk_rcg";
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <925000>;
+				qcom,supply-max-voltage = <925000>;
+				qcom,supply-enable-load = <17000>;
+				qcom,supply-disable-load = <32>;
+			};
+
+			qcom,ctrl-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <18160>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
+	mdss_dsi1: qcom,mdss_dsi_ctrl1@ae96000 {
+		compatible = "qcom,dsi-ctrl-hw-v2.0";
+		label = "dsi-ctrl-1";
+		status = "disabled";
+		cell-index = <1>;
+		reg =   <0xae96000 0x400>;
+		reg-names = "dsi_ctrl";
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <5 0>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
+				"pixel_clk", "pixel_clk_rcg";
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <925000>;
+				qcom,supply-max-voltage = <925000>;
+				qcom,supply-enable-load = <17000>;
+				qcom,supply-disable-load = <32>;
+			};
+
+			qcom,ctrl-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <18160>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
+	mdss_dsi_phy0: qcom,mdss_dsi_phy0@ae94400 {
+		compatible = "qcom,dsi-phy-v3.0";
+		status = "disabled";
+		label = "dsi-phy-0";
+		cell-index = <0>;
+		reg = <0xae94400 0x7c0>;
+		reg-names = "dsi_phy";
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <2500>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
+	mdss_dsi_phy1: qcom,mdss_dsi_phy1@ae96400 {
+		compatible = "qcom,dsi-phy-v3.0";
+		status = "disabled";
+		label = "dsi-phy-1";
+		cell-index = <1>;
+		reg = <0xae96400 0x7c0>;
+		reg-names = "dsi_phy";
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <2500>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
 };
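Note: both DSI controllers and both DSI PHYs above are introduced with status = "disabled". A configuration that actually drives a DSI panel would be expected to enable the required pair by label, for example (labels as defined above; nothing else is assumed):

	&mdss_dsi0 {
		status = "ok";
	};

	&mdss_dsi_phy0 {
		status = "ok";
	};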
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 06879c2..1c66f89 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -13,17 +13,20 @@
 
 #include <dt-bindings/clock/qcom,gcc-sdm845.h>
 &soc {
-	usb3: ssusb@a600000 {
+	/* DWC3 controller for the primary USB port */
+	usb0: ssusb@a600000 {
 		compatible = "qcom,dwc-usb3-msm";
 		reg = <0x0a600000 0xf8c00>,
 		      <0x088ee000 0x400>;
 		reg-names = "core_base", "ahb2phy_base";
+		iommus = <&apps_smmu 0x740>;
+		qcom,smmu-s1-bypass;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
 
-		interrupts = <0 346 0>, <0 130 0>;
-		interrupt-names = "hs_phy_irq", "pwr_event_irq";
+		interrupts = <0 489 0>, <0 130 0>, <0 486 0>;
+		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
 
 		USB3_GDSC-supply = <&usb30_prim_gdsc>;
 		qcom,usb-dbm = <&dbm_1p5>;
@@ -55,12 +58,15 @@
 			interrupts = <0 133 0>;
 			usb-phy = <&qusb_phy0>, <&usb_nop_phy>;
 			tx-fifo-resize;
+			linux,sysdev_is_parent;
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			maximum-speed = "high-speed";
 		};
 	};
 
+	/* QUSB2 PHY for the primary USB port */
 	qusb_phy0: qusb@88e2000 {
 		compatible = "qcom,qusb2phy-v2";
 		reg = <0x088e2000 0x400>;
@@ -71,21 +77,33 @@
 		vdda33-supply = <&pm8998_l24>;
 		qcom,vdd-voltage-level = <0 880000 880000>;
 		qcom,qusb-phy-init-seq =
-				/* <value reg_offset> */
-					<0x13 0x04
-					0x7c 0x18c
-					0x80 0x2c
-					0x0a 0x184
-					0x00 0x240>;
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x00 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x32 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x00 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+
 		phy_type= "utmi";
-		clocks = <&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
-			 <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
-		clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk";
+		clock-names = "ref_clk_src", "cfg_ahb_clk";
 
-		resets = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_BCR>;
+		resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>;
 		reset-names = "phy_reset";
-
 	};
 
 	dbm_1p5: dbm@a8f8000 {
@@ -97,4 +115,230 @@
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
+
+	/* DWC3 controller for the secondary USB port */
+	usb1: ssusb@a800000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x0a800000 0xf8c00>,
+		      <0x088ee000 0x400>;
+		reg-names = "core_base", "ahb2phy_base";
+		iommus = <&apps_smmu 0x760>;
+		qcom,smmu-s1-bypass;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 491 0>, <0 135 0>, <0 487 0>;
+		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
+
+		USB3_GDSC-supply = <&usb30_sec_gdsc>;
+		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+
+		clocks = <&clock_gcc GCC_USB30_SEC_MASTER_CLK>,
+			 <&clock_gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
+			 <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
+			 <&clock_gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
+			 <&clock_gcc GCC_USB30_SEC_SLEEP_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+				"utmi_clk", "sleep_clk", "cfg_ahb_clk", "xo";
+
+		qcom,core-clk-rate = <133333333>;
+		qcom,core-clk-rate-hs = <66666667>;
+
+		resets = <&clock_gcc GCC_USB30_SEC_BCR>;
+		reset-names = "core_reset";
+		status = "disabled";
+
+		dwc3@a800000 {
+			compatible = "snps,dwc3";
+			reg = <0x0a800000 0xcd00>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 138 0>;
+			usb-phy = <&qusb_phy1>, <&usb_qmp_phy>;
+			tx-fifo-resize;
+			linux,sysdev_is_parent;
+			snps,disable-clk-gating;
+			snps,has-lpm-erratum;
+			snps,hird-threshold = /bits/ 8 <0x10>;
+		};
+	};
+
+	/* QUSB2 PHY for the secondary USB port */
+	qusb_phy1: qusb@88e3000 {
+		compatible = "qcom,qusb2phy-v2";
+		reg = <0x088e3000 0x400>;
+		reg-names = "qusb_phy_base";
+
+		vdd-supply = <&pm8998_l1>;
+		vdda18-supply = <&pm8998_l12>;
+		vdda33-supply = <&pm8998_l24>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			   0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			   0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			   0x80 0x2c  /* PLL_CMODE */
+			   0x0a 0x184 /* PLL_LOCK_DELAY */
+			   0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			   0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			   0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			   0x21 0x214 /* PWR_CTRL2 */
+			   0x00 0x220 /* IMP_CTRL1 */
+			   0x58 0x224 /* IMP_CTRL2 */
+			   0x32 0x240 /* TUNE1 */
+			   0x29 0x244 /* TUNE2 */
+			   0xca 0x248 /* TUNE3 */
+			   0x04 0x24c /* TUNE4 */
+			   0x00 0x250 /* TUNE5 */
+			   0x00 0x23c /* CHG_CTRL2 */
+			   0x22 0x210>; /* PWR_CTRL1 */
+
+		phy_type= "utmi";
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "ref_clk_src", "cfg_ahb_clk";
+
+		resets = <&clock_gcc GCC_QUSB2PHY_SEC_BCR>;
+		reset-names = "phy_reset";
+		status = "disabled";
+	};
+
+	/* QMP (SuperSpeed) PHY for the secondary USB port */
+	usb_qmp_phy: ssphy@88eb000 {
+		compatible = "qcom,usb-ssphy-qmp-v2";
+		reg = <0x88eb000 0x1000>,
+			<0x01fcbff0 0x4>;
+		reg-names = "qmp_phy_base",
+			    "vls_clamp_reg";
+
+		vdd-supply = <&pm8998_l1>;
+		core-supply = <&pm8998_l26>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vbus-valid-override;
+		qcom,qmp-phy-init-seq =
+		/* <reg_offset, value, delay> */
+			<0x048 0x07 0x00 /* QSERDES_COM_PLL_IVCO */
+			 0x080 0x14 0x00 /* QSERDES_COM_SYSCLK_EN_SEL */
+			 0x034 0x04 0x00 /* QSERDES_COM_BIAS_EN_CLKBUFLR_EN */
+			 0x138 0x30 0x00 /* QSERDES_COM_CLK_SELECT */
+			 0x03c 0x02 0x00 /* QSERDES_COM_SYS_CLK_CTRL */
+			 0x08c 0x08 0x00 /* QSERDES_COM_RESETSM_CNTRL2 */
+			 0x15c 0x06 0x00 /* QSERDES_COM_CMN_CONFIG */
+			 0x164 0x01 0x00 /* QSERDES_COM_SVS_MODE_CLK_SEL */
+			 0x13c 0x80 0x00 /* QSERDES_COM_HSCLK_SEL */
+			 0x0b0 0x82 0x00 /* QSERDES_COM_DEC_START_MODE0 */
+			 0x0b8 0xab 0x00 /* QSERDES_COM_DIV_FRAC_START1_MODE0 */
+			 0x0bc 0xea 0x00 /* QSERDES_COM_DIV_FRAC_START2_MODE0 */
+			 0x0c0 0x02 0x00 /* QSERDES_COM_DIV_FRAC_START3_MODE0 */
+			 0x060 0x06 0x00 /* QSERDES_COM_CP_CTRL_MODE0 */
+			 0x068 0x16 0x00 /* QSERDES_COM_PLL_RCTRL_MODE0 */
+			 0x070 0x36 0x00 /* QSERDES_COM_PLL_CCTRL_MODE0 */
+			 0x0dc 0x00 0x00 /* QSERDES_COM_INTEGLOOP_GAIN1_MODE0 */
+			 0x0d8 0x3f 0x00 /* QSERDES_COM_INTEGLOOP_GAIN0_MODE0 */
+			 0x0f8 0x01 0x00 /* QSERDES_COM_VCO_TUNE2_MODE0 */
+			 0x0f4 0xc9 0x00 /* QSERDES_COM_VCO_TUNE1_MODE0 */
+			 0x148 0x0a 0x00 /* QSERDES_COM_CORECLK_DIV_MODE0 */
+			 0x0a0 0x00 0x00 /* QSERDES_COM_LOCK_CMP3_MODE0 */
+			 0x09c 0x34 0x00 /* QSERDES_COM_LOCK_CMP2_MODE0 */
+			 0x098 0x15 0x00 /* QSERDES_COM_LOCK_CMP1_MODE0 */
+			 0x090 0x04 0x00 /* QSERDES_COM_LOCK_CMP_EN */
+			 0x154 0x00 0x00 /* QSERDES_COM_CORE_CLK_EN */
+			 0x094 0x00 0x00 /* QSERDES_COM_LOCK_CMP_CFG */
+			 0x0f0 0x00 0x00 /* QSERDES_COM_VCO_TUNE_MAP */
+			 0x040 0x0a 0x00 /* QSERDES_COM_SYSCLK_BUF_ENABLE */
+			 0x0d0 0x80 0x00 /* QSERDES_COM_INTEGLOOP_INITVAL */
+			 0x010 0x01 0x00 /* QSERDES_COM_SSC_EN_CENTER */
+			 0x01c 0x31 0x00 /* QSERDES_COM_SSC_PER1 */
+			 0x020 0x01 0x00 /* QSERDES_COM_SSC_PER2 */
+			 0x014 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER1 */
+			 0x018 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER2 */
+			 0x024 0x85 0x00 /* QSERDES_COM_SSC_STEP_SIZE1 */
+			 0x028 0x07 0x00 /* QSERDES_COM_SSC_STEP_SIZE2 */
+			 0x4c0 0x0c 0x00 /* QSERDES_RX_VGA_CAL_CNTRL2 */
+			 0x564 0x50 0x00 /* QSERDES_RX_RX_MODE_00 */
+			 0x430 0x0b 0x00 /* QSERDES_RX_UCDR_FASTLOCK_FO_GAIN */
+			 0x4d4 0x0e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 */
+			 0x4d8 0x4e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 */
+			 0x4dc 0x18 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 */
+			 0x4f8 0x77 0x00 /* RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+			 0x4fc 0x80 0x00 /* RX_RX_OFFSET_ADAPTOR_CNTRL2 */
+			 0x504 0x03 0x00 /* QSERDES_RX_SIGDET_CNTRL */
+			 0x50c 0x1c 0x00 /* QSERDES_RX_SIGDET_DEGLITCH_CNTRL */
+			 0x434 0x75 0x00 /* RX_UCDR_SO_SATURATION_AND_ENABLE */
+			 0x444 0x80 0x00 /* QSERDES_RX_UCDR_PI_CONTROLS */
+			 0x408 0x0a 0x00 /* QSERDES_RX_UCDR_FO_GAIN */
+			 0x40c 0x06 0x00 /* QSERDES_RX_UCDR_SO_GAIN */
+			 0x500 0x00 0x00 /* QSERDES_RX_SIGDET_ENABLES */
+			 0x260 0x10 0x00 /* QSERDES_TX_HIGHZ_DRVR_EN */
+			 0x2a4 0x12 0x00 /* QSERDES_TX_RCV_DETECT_LVL_2 */
+			 0x28c 0xc6 0x00 /* QSERDES_TX_LANE_MODE_1 */
+			 0x248 0x09 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */
+			 0x244 0x0d 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */
+			 0x8c8 0x83 0x00 /* USB3_UNI_PCS_FLL_CNTRL2 */
+			 0x8cc 0x09 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_L */
+			 0x8d0 0xa2 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_H_TOL */
+			 0x8d4 0x40 0x00 /* USB3_UNI_PCS_FLL_MAN_CODE */
+			 0x8c4 0x02 0x00 /* USB3_UNI_PCS_FLL_CNTRL1 */
+			 0x864 0x1b 0x00 /* USB3_UNI_PCS_POWER_STATE_CONFIG2 */
+			 0x80c 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V0 */
+			 0x810 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V1 */
+			 0x814 0xb5 0x00 /* USB3_UNI_PCS_TXMGN_V2 */
+			 0x818 0x4c 0x00 /* USB3_UNI_PCS_TXMGN_V3 */
+			 0x81c 0x64 0x00 /* USB3_UNI_PCS_TXMGN_V4 */
+			 0x820 0x6a 0x00 /* USB3_UNI_PCS_TXMGN_LS */
+			 0x824 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V0 */
+			 0x828 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V0 */
+			 0x82c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V1 */
+			 0x830 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V1 */
+			 0x834 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V2 */
+			 0x838 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V2 */
+			 0x83c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V3 */
+			 0x840 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V3 */
+			 0x844 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V4 */
+			 0x848 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V4 */
+			 0x84c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_LS */
+			 0x850 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_LS */
+			 0x85c 0x02 0x00 /* USB3_UNI_PCS_RATE_SLEW_CNTRL */
+			 0x8a0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */
+			 0x88c 0x44 0x00 /* USB3_UNI_PCS_TSYNC_RSYNC_TIME */
+			 0x880 0xd1 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG1 */
+			 0x884 0x1f 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG2 */
+			 0x888 0x47 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG3 */
+			 0x870 0xe7 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L */
+			 0x874 0x03 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H */
+			 0x878 0x40 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_L */
+			 0x87c 0x00 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_H */
+			 0x9d8 0xba 0x00 /* USB3_UNI_PCS_RX_SIGDET_LVL */
+			 0x8b8 0x75 0x00 /* RXEQTRAINING_WAIT_TIME */
+			 0x8b0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */
+			 0x8bc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */
+			 0xa0c 0x21 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG1 */
+			 0xa10 0x60 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG2 */
+			 0xffffffff 0xffffffff 0x00>;
+
+		qcom,qmp-phy-reg-offset =
+				<0x974 /* USB3_UNI_PCS_PCS_STATUS */
+				 0x8d8 /* USB3_UNI_PCS_AUTONOMOUS_MODE_CTRL */
+				 0x8dc /* USB3_UNI_PCS_LFPS_RXTERM_IRQ_CLEAR */
+				 0x804 /* USB3_UNI_PCS_POWER_DOWN_CONTROL */
+				 0x800 /* USB3_UNI_PCS_SW_RESET */
+				 0x808>; /* USB3_UNI_PCS_START_CONTROL */
+
+		clocks = <&clock_gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_PHY_PIPE_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>;
+
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"ref_clk";
+
+		resets = <&clock_gcc GCC_USB3_PHY_SEC_BCR>,
+			<&clock_gcc GCC_USB3PHY_PHY_SEC_BCR>;
+		reset-names = "phy_reset", "phy_phy_reset";
+		status = "disabled";
+	};
 };
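
The qcom,qusb-phy-init-seq property above is a flat array of <value reg_offset> pairs, while qcom,qmp-phy-init-seq uses <reg_offset value delay> triplets terminated by an all-ones entry. A minimal sketch of how a PHY driver might walk the QMP-style table is shown below; the function name and the assumption that the property has already been read out of DT into a u32 array are illustrative only, not the actual qusb2/qmp driver code.

/*
 * Hedged sketch: program a QMP-style init table of <offset, value,
 * delay_us> triplets until the 0xffffffff end marker. phy_base would be
 * the ioremap()ed "qmp_phy_base" region; the table layout follows the
 * comment in the node above.
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

static void qmp_phy_write_init_seq(void __iomem *phy_base,
                                   const u32 *seq, int len)
{
        int i;

        for (i = 0; i + 2 < len; i += 3) {
                u32 offset = seq[i];
                u32 value = seq[i + 1];
                u32 wait_us = seq[i + 2];

                if (offset == 0xffffffff)       /* end-of-table marker */
                        break;

                writel_relaxed(value, phy_base + offset);
                if (wait_us)
                        udelay(wait_us);
        }
        mb();   /* ensure all PHY register writes have landed */
}

The QUSB2 table is handled the same way, except that each entry is a <value, offset> pair and there is no per-write delay field.
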
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index a2dd422..af88108 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -16,120 +16,120 @@
 
 &soc {
 	msm_vidc: qcom,vidc@aa00000 {
-		  compatible = "qcom,msm-vidc";
-		  status = "disabled";
-		  reg = <0xaa00000 0x200000>;
-		  interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
-		  qcom,hfi = "venus";
-		  qcom,firmware-name = "venus";
-		  qcom,max-secure-instances = <5>;
-		  qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
+		compatible = "qcom,msm-vidc";
+		status = "disabled";
+		reg = <0xaa00000 0x200000>;
+		interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hfi = "venus";
+		qcom,firmware-name = "venus";
+		qcom,max-secure-instances = <5>;
+		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
 
-		  /* Supply */
-		  venus-supply = <&venus_gdsc>;
-		  venus-core0-supply = <&vcodec0_gdsc>;
-		  venus-core1-supply = <&vcodec1_gdsc>;
+		/* Supply */
+		venus-supply = <&venus_gdsc>;
+		venus-core0-supply = <&vcodec0_gdsc>;
+		venus-core1-supply = <&vcodec1_gdsc>;
 
-		  /* Clocks */
-		  clock-names = "core_clk", "iface_clk", "bus_clk",
-			  "core0_clk", "core1_clk";
-		  clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
-			 <&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
-			 <&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
-			 <&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
-			 <&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>;
-		  qcom,proxy-clock-names = "core_clk", "iface_clk",
-			  "bus_clk", "core0_clk", "core1_clk";
-		  qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0>;
+		/* Clocks */
+		clock-names = "core_clk", "iface_clk", "bus_clk",
+			"core0_clk", "core1_clk";
+		clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
+			<&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>;
+		qcom,proxy-clock-names = "core_clk", "iface_clk",
+			"bus_clk", "core0_clk", "core1_clk";
+		qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0>;
+		qcom,allowed-clock-rates = <200000000 320000000 380000000
+			444000000 533000000>;
+		qcom,clock-freq-tbl {
+			qcom,profile-enc {
+				qcom,codec-mask = <0x55555555>;
+				qcom,vpp-cycles-per-mb = <675>;
+				qcom,vsp-cycles-per-mb = <125>;
+				qcom,low-power-cycles-per-mb = <320>;
+			};
+			qcom,profile-dec {
+				qcom,codec-mask = <0xffffffff>;
+				qcom,vpp-cycles-per-mb = <200>;
+				qcom,vsp-cycles-per-mb = <50>;
+			};
+		};
 
-		  qcom,load-freq-tbl =
-			  /* Encoders */
-			  <1944000 444000000 0x55555555>, /* 4k UHD @ 60 */
-			  < 244800 200000000 0x55555555>, /* 720p @ 30 */
-			  /* Decoders */
-			  <1944000 444000000 0xffffffff>, /* 4k UHD @ 60 */
-			  < 244800 200000000 0xffffffff>; /* 1080p @ 30 */
+		/* Buses */
+		bus_cnoc {
+			compatible = "qcom,msm-vidc,bus";
+			label = "cnoc";
+			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 1000>;
+		};
 
-		  /* Buses */
-		  bus_cnoc {
-			  compatible = "qcom,msm-vidc,bus";
-			  label = "cnoc";
-			  qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
-			  qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
-			  qcom,bus-governor = "performance";
-			  qcom,bus-range-kbps = <1000 1000>;
-		  };
-		  venus_bus_ddr {
-			  compatible = "qcom,msm-vidc,bus";
-			  label = "venus-ddr";
-			  qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
-			  qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			  qcom,bus-governor = "performance";
-			  qcom,bus-range-kbps = <1000 3388000>;
-		  };
-		  arm9_bus_ddr {
-			  compatible = "qcom,msm-vidc,bus";
-			  label = "venus-arm9-ddr";
-			  qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
-			  qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			  qcom,bus-governor = "performance";
-			  qcom,bus-range-kbps = <1000 1000>;
-		  };
+		venus_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 3388000>;
+		};
+		arm9_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-arm9-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 1000>;
+		};
 
-		  /* MMUs */
-		  non_secure_cb {
-			  compatible = "qcom,msm-vidc,context-bank";
-			  label = "venus_ns";
-			  iommus =
-				  <&apps_smmu 0x10a0>,
-				  <&apps_smmu 0x10a8>,
-				  <&apps_smmu 0x10b0>;
-			  buffer-types = <0xfff>;
-			  virtual-addr-pool = <0x70800000 0x6f800000>;
-		  };
+		/* MMUs */
+		non_secure_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_ns";
+			iommus =
+				<&apps_smmu 0x10a0>,
+				<&apps_smmu 0x10a8>,
+				<&apps_smmu 0x10b0>;
+			buffer-types = <0xfff>;
+			virtual-addr-pool = <0x70800000 0x6f800000>;
+		};
 
-		  firmware_cb {
-			  compatible = "qcom,msm-vidc,context-bank";
-			  qcom,fw-context-bank;
-			  iommus =
-				  <&apps_smmu 0x10b2>;
-		  };
+		secure_bitstream_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_bitstream";
+			iommus =
+				<&apps_smmu 0x10a1>,
+				<&apps_smmu 0x10a9>,
+				<&apps_smmu 0x10a5>,
+				<&apps_smmu 0x10ad>;
+			buffer-types = <0x241>;
+			virtual-addr-pool = <0x4b000000 0x25800000>;
+			qcom,secure-context-bank;
+		};
 
-		  secure_bitstream_cb {
-			  compatible = "qcom,msm-vidc,context-bank";
-			  label = "venus_sec_bitstream";
-			  iommus =
-				  <&apps_smmu 0x10a1>,
-				  <&apps_smmu 0x10a9>,
-				  <&apps_smmu 0x10a5>,
-				  <&apps_smmu 0x10ad>;
-			  buffer-types = <0x241>;
-			  virtual-addr-pool = <0x4b000000 0x25800000>;
-			  qcom,secure-context-bank;
-		  };
+		secure_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_pixel";
+			iommus =
+				<&apps_smmu 0x10a3>,
+				<&apps_smmu 0x10ab>;
+			buffer-types = <0x106>;
+			virtual-addr-pool = <0x25800000 0x25800000>;
+			qcom,secure-context-bank;
+		};
 
-		  secure_pixel_cb {
-			  compatible = "qcom,msm-vidc,context-bank";
-			  label = "venus_sec_pixel";
-			  iommus =
-				  <&apps_smmu 0x10a3>,
-				  <&apps_smmu 0x10ab>;
-			  buffer-types = <0x106>;
-			  virtual-addr-pool = <0x25800000 0x25800000>;
-			  qcom,secure-context-bank;
-		  };
-
-		  secure_non_pixel_cb {
-			  compatible = "qcom,msm-vidc,context-bank";
-			  label = "venus_sec_non_pixel";
-			  iommus =
-				  <&apps_smmu 0x10a4>,
-				  <&apps_smmu 0x10ac>,
-				  <&apps_smmu 0x10b4>;
-			  buffer-types = <0x480>;
-			  virtual-addr-pool = <0x1000000 0x24800000>;
-			  qcom,secure-context-bank;
-		  };
-
-	  };
+		secure_non_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_non_pixel";
+			iommus =
+				<&apps_smmu 0x10a4>,
+				<&apps_smmu 0x10ac>,
+				<&apps_smmu 0x10b4>;
+			buffer-types = <0x480>;
+			virtual-addr-pool = <0x1000000 0x24800000>;
+			qcom,secure-context-bank;
+		};
+	};
 };
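
The load figures in this file are in macroblocks per second: a session contributes (width/16) * (height/16) * fps, and the required core clock is roughly that load multiplied by the matching cycles-per-mb entry, rounded up to a rate from qcom,allowed-clock-rates. For example, 4K UHD at 60 fps is 240 * 135 * 60 = 1944000 MB/s, matching the old load-freq-tbl entry; at the decoder's 200 vpp-cycles-per-mb that is about 389 MHz, which lands in the 444 MHz bucket the old table also used. A small, hedged sketch of that arithmetic (plain userspace C, function names are illustrative, not the msm_vidc implementation):

/* Hedged sketch of the macroblock-load arithmetic behind the
 * clock-freq-tbl above.
 */
#include <stdio.h>

static unsigned long mbs_per_sec(unsigned int w, unsigned int h,
                                 unsigned int fps)
{
        return (unsigned long)((w + 15) / 16) * ((h + 15) / 16) * fps;
}

int main(void)
{
        unsigned long load = mbs_per_sec(3840, 2160, 60);  /* 1944000 */
        unsigned long dec_hz = load * 200;  /* decode vpp-cycles-per-mb */
        unsigned long enc_hz = load * 675;  /* encode vpp-cycles-per-mb */

        printf("load=%lu MB/s, decode=%lu Hz, encode=%lu Hz\n",
               load, dec_hz, enc_hz);
        return 0;
}

The raw encode figure overshoots the 533 MHz ceiling, which is presumably where splitting the work across the two vcodec cores, or the lower low-power-cycles-per-mb value, comes in.
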
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index eb2c066..f591cca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -42,6 +42,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x0>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_0>;
@@ -67,169 +68,176 @@
 			};
 		};
 
-		CPU1: cpu@1 {
+		CPU1: cpu@100 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x100>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
-			next-level-cache = <&L2_1>;
-			L2_1: l2-cache {
+			next-level-cache = <&L2_100>;
+			L2_100: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
 			      cache-level = <2>;
 			      next-level-cache = <&L3_0>;
 			};
-			L1_I_1: l1-icache {
-				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
-			};
-			L1_D_1: l1-dcache {
-				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
-			};
-		};
-
-		CPU2: cpu@2 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x200>;
-			enable-method = "psci";
-			cache-size = <0x8000>;
-			cpu-release-addr = <0x0 0x90000000>;
-			next-level-cache = <&L2_2>;
-			L2_2: l2-cache {
-			      compatible = "arm,arch-cache";
-			      cache-size = <0x20000>;
-			      cache-level = <2>;
-			      next-level-cache = <&L3_0>;
-			};
-			L1_I_2: l1-icache {
-				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
-			};
-			L1_D_2: l1-dcache {
-				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
-			};
-		};
-
-		CPU3: cpu@3 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x300>;
-			enable-method = "psci";
-			cache-size = <0x8000>;
-			cpu-release-addr = <0x0 0x90000000>;
-			next-level-cache = <&L2_3>;
-			L2_3: l2-cache {
-			      compatible = "arm,arch-cache";
-			      cache-size = <0x20000>;
-			      cache-level = <2>;
-			      next-level-cache = <&L3_0>;
-			};
-			L1_I_3: l1-icache {
-				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
-			};
-			L1_D_3: l1-dcache {
-				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
-			};
-		};
-
-		CPU4: cpu@100 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x400>;
-			enable-method = "psci";
-			cache-size = <0x20000>;
-			cpu-release-addr = <0x0 0x90000000>;
-			next-level-cache = <&L2_4>;
-			L2_4: l2-cache {
-			      compatible = "arm,arch-cache";
-			      cache-size = <0x40000>;
-			      cache-level = <2>;
-			      next-level-cache = <&L3_0>;
-			};
 			L1_I_100: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x9000>;
 			};
 			L1_D_100: l1-dcache {
 				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU2: cpu@200 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x200>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_200>;
+			L2_200: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x20000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+			L1_I_200: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_200: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU3: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x300>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_300>;
+			L2_300: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x20000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+			L1_I_300: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_300: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU4: cpu@400 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x400>;
+			enable-method = "psci";
+			efficiency = <1740>;
+			cache-size = <0x20000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_400>;
+			L2_400: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x40000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+			L1_I_400: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_400: l1-dcache {
+				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x12000>;
 			};
 		};
 
-		CPU5: cpu@101 {
+		CPU5: cpu@500 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x500>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
-			next-level-cache = <&L2_5>;
-			L2_5: l2-cache {
+			next-level-cache = <&L2_500>;
+			L2_500: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
 			      cache-level = <2>;
 			      next-level-cache = <&L3_0>;
 			};
-			L1_I_101: l1-icache {
+			L1_I_500: l1-icache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x12000>;
 			};
-			L1_D_101: l1-dcache {
+			L1_D_500: l1-dcache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x12000>;
 			};
 		};
 
-		CPU6: cpu@102 {
+		CPU6: cpu@600 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x600>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
-			next-level-cache = <&L2_6>;
-			L2_6: l2-cache {
+			next-level-cache = <&L2_600>;
+			L2_600: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
 			      cache-level = <2>;
 			      next-level-cache = <&L3_0>;
 			};
-			L1_I_102: l1-icache {
+			L1_I_600: l1-icache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x12000>;
 			};
-			L1_D_102: l1-dcache {
+			L1_D_600: l1-dcache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x12000>;
 			};
 		};
 
-		CPU7: cpu@103 {
+		CPU7: cpu@700 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x700>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
-			next-level-cache = <&L2_7>;
-			L2_7: l2-cache {
+			next-level-cache = <&L2_700>;
+			L2_700: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
 			      cache-level = <2>;
 			      next-level-cache = <&L3_0>;
 			};
-			L1_I_103: l1-icache {
+			L1_I_700: l1-icache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x12000>;
 			};
-			L1_D_103: l1-dcache {
+			L1_D_700: l1-dcache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x12000>;
 			};
@@ -300,37 +308,61 @@
 		pil_modem_mem: modem_region@8b000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x8b000000 0 0x6e00000>;
+			reg = <0 0x8b000000 0 0x7300000>;
 		};
 
-		pil_video_mem: pil_video_region@91e00000 {
+		pil_video_mem: pil_video_region@92300000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x91e00000 0 0x500000>;
+			reg = <0 0x92300000 0 0x500000>;
 		};
 
-		pil_cdsp_mem: cdsp_regions@92300000 {
+		pil_cdsp_mem: cdsp_regions@92800000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x92300000 0 0x800000>;
+			reg = <0 0x92800000 0 0x800000>;
 		};
 
-		pil_adsp_mem: pil_adsp_region@92b00000 {
+		pil_adsp_mem: pil_adsp_region@93000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x92b00000 0 0x1a00000>;
+			reg = <0 0x93000000 0 0x1a00000>;
 		};
 
-		pil_slpi_mem: pil_slpi_region@94500000 {
+		pil_mba_mem: pil_mba_region@94a00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x94500000 0 0xf00000>;
+			reg = <0 0x94a00000 0 0x200000>;
 		};
 
-		pil_spss_mem: spss_region@95400000 {
+		pil_slpi_mem: pil_slpi_region@94c00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x95400000 0 0x700000>;
+			reg = <0 0x94c00000 0 0x1400000>;
+		};
+
+		pil_ipa_fw_mem: pil_ipa_fw_region@96000000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96000000 0 0x10000>;
+		};
+
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@96010000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96010000 0 0x5000>;
+		};
+
+		pil_gpu_mem: pil_gpu_region@96015000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96015000 0 0x1000>;
+		};
+
+		pil_spss_mem: spss_region@96100000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96100000 0 0x100000>;
 		};
 
 		adsp_mem: adsp_region {
@@ -338,7 +370,7 @@
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
 			reusable;
 			alignment = <0 0x400000>;
-			size = <0 0x800000>;
+			size = <0 0xc00000>;
 		};
 
 		qseecom_mem: qseecom_region {
@@ -380,6 +412,7 @@
 #include "msm-gdsc-sdm845.dtsi"
 #include "sdm845-sde.dtsi"
 #include "sdm845-sde-display.dtsi"
+#include "sdm845-qupv3.dtsi"
 
 &soc {
 	#address-cells = <1>;
@@ -492,8 +525,220 @@
 		cell-index = <0>;
 	};
 
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "cpu0_clk", "cpu4_clk";
+		clocks = <&clock_cpucc CPU0_PWRCL_CLK>,
+			 <&clock_cpucc CPU4_PERFCL_CLK>;
+
+		qcom,governor-per-policy;
+
+		qcom,cpufreq-table-0 =
+			<  300000 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  748800 >,
+			<  825600 >,
+			<  902400 >,
+			<  979200 >,
+			< 1056000 >,
+			< 1132800 >,
+			< 1209600 >,
+			< 1286400 >,
+			< 1363200 >,
+			< 1440000 >,
+			< 1516800 >,
+			< 1593600 >;
+
+		qcom,cpufreq-table-4 =
+			<  300000 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  729600 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1344000 >,
+			< 1420800 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >,
+			< 1804800 >,
+			< 1881600 >,
+			< 1958400 >;
+	};
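+
The efficiency values in the CPU nodes (1024 for the cluster-0 cores, 1740 for cluster 4) and the per-cluster maximum frequencies above are the two inputs a capacity-aware scheduler can combine; on kernels that derive capacity from the DT efficiency property, relative capacity scales roughly as efficiency times max frequency. A back-of-the-envelope calculation with the values above, as an illustration of the scaling rather than the scheduler's exact code path:

/* Rough capacity estimate from efficiency * max_freq, normalised so the
 * fastest cluster gets 1024; illustrative only.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long little = 1024ULL * 1593600;  /* efficiency * kHz */
        unsigned long long big = 1740ULL * 1958400;

        printf("little capacity ~ %llu, big capacity ~ 1024\n",
               little * 1024 / big);
        return 0;
}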
+
+	cpubw: qcom,cpubw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	bwmon: qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon4";
+		reg = <0x1436400 0x300>, <0x1436300 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 581 4>;
+		qcom,mport = <0>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&cpubw>;
+	};
+
+	memlat_cpu0: qcom,memlat-cpu0 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	memlat_cpu4: qcom,memlat-cpu4 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		status = "ok";
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	devfreq_memlat_0: qcom,cpu0-memlat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&memlat_cpu0>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,core-dev-table =
+			<  300000  762 >,
+			<  748800 1720 >,
+			<  979200 2929 >,
+			< 1209600 3879 >,
+			< 1516800 4943 >,
+			< 1593600 5931 >;
+	};
+
+	devfreq_memlat_4: qcom,cpu4-memlat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&memlat_cpu4>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,core-dev-table =
+			<  300000  762 >,
+			< 1036800 2929 >,
+			< 1190400 3879 >,
+			< 1574400 4943 >,
+			< 1804800 5931 >,
+			< 1958400 6881 >;
+	};
+
+	l3_cpu0: qcom,l3-cpu0 {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
+		governor = "performance";
+		freq-tbl-khz =
+			< 300000 >,
+			< 422400 >,
+			< 499200 >,
+			< 576000 >,
+			< 652800 >,
+			< 729600 >,
+			< 806400 >,
+			< 883200 >,
+			< 960000 >;
+	};
+
+	l3_cpu4: qcom,l3-cpu4 {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
+		governor = "performance";
+		freq-tbl-khz =
+			< 300000 >,
+			< 422400 >,
+			< 499200 >,
+			< 576000 >,
+			< 652800 >,
+			< 729600 >,
+			< 806400 >,
+			< 883200 >,
+			< 960000 >;
+	};
+
+	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&l3_cpu0>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  300000 300000 >,
+			<  748800 576000 >,
+			<  979200 652800 >,
+			< 1209600 806400 >,
+			< 1516800 883200 >,
+			< 1593600 960000 >;
+	};
+
+	devfreq_l3lat_4: qcom,cpu4-l3lat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&l3_cpu4>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  300000 300000 >,
+			< 1036800 652800 >,
+			< 1190400 806400 >,
+			< 1574400 883200 >,
+			< 1651200 960000 >;
+	};
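+
Each qcom,core-dev-table above maps a CPU frequency threshold (kHz, first column) to a device vote (MB/s for the memlat devices, L3 kHz for the l3lat monitors); the governor presumably applies the vote of the highest row whose threshold the current CPU frequency reaches. A hedged sketch of that lookup, using the CPU0 memlat table as data (struct and function names are illustrative):

/* Hedged sketch of a core-dev-table lookup: rows are <cpu_khz vote>
 * pairs in ascending cpu_khz order, as in devfreq_memlat_0 above.
 */
#include <stddef.h>
#include <stdio.h>

struct core_dev_map {
        unsigned int cpu_khz;
        unsigned int vote;
};

static const struct core_dev_map cpu0_memlat_tbl[] = {
        {  300000,  762 },
        {  748800, 1720 },
        {  979200, 2929 },
        { 1209600, 3879 },
        { 1516800, 4943 },
        { 1593600, 5931 },
};

static unsigned int map_cpufreq_to_vote(const struct core_dev_map *map,
                                        size_t rows, unsigned int cpu_khz)
{
        unsigned int vote = 0;
        size_t i;

        for (i = 0; i < rows; i++) {
                if (cpu_khz < map[i].cpu_khz)
                        break;
                vote = map[i].vote;     /* highest threshold reached so far */
        }
        return vote;
}

int main(void)
{
        /* A CPU running at 1209600 kHz maps to a 3879 MB/s vote. */
        printf("%u\n", map_cpufreq_to_vote(cpu0_memlat_tbl, 6, 1209600));
        return 0;
}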
+
+	cpu_pmu: cpu-pmu {
+		compatible = "arm,armv8-pmuv3";
+		qcom,irq-is-percpu;
+		interrupts = <1 5 4>;
+	};
+
 	clock_gcc: qcom,gcc@100000 {
-		compatible = "qcom,gcc-sdm845";
+		compatible = "qcom,gcc-sdm845", "syscon";
 		reg = <0x100000 0x1f0000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
@@ -503,7 +748,7 @@
 	};
 
 	clock_videocc: qcom,videocc@ab00000 {
-		compatible = "qcom,video_cc-sdm845";
+		compatible = "qcom,video_cc-sdm845", "syscon";
 		reg = <0xab00000 0x10000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
@@ -511,30 +756,159 @@
 		#reset-cells = <1>;
 	};
 
-	clock_camcc: qcom,camcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "camcc_clocks";
+	clock_camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,cam_cc-sdm845", "syscon";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
 
-	clock_dispcc: qcom,dispcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "dispcc_clocks";
+	clock_dispcc: qcom,dispcc@af00000 {
+		compatible = "qcom,dispcc-sdm845", "syscon";
+		reg = <0xaf00000 0x100000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
 
-	clock_gpucc: qcom,gpucc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "gpucc_clocks";
+	clock_gpucc: qcom,gpucc@5090000 {
+		compatible = "qcom,gpucc-sdm845", "syscon";
+		reg = <0x5090000 0x9000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		qcom,gpu_cc_gmu_clk_src-opp-handle = <&gmu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
 
-	clock_cpucc: qcom,cpucc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "cpucc_clocks";
+	clock_gfx: qcom,gfxcc@5090000 {
+		compatible = "qcom,gfxcc-sdm845";
+		reg = <0x5090000 0x9000>;
+		reg-names = "cc_base";
+		vdd_gfx-supply = <&pm8005_s1_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
+		qcom,gpu_cc_gx_gfx3d_clk_src-opp-handle = <&msm_gpu>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_cpucc: qcom,cpucc@17d41000 {
+		compatible = "qcom,clk-cpu-osm";
+		reg = <0x17d41000 0x1400>,
+			<0x17d43000 0x1400>,
+			<0x17d45800 0x1400>,
+			<0x178d0000 0x1000>,
+			<0x178c0000 0x1000>,
+			<0x178b0000 0x1000>,
+			<0x17d42400 0x0c00>,
+			<0x17d44400 0x0c00>,
+			<0x17d46c00 0x0c00>,
+			<0x17810090 0x8>;
+		reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+			"l3_pll", "pwrcl_pll", "perfcl_pll",
+			"l3_sequencer", "pwrcl_sequencer",
+			"perfcl_sequencer", "apps_itm_ctl";
+
+		vdd-l3-supply = <&apc0_l3_vreg>;
+		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+		qcom,l3-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >;
+
+		qcom,pwrcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   748800000 0x501c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003333 0x1 12 >;
+
+		qcom,perfcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >;
+
+		qcom,l3-min-cpr-vc-bin0 = <7>;
+		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
+		qcom,perfcl-min-cpr-vc-bin0 = <7>;
+
+		qcom,up-timer =
+			<1000 1000 1000>;
+		qcom,down-timer =
+			<100000 100000 100000>;
+		qcom,pc-override-index =
+			<0 0 0>;
+		qcom,set-ret-inactive;
+		qcom,enable-llm-freq-vote;
+		qcom,llm-freq-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-freq-down-timer =
+			<327675 327675 327675>;
+		qcom,enable-llm-volt-vote;
+		qcom,llm-volt-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-volt-down-timer =
+			<327675 327675 327675>;
+		qcom,cc-reads = <10>;
+		qcom,cc-delay = <5>;
+		qcom,cc-factor = <100>;
+		qcom,osm-clk-rate = <100000000>;
+		qcom,xo-clk-rate = <19200000>;
+
+		qcom,l-val-base =
+			<0x178d0004 0x178c0004 0x178b0004>;
+		qcom,apcs-pll-user-ctl =
+			<0x178d000c 0x178c000c 0x178b000c>;
+		qcom,apcs-pll-min-freq =
+			<0x17d41094 0x17d43094 0x17d45894>;
+		qcom,apm-mode-ctl =
+			<0x0 0x0 0x17d20010>;
+		qcom,apm-status-ctrl =
+			<0x0 0x0 0x17d20000>;
+		qcom,perfcl-isense-addr = <0x17871480>;
+		qcom,l3-mem-acc-addr = <0x17990170 0x17990170 0x17990170>;
+		qcom,pwrcl-mem-acc-addr = <0x17990160 0x17990164 0x17990164>;
+		qcom,perfcl-mem-acc-addr = <0x17990168 0x1799016c 0x1799016c>;
+		qcom,cfg-gfmux-addr = <0x178d0084 0x178c0084 0x178b0084>;
+		qcom,apcs-cbc-addr = <0x178d008c 0x178c008c 0x178b008c>;
+		qcom,apcs-ramp-ctl-addr = <0x17840904 0x17840904 0x17830904>;
+
+		qcom,perfcl-apcs-apm-threshold-voltage = <800000>;
+		qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+		qcom,boost-fsm-en;
+		qcom,safe-fsm-en;
+		qcom,ps-fsm-en;
+		qcom,droop-fsm-en;
+		qcom,osm-pll-setup;
+
+		clock-names = "xo_ao";
+		clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -546,15 +920,31 @@
 		mbox-names = "apps";
 	};
 
+	clock_debug: qcom,cc-debug@100000 {
+		compatible = "qcom,debugcc-sdm845";
+		qcom,cc-count = <5>;
+		qcom,gcc = <&clock_gcc>;
+		qcom,videocc = <&clock_videocc>;
+		qcom,camcc = <&clock_camcc>;
+		qcom,dispcc = <&clock_dispcc>;
+		qcom,gpucc = <&clock_gpucc>;
+		clock-names = "xo_clk_src";
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		#clock-cells = <1>;
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xda8>; /* PHY regs */
 		reg-names = "phy_mem";
 		#phy-cells = <0>;
 
-		/* TODO: add "ref_clk_src" */
-		clock-names = "ref_clk",
+		lanes-per-direction = <2>;
+
+		clock-names = "ref_clk_src",
+			"ref_clk",
 			"ref_aux_clk";
-		clocks = <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK1>,
+			<&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
 			<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
 
 		status = "disabled";
@@ -570,13 +960,13 @@
 		lanes-per-direction = <2>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
 
-		/* TODO: add "ref_clk" */
 		clock-names =
 			"core_clk",
 			"bus_aggr_clk",
 			"iface_clk",
 			"core_clk_unipro",
 			"core_clk_ice",
+			"ref_clk",
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk",
 			"rx_lane1_sync_clk";
@@ -587,6 +977,7 @@
 			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
 			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
 			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+			<&clock_rpmh RPMH_LN_BB_CLK1>,
 			<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
@@ -598,34 +989,46 @@
 			<75000000 300000000>,
 			<0 0>,
 			<0 0>,
+			<0 0>,
 			<0 0>;
 
 		qcom,msm-bus,name = "ufshc_mem";
 		qcom,msm-bus,num-cases = <22>;
 		qcom,msm-bus,num-paths = <2>;
 		qcom,msm-bus,vectors-KBps =
-		<95 512 0 0>, <1 650 0 0>,          /* No vote */
-		<95 512 922 0>, <1 650 1000 0>,     /* PWM G1 */
-		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G2 */
-		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G3 */
-		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G4 */
-		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G1 L2 */
-		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G2 L2 */
-		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G3 L2 */
-		<95 512 14752 0>, <1 650 1000 0>,   /* PWM G4 L2 */
-		<95 512 127796 0>, <1 650 1000 0>,  /* HS G1 RA */
-		<95 512 255591 0>, <1 650 1000 0>,  /* HS G2 RA */
-		<95 512 511181 0>, <1 650 1000 0>,  /* HS G3 RA */
-		<95 512 255591 0>, <1 650 1000 0>,  /* HS G1 RA L2 */
-		<95 512 511181 0>, <1 650 1000 0>,  /* HS G2 RA L2 */
-		<95 512 1022362 0>, <1 650 1000 0>, /* HS G3 RA L2 */
-		<95 512 149422 0>, <1 650 1000 0>,  /* HS G1 RB */
-		<95 512 298189 0>, <1 650 1000 0>,  /* HS G2 RB */
-		<95 512 596378 0>, <1 650 1000 0>,  /* HS G3 RB */
-		<95 512 298189 0>, <1 650 1000 0>,  /* HS G1 RB L2 */
-		<95 512 596378 0>, <1 650 1000 0>,  /* HS G2 RB L2 */
-		<95 512 1192756 0>, <1 650 1000 0>, /* HS G3 RB L2 */
-		<95 512 4096000 0>, <1 650 1000 0>; /* Max. bandwidth */
+		/*
+		 * During HS G3, UFS runs at the nominal voltage corner; vote
+		 * higher bandwidth to push the other buses in the data path
+		 * to nominal as well and achieve maximum throughput.
+		 * 4GBps pushes BIMC to run at nominal.
+		 * 200MBps pushes CNOC to run at nominal.
+		 * Vote for half of this bandwidth for HS G3 1-lane.
+		 * For max bandwidth, vote high enough to push the buses
+		 * to run in turbo voltage corner.
+		 */
+		<123 512 0 0>, <1 757 0 0>,          /* No vote */
+		<123 512 922 0>, <1 757 1000 0>,     /* PWM G1 */
+		<123 512 1844 0>, <1 757 1000 0>,    /* PWM G2 */
+		<123 512 3688 0>, <1 757 1000 0>,    /* PWM G3 */
+		<123 512 7376 0>, <1 757 1000 0>,    /* PWM G4 */
+		<123 512 1844 0>, <1 757 1000 0>,    /* PWM G1 L2 */
+		<123 512 3688 0>, <1 757 1000 0>,    /* PWM G2 L2 */
+		<123 512 7376 0>, <1 757 1000 0>,    /* PWM G3 L2 */
+		<123 512 14752 0>, <1 757 1000 0>,   /* PWM G4 L2 */
+		<123 512 127796 0>, <1 757 1000 0>,  /* HS G1 RA */
+		<123 512 255591 0>, <1 757 1000 0>,  /* HS G2 RA */
+		<123 512 2097152 0>, <1 757 102400 0>,  /* HS G3 RA */
+		<123 512 255591 0>, <1 757 1000 0>,  /* HS G1 RA L2 */
+		<123 512 511181 0>, <1 757 1000 0>,  /* HS G2 RA L2 */
+		<123 512 4194304 0>, <1 757 204800 0>, /* HS G3 RA L2 */
+		<123 512 149422 0>, <1 757 1000 0>,  /* HS G1 RB */
+		<123 512 298189 0>, <1 757 1000 0>,  /* HS G2 RB */
+		<123 512 2097152 0>, <1 757 102400 0>,  /* HS G3 RB */
+		<123 512 298189 0>, <1 757 1000 0>,  /* HS G1 RB L2 */
+		<123 512 596378 0>, <1 757 1000 0>,  /* HS G2 RB L2 */
+		<123 512 4194304 0>, <1 757 204800 0>, /* HS G3 RB L2 */
+		<123 512 7643136 0>, <1 757 307200 0>; /* Max. bandwidth */
+
 		qcom,bus-vector-names = "MIN",
 		"PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1",
 		"PWM_G1_L2", "PWM_G2_L2", "PWM_G3_L2", "PWM_G4_L2",
@@ -635,6 +1038,18 @@
 		"HS_RB_G1_L2", "HS_RB_G2_L2", "HS_RB_G3_L2",
 		"MAX";
 
+		/* PM QoS */
+		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
+		qcom,pm-qos-cpu-group-latency-us = <70 70>;
+		qcom,pm-qos-default-cpu = <0>;
+
+		pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+		pinctrl-0 = <&ufs_dev_reset_assert>;
+		pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+		resets = <&clock_gcc GCC_UFS_PHY_BCR>;
+		reset-names = "core_reset";
+
 		status = "disabled";
 	};
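
The comment block above gives the intent of the enlarged HS G3 votes; the numbers themselves are simply GBps expressed in the KBps units the bus driver takes, with the 1-lane case voting half of the 2-lane figure. A quick sanity check of the figures used in the vectors:

/* Verify the HS G3 KBps votes used above: 4 GBps for 2 lanes,
 * half of that for 1 lane.
 */
#include <stdio.h>

int main(void)
{
        unsigned long kbps_2lane = 4UL * 1024 * 1024;  /* 4194304 KBps */
        unsigned long kbps_1lane = kbps_2lane / 2;     /* 2097152 KBps */

        printf("HS G3 2-lane: %lu KBps, 1-lane: %lu KBps\n",
               kbps_2lane, kbps_1lane);
        return 0;
}
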
 
@@ -643,10 +1058,13 @@
 		reg-names = "phy_mem";
 		#phy-cells = <0>;
 
-		/* TODO: add "ref_clk_src" */
-		clock-names = "ref_clk",
+		lanes-per-direction = <1>;
+
+		clock-names = "ref_clk_src",
+			"ref_clk",
 			"ref_aux_clk";
-		clocks = <&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK1>,
+			<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
 			<&clock_gcc GCC_UFS_CARD_PHY_AUX_CLK>;
 
 		status = "disabled";
@@ -662,13 +1080,13 @@
 		lanes-per-direction = <1>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
 
-		/* TODO: add "ref_clk" */
 		clock-names =
 			"core_clk",
 			"bus_aggr_clk",
 			"iface_clk",
 			"core_clk_unipro",
 			"core_clk_ice",
+			"ref_clk",
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk";
 		/* TODO: add HW CTL clocks when available */
@@ -678,6 +1096,7 @@
 			<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
 			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
 			<&clock_gcc GCC_UFS_CARD_ICE_CORE_CLK>,
+			<&clock_rpmh RPMH_LN_BB_CLK1>,
 			<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
 		freq-table-hz =
@@ -687,27 +1106,41 @@
 			<37500000 150000000>,
 			<75000000 300000000>,
 			<0 0>,
+			<0 0>,
 			<0 0>;
 
 		qcom,msm-bus,name = "ufshc_card";
 		qcom,msm-bus,num-cases = <9>;
 		qcom,msm-bus,num-paths = <2>;
 		qcom,msm-bus,vectors-KBps =
-		<95 512 0 0>, <1 650 0 0>,          /* No vote */
-		<95 512 922 0>, <1 650 1000 0>,     /* PWM G1 */
-		<95 512 127796 0>, <1 650 1000 0>,  /* HS G1 RA */
-		<95 512 255591 0>, <1 650 1000 0>,  /* HS G2 RA */
-		<95 512 511181 0>, <1 650 1000 0>,  /* HS G3 RA */
-		<95 512 149422 0>, <1 650 1000 0>,  /* HS G1 RB */
-		<95 512 298189 0>, <1 650 1000 0>,  /* HS G2 RB */
-		<95 512 596378 0>, <1 650 1000 0>,  /* HS G3 RB */
-		<95 512 4096000 0>, <1 650 1000 0>; /* Max. bandwidth */
+		<122 512 0 0>, <1 756 0 0>,          /* No vote */
+		<122 512 922 0>, <1 756 1000 0>,     /* PWM G1 */
+		<122 512 127796 0>, <1 756 1000 0>,  /* HS G1 RA */
+		<122 512 255591 0>, <1 756 1000 0>,  /* HS G2 RA */
+		<122 512 2097152 0>, <1 756 102400 0>,  /* HS G3 RA */
+		<122 512 149422 0>, <1 756 1000 0>,  /* HS G1 RB */
+		<122 512 298189 0>, <1 756 1000 0>,  /* HS G2 RB */
+		<122 512 2097152 0>, <1 756 102400 0>,  /* HS G3 RB */
+		<122 512 7643136 0>, <1 756 307200 0>; /* Max. bandwidth */
 		qcom,bus-vector-names = "MIN",
 		"PWM_G1_L1",
 		"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
 		"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
 		"MAX";
 
+		/* PM QoS */
+		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
+		qcom,pm-qos-cpu-group-latency-us = <70 70>;
+		qcom,pm-qos-default-cpu = <0>;
+
+		/*
+		 * Note: this instance doesn't have control over UFS device
+		 * reset
+		 */
+
+		resets = <&clock_gcc GCC_UFS_CARD_BCR>;
+		reset-names = "core_reset";
+
 		status = "disabled";
 	};
 
@@ -928,6 +1361,11 @@
 		qcom,rtb-size = <0x100000>;
 	};
 
+	qcom,msm-cdsp-loader {
+		compatible = "qcom,cdsp-loader";
+		qcom,proc-img-to-load = "cdsp";
+	};
+
 	qcom,msm_fastrpc {
 		compatible = "qcom,msm-fastrpc-compute";
 
@@ -1037,31 +1475,31 @@
 			qcom,dump-id = <0x60>;
 		};
 		qcom,l1_i_cache1 {
-			qcom,dump-node = <&L1_I_1>;
+			qcom,dump-node = <&L1_I_100>;
 			qcom,dump-id = <0x61>;
 		};
 		qcom,l1_i_cache2 {
-			qcom,dump-node = <&L1_I_2>;
+			qcom,dump-node = <&L1_I_200>;
 			qcom,dump-id = <0x62>;
 		};
 		qcom,l1_i_cache3 {
-			qcom,dump-node = <&L1_I_3>;
+			qcom,dump-node = <&L1_I_300>;
 			qcom,dump-id = <0x63>;
 		};
 		qcom,l1_i_cache100 {
-			qcom,dump-node = <&L1_I_100>;
+			qcom,dump-node = <&L1_I_400>;
 			qcom,dump-id = <0x64>;
 		};
 		qcom,l1_i_cache101 {
-			qcom,dump-node = <&L1_I_101>;
+			qcom,dump-node = <&L1_I_500>;
 			qcom,dump-id = <0x65>;
 		};
 		qcom,l1_i_cache102 {
-			qcom,dump-node = <&L1_I_102>;
+			qcom,dump-node = <&L1_I_600>;
 			qcom,dump-id = <0x66>;
 		};
 		qcom,l1_i_cache103 {
-			qcom,dump-node = <&L1_I_103>;
+			qcom,dump-node = <&L1_I_700>;
 			qcom,dump-id = <0x67>;
 		};
 		qcom,l1_d_cache0 {
@@ -1069,31 +1507,31 @@
 			qcom,dump-id = <0x80>;
 		};
 		qcom,l1_d_cache1 {
-			qcom,dump-node = <&L1_D_1>;
+			qcom,dump-node = <&L1_D_100>;
 			qcom,dump-id = <0x81>;
 		};
 		qcom,l1_d_cache2 {
-			qcom,dump-node = <&L1_D_2>;
+			qcom,dump-node = <&L1_D_200>;
 			qcom,dump-id = <0x82>;
 		};
 		qcom,l1_d_cache3 {
-			qcom,dump-node = <&L1_D_3>;
+			qcom,dump-node = <&L1_D_300>;
 			qcom,dump-id = <0x83>;
 		};
 		qcom,l1_d_cache100 {
-			qcom,dump-node = <&L1_D_100>;
+			qcom,dump-node = <&L1_D_400>;
 			qcom,dump-id = <0x84>;
 		};
 		qcom,l1_d_cache101 {
-			qcom,dump-node = <&L1_D_101>;
+			qcom,dump-node = <&L1_D_500>;
 			qcom,dump-id = <0x85>;
 		};
 		qcom,l1_d_cache102 {
-			qcom,dump-node = <&L1_D_102>;
+			qcom,dump-node = <&L1_D_600>;
 			qcom,dump-id = <0x86>;
 		};
 		qcom,l1_d_cache103 {
-			qcom,dump-node = <&L1_D_103>;
+			qcom,dump-node = <&L1_D_700>;
 			qcom,dump-id = <0x87>;
 		};
 		qcom,llcc1_d_cache {
@@ -1127,10 +1565,12 @@
 				  "l3-scu-faultirq";
 	};
 
-	qcom,llcc@1300000 {
+	qcom,llcc@1100000 {
 		compatible = "qcom,llcc-core", "syscon", "simple-mfd";
-		reg = <0x1300000 0x50000>;
+		reg = <0x1100000 0x250000>;
 		reg-names = "llcc_base";
+		qcom,llcc-banks-off = <0x0 0x80000 0x100000 0x180000>;
+		qcom,llcc-broadcast-off = <0x200000>;
 
 		llcc: qcom,sdm845-llcc {
 			compatible = "qcom,sdm845-llcc";
@@ -1198,8 +1638,21 @@
 		qcom,rx-ring-size = <0x400>;
 	};
 
+	qmp_aop: mailbox@1799000c {
+		compatible = "qcom,qmp-mbox";
+		label = "aop";
+		reg = <0xc300000 0x100000>,
+			<0x1799000c 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x1>;
+		interrupts = <0 389 1>;
+		mbox_desc_offset = <0x0>;
+		#mbox-cells = <1>;
+	};
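+
A consumer of the qmp_aop mailbox above would point at it with mboxes = <&qmp_aop 0> and go through the standard mailbox client API. A minimal, hedged sketch of such a client follows; the probe function name and the omitted QMP message layout are assumptions, and error handling is trimmed.

/* Hedged sketch of a mailbox client for the "aop" QMP channel.
 * Assumes the consumer's DT node carries: mboxes = <&qmp_aop 0>;
 */
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_aop_probe(struct platform_device *pdev)
{
        struct mbox_client *cl;
        struct mbox_chan *chan;

        cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        cl->dev = &pdev->dev;
        cl->tx_block = true;

        chan = mbox_request_channel(cl, 0);     /* index 0 of "mboxes" */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* mbox_send_message(chan, &msg) would follow; the QMP message
         * format itself is not shown here. */
        return 0;
}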
+
 	apps_rsc: mailbox@179e0000 {
 		compatible = "qcom,tcs-drv";
+		label = "apps_rsc";
 		reg = <0x179e0000 0x100>, <0x179e0d00 0x3000>;
 		interrupts = <0 5 0>;
 		#mbox-cells = <1>;
@@ -1212,6 +1665,7 @@
 
 	disp_rsc: mailbox@af20000 {
 		compatible = "qcom,tcs-drv";
+		label = "display_rsc";
 		reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>;
 		interrupts = <0 129 0>;
 		#mbox-cells = <1>;
@@ -1451,7 +1905,6 @@
 		qcom,ipa-hw-ver = <13>; /* IPA core version = IPAv3.5.1 */
 		qcom,ipa-hw-mode = <1>;
 		qcom,ee = <0>;
-		qcom,use-gsi;
 		qcom,use-ipa-tethering-bridge;
 		qcom,modem-cfg-emb-pipe-flt;
 		qcom,ipa-wdi2;
@@ -1606,6 +2059,11 @@
 		      <0x10ae000 0x2000>;
 		reg-names = "dcc-base", "dcc-ram-base";
 	};
+
+	qcom,msm-core@780000 {
+		compatible = "qcom,apss-core-ea";
+		reg = <0x780000 0x1000>;
+	};
 };
 
 &pcie_0_gdsc {
@@ -1693,6 +2151,9 @@
 };
 
 &gpu_gx_gdsc {
+	clock-names = "core_root_clk";
+	clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK_SRC>;
+	qcom,force-enable-root-clk;
 	parent-supply = <&pm8005_s1_level>;
 	status = "ok";
 };
@@ -1723,3 +2184,4 @@
 #include "sdm845-pm.dtsi"
 #include "sdm845-pinctrl.dtsi"
 #include "sdm845-audio.dtsi"
+#include "sdm845-gpu.dtsi"
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 1e9c49f..9552dc1 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -11,11 +11,13 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
@@ -73,7 +75,10 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -267,24 +272,32 @@
 CONFIG_HW_RANDOM=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
 CONFIG_SOUNDWIRE=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_SDM845=y
 CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -307,6 +320,8 @@
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
 CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -316,6 +331,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
@@ -386,9 +402,15 @@
 CONFIG_USB_BAM=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
+CONFIG_MSM_DISPCC_SDM845=y
 CONFIG_CLOCK_QPNP_DIV=y
 CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
@@ -399,6 +421,8 @@
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
@@ -420,8 +444,19 @@
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON=y
 CONFIG_IIO=y
 CONFIG_QCOM_RRADC=y
@@ -453,6 +488,7 @@
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_DEBUG_ALIGN_RODATA=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index da51cc8..5f22fed 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -13,18 +13,19 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
-CONFIG_CGROUPS=y
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
-CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
 CONFIG_SCHED_TUNE=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_XZ is not set
@@ -79,7 +80,10 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -271,31 +275,38 @@
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HVC_DCC=y
+CONFIG_HVC_DCC_SERIALIZE_SMP=y
 CONFIG_HW_RANDOM=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
 CONFIG_SOUNDWIRE=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_SDM845=y
 CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -317,7 +328,10 @@
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_QCOM_KGSL=y
 CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -327,6 +341,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
@@ -404,9 +419,15 @@
 CONFIG_USB_BAM=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
+CONFIG_MSM_DISPCC_SDM845=y
 CONFIG_CLOCK_QPNP_DIV=y
 CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
@@ -420,6 +441,8 @@
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
@@ -442,9 +465,20 @@
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
 CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON=y
 CONFIG_IIO=y
 CONFIG_QCOM_RRADC=y
@@ -496,6 +530,8 @@
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
 CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_TIMER_STATS=y
@@ -513,8 +549,10 @@
 CONFIG_QCOM_RTB=y
 CONFIG_QCOM_RTB_SEPARATE_CPUS=y
 CONFIG_FUNCTION_TRACER=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_LKDTM=y
 CONFIG_MEMTEST=y
 CONFIG_ARM64_PTDUMP=y
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..f7e2c32
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_DMA_CMA
+
+#include <linux/types.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index d1472eb..4ad25a5 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -47,9 +47,6 @@
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	u64			ttbr0;		/* saved TTBR0_EL1 */
-#endif
 	struct task_struct	*task;		/* main task structure */
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	u64			ttbr0;		/* saved TTBR0_EL1 */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7d66bba..2c03b01 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -33,7 +33,8 @@
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
 arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
+arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_trace_counters.o   \
+					   perf_trace_user.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24e..d7e90d9 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image.
+	 * happens, increase the KASLR offset by the size of the kernel image,
+	 * rounded up to a multiple of SWAPPER_BLOCK_SIZE.
 	 */
 	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-		offset = (offset + (u64)(_end - _text)) & mask;
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+		u64 kimg_sz = _end - _text;
+		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+				& mask;
+	}
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 6d47969..852548c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -840,12 +840,10 @@
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
 
-	/* Always place a cycle counter into the cycle counter. */
+	/* Place the first cycle counter request into the cycle counter. */
 	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
-			return -EAGAIN;
-
-		return ARMV8_IDX_CYCLE_COUNTER;
+		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
+			return ARMV8_IDX_CYCLE_COUNTER;
 	}
 
 	/*
@@ -869,8 +867,6 @@
 {
 	unsigned long config_base = 0;
 
-	if (attr->exclude_idle)
-		return -EPERM;
 	if (is_kernel_in_hyp_mode() &&
 	    attr->exclude_kernel != attr->exclude_hv)
 		return -EINVAL;
@@ -977,11 +973,74 @@
 			     ARRAY_SIZE(pmceid));
 }
 
+static void armv8pmu_idle_update(struct arm_pmu *cpu_pmu)
+{
+	struct pmu_hw_events *hw_events;
+	struct perf_event *event;
+	int idx;
+
+	if (!cpu_pmu)
+		return;
+
+	hw_events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	if (!hw_events)
+		return;
+
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+
+		if (!test_bit(idx, hw_events->used_mask))
+			continue;
+
+		event = hw_events->events[idx];
+
+		if (!event || !event->attr.exclude_idle ||
+				event->state != PERF_EVENT_STATE_ACTIVE)
+			continue;
+
+		cpu_pmu->pmu.read(event);
+	}
+}
+
+struct arm_pmu_and_idle_nb {
+	struct arm_pmu *cpu_pmu;
+	struct notifier_block perf_cpu_idle_nb;
+};
+
+static int perf_cpu_idle_notifier(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct arm_pmu_and_idle_nb *pmu_nb = container_of(nb,
+				struct arm_pmu_and_idle_nb, perf_cpu_idle_nb);
+
+	if (action == IDLE_START)
+		armv8pmu_idle_update(pmu_nb->cpu_pmu);
+
+	return NOTIFY_OK;
+}
+
 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
 {
-	return smp_call_function_any(&cpu_pmu->supported_cpus,
+	int ret;
+	struct arm_pmu_and_idle_nb *pmu_idle_nb;
+
+	pmu_idle_nb = devm_kzalloc(&cpu_pmu->plat_device->dev,
+					sizeof(*pmu_idle_nb), GFP_KERNEL);
+	if (!pmu_idle_nb)
+		return -ENOMEM;
+
+	pmu_idle_nb->cpu_pmu = cpu_pmu;
+	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = perf_cpu_idle_notifier;
+	idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
+
+	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
 				    __armv8pmu_probe_pmu,
 				    cpu_pmu, 1);
+
+	if (ret)
+		idle_notifier_unregister(&pmu_idle_nb->perf_cpu_idle_nb);
+
+	return ret;
 }
 
 static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
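The perf_event.c change above registers an idle notifier so that events created with attr.exclude_idle are read, and therefore stop counting, whenever a CPU enters idle; the old code simply rejected such events with -EPERM. The scan over the used-counter bitmap is sketched below with invented userspace types; none of these structures or the read() hook are the kernel's API, they only model the loop.

#include <stdbool.h>
#include <stdio.h>

#define NUM_EVENTS 8

struct fake_event {
	bool active;
	bool exclude_idle;
	unsigned long count;
};

struct fake_pmu {
	unsigned long used_mask;		/* bit i set => counter slot i allocated */
	struct fake_event events[NUM_EVENTS];
	void (*read)(struct fake_event *ev);
};

static void fake_read(struct fake_event *ev)
{
	ev->count++;				/* pretend we latched the hardware counter */
}

static void idle_update(struct fake_pmu *pmu)
{
	int idx;

	for (idx = 0; idx < NUM_EVENTS; idx++) {
		struct fake_event *ev = &pmu->events[idx];

		if (!(pmu->used_mask & (1UL << idx)))
			continue;
		if (!ev->active || !ev->exclude_idle)
			continue;
		pmu->read(ev);			/* snapshot the event before idling */
	}
}

int main(void)
{
	struct fake_pmu pmu = { .used_mask = 0x3, .read = fake_read };

	pmu.events[0] = (struct fake_event){ .active = true, .exclude_idle = true };
	pmu.events[1] = (struct fake_event){ .active = true, .exclude_idle = false };

	idle_update(&pmu);
	printf("event0 reads: %lu, event1 reads: %lu\n",
	       pmu.events[0].count, pmu.events[1].count);
	return 0;
}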
diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c
new file mode 100644
index 0000000..1f0b74a
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_counters.c
@@ -0,0 +1,177 @@
+/* Copyright (c) 2013-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cpu.h>
+#include <linux/tracepoint.h>
+#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include "perf_trace_counters.h"
+
+static unsigned int tp_pid_state;
+
+DEFINE_PER_CPU(u32, cntenset_val);
+DEFINE_PER_CPU(u32, previous_ccnt);
+DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+DEFINE_PER_CPU(u32, old_pid);
+DEFINE_PER_CPU(u32, hotplug_flag);
+
+#define USE_CPUHP_STATE CPUHP_AP_ONLINE
+
+static int tracectr_cpu_hotplug_coming_up(unsigned int cpu)
+{
+	per_cpu(hotplug_flag, cpu) = 1;
+
+	return 0;
+}
+
+static void setup_prev_cnts(u32 cpu, u32 cnten_val)
+{
+	int i;
+
+	if (cnten_val & CC)
+		per_cpu(previous_ccnt, cpu) =
+			read_sysreg(pmccntr_el0);
+
+	for (i = 0; i < NUM_L1_CTRS; i++) {
+		if (cnten_val & (1 << i)) {
+			/* Select */
+			write_sysreg(i, pmselr_el0);
+			isb();
+			/* Read value */
+			per_cpu(previous_l1_cnts[i], cpu) =
+				read_sysreg(pmxevcntr_el0);
+		}
+	}
+}
+
+void tracectr_notifier(void *ignore, bool preempt,
+			struct task_struct *prev, struct task_struct *next)
+{
+	u32 cnten_val;
+	int current_pid;
+	u32 cpu = task_thread_info(next)->cpu;
+
+	if (tp_pid_state != 1)
+		return;
+	current_pid = next->pid;
+	if (per_cpu(old_pid, cpu) != -1) {
+		cnten_val = read_sysreg(pmcntenset_el0);
+		per_cpu(cntenset_val, cpu) = cnten_val;
+		/* Disable all the counters that were enabled */
+		write_sysreg(cnten_val, pmcntenclr_el0);
+
+		if (per_cpu(hotplug_flag, cpu) == 1) {
+			per_cpu(hotplug_flag, cpu) = 0;
+			setup_prev_cnts(cpu, cnten_val);
+		} else {
+			trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
+						     current_pid);
+		}
+
+		/* Enable all the counters that were disabled */
+		write_sysreg(cnten_val, pmcntenset_el0);
+	}
+	per_cpu(old_pid, cpu) = current_pid;
+}
+
+static void enable_tp_pid(void)
+{
+	if (tp_pid_state == 0) {
+		tp_pid_state = 1;
+		register_trace_sched_switch(tracectr_notifier, NULL);
+	}
+}
+
+static void disable_tp_pid(void)
+{
+	if (tp_pid_state == 1) {
+		tp_pid_state = 0;
+		unregister_trace_sched_switch(tracectr_notifier, NULL);
+	}
+}
+
+static ssize_t read_enabled_perftp_file_bool(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[2];
+
+	buf[1] = '\n';
+	if (tp_pid_state == 0)
+		buf[0] = '0';
+	else
+		buf[0] = '1';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_perftp_file_bool(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	size_t buf_size;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	switch (buf[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		enable_tp_pid();
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		disable_tp_pid();
+		break;
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_perftp = {
+	.read =		read_enabled_perftp_file_bool,
+	.write =	write_enabled_perftp_file_bool,
+	.llseek =	default_llseek,
+};
+
+int __init init_tracecounters(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	unsigned int value = 1;
+	int cpu, rc;
+
+	dir = debugfs_create_dir("perf_debug_tp", NULL);
+	if (!dir)
+		return -ENOMEM;
+	file = debugfs_create_file("enabled", 0660, dir,
+		&value, &fops_perftp);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+	for_each_possible_cpu(cpu)
+		per_cpu(old_pid, cpu) = -1;
+	rc = cpuhp_setup_state_nocalls(USE_CPUHP_STATE,
+		"tracectr_cpu_hotplug",
+		tracectr_cpu_hotplug_coming_up,
+		NULL);
+	if (rc < 0) {
+		debugfs_remove_recursive(dir);
+		return rc;
+	}
+	return 0;
+}
+
+int __exit exit_tracecounters(void)
+{
+	cpuhp_remove_state_nocalls(USE_CPUHP_STATE);
+	return 0;
+}
+late_initcall(init_tracecounters);
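perf_trace_counters.c snapshots every enabled counter at context-switch time and emits the difference since the previous switch. Both the live counters and the saved snapshots are u32, so the subtraction stays correct across a single counter wrap, as this small sketch (with made-up values) shows.

#include <stdint.h>
#include <stdio.h>

static uint32_t counter_delta(uint32_t prev, uint32_t now)
{
	return now - prev;	/* modulo-2^32 subtraction absorbs one wrap */
}

int main(void)
{
	uint32_t prev = 0xFFFFFF00u;	/* snapshot taken just before the wrap */
	uint32_t now  = 0x00000100u;	/* counter value after wrapping */

	printf("delta = %u\n", counter_delta(prev, now));	/* prints 512 */
	return 0;
}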
diff --git a/arch/arm64/kernel/perf_trace_counters.h b/arch/arm64/kernel/perf_trace_counters.h
new file mode 100644
index 0000000..660f6ce
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_counters.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2013-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#if !defined(_PERF_TRACE_COUNTERS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_COUNTERS_H_
+
+/* Ctr index for PMCNTENSET/CLR */
+#define CC 0x80000000
+#define C0 0x1
+#define C1 0x2
+#define C2 0x4
+#define C3 0x8
+#define C4 0x10
+#define C5 0x20
+#define C_ALL (CC | C0 | C1 | C2 | C3 | C4 | C5)
+#define NUM_L1_CTRS 6
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/tracepoint.h>
+
+DECLARE_PER_CPU(u32, cntenset_val);
+DECLARE_PER_CPU(u32, previous_ccnt);
+DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+TRACE_EVENT(sched_switch_with_ctrs,
+
+		TP_PROTO(pid_t prev, pid_t next),
+
+		TP_ARGS(prev, next),
+
+		TP_STRUCT__entry(
+			__field(pid_t,	old_pid)
+			__field(pid_t,	new_pid)
+			__field(u32, cctr)
+			__field(u32, ctr0)
+			__field(u32, ctr1)
+			__field(u32, ctr2)
+			__field(u32, ctr3)
+			__field(u32, ctr4)
+			__field(u32, ctr5)
+		),
+
+		TP_fast_assign(
+			u32 cpu = smp_processor_id();
+			u32 i;
+			u32 cnten_val;
+			u32 total_ccnt = 0;
+			u32 total_cnt = 0;
+			u32 delta_l1_cnts[NUM_L1_CTRS];
+
+			__entry->old_pid	= prev;
+			__entry->new_pid	= next;
+
+			cnten_val = per_cpu(cntenset_val, cpu);
+
+			if (cnten_val & CC) {
+				/* Read value */
+				total_ccnt = read_sysreg(pmccntr_el0);
+				__entry->cctr = total_ccnt -
+					per_cpu(previous_ccnt, cpu);
+				per_cpu(previous_ccnt, cpu) = total_ccnt;
+			}
+			for (i = 0; i < NUM_L1_CTRS; i++) {
+				if (cnten_val & (1 << i)) {
+					/* Select */
+					write_sysreg(i, pmselr_el0);
+					isb();
+					/* Read value */
+					total_cnt = read_sysreg(pmxevcntr_el0);
+					delta_l1_cnts[i] = total_cnt -
+					  per_cpu(previous_l1_cnts[i], cpu);
+					per_cpu(previous_l1_cnts[i], cpu) =
+						total_cnt;
+				} else
+					delta_l1_cnts[i] = 0;
+			}
+
+			__entry->ctr0 = delta_l1_cnts[0];
+			__entry->ctr1 = delta_l1_cnts[1];
+			__entry->ctr2 = delta_l1_cnts[2];
+			__entry->ctr3 = delta_l1_cnts[3];
+			__entry->ctr4 = delta_l1_cnts[4];
+			__entry->ctr5 = delta_l1_cnts[5];
+		),
+
+		TP_printk("prev_pid=%d, next_pid=%d, CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u",
+				__entry->old_pid, __entry->new_pid,
+				__entry->cctr,
+				__entry->ctr0, __entry->ctr1,
+				__entry->ctr2, __entry->ctr3,
+				__entry->ctr4, __entry->ctr5)
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_counters
+#include <trace/define_trace.h>
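The CC and C0..C5 defines above mirror the PMCNTENSET/PMCNTENCLR layout used throughout this file: bit 31 selects the cycle counter and bits 0-5 the six event counters. Decoding such a mask is plain bit arithmetic; the mask value in the sketch below is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define CC_BIT		(1u << 31)	/* cycle counter enable bit */
#define NUM_L1_CTRS	6

int main(void)
{
	uint32_t cnten_val = CC_BIT | 0x5;	/* cycle counter plus counters 0 and 2 */
	int i;

	if (cnten_val & CC_BIT)
		printf("cycle counter enabled\n");
	for (i = 0; i < NUM_L1_CTRS; i++)
		if (cnten_val & (1u << i))
			printf("event counter %d enabled\n", i);
	return 0;
}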
diff --git a/arch/arm64/kernel/perf_trace_user.c b/arch/arm64/kernel/perf_trace_user.c
new file mode 100644
index 0000000..5a83cc5
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_user.c
@@ -0,0 +1,96 @@
+/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/preempt.h>
+#include <linux/stat.h>
+#include <linux/uaccess.h>
+
+#define CREATE_TRACE_POINTS
+#include "perf_trace_user.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#define TRACE_USER_MAX_BUF_SIZE 100
+
+static ssize_t perf_trace_write(struct file *file,
+				const char __user *user_string_in,
+				size_t len, loff_t *ppos)
+{
+	u32 cnten_val;
+	int rc;
+	char buf[TRACE_USER_MAX_BUF_SIZE + 1];
+	ssize_t length;
+
+	if (len == 0)
+		return 0;
+
+	length = len > TRACE_USER_MAX_BUF_SIZE ? TRACE_USER_MAX_BUF_SIZE : len;
+
+	rc = copy_from_user(buf, user_string_in, length);
+	if (rc) {
+		pr_err("%s copy_from_user failed, rc=%d\n", __func__, rc);
+		return length;
+	}
+
+	/* Remove any trailing newline and make sure string is terminated */
+	if (buf[length - 1] == '\n')
+		buf[length - 1] = '\0';
+	else
+		buf[length] = '\0';
+
+	/*
+	 * Disable preemption to ensure that all the performance counter
+	 * accesses happen on the same cpu
+	 */
+	preempt_disable();
+	/* stop counters, call the trace function, restart them */
+
+	cnten_val = read_sysreg(pmcntenset_el0);
+	/* Disable all the counters that were enabled */
+	write_sysreg(cnten_val, pmcntenclr_el0);
+
+	trace_perf_trace_user(buf, cnten_val);
+
+	/* Enable all the counters that were disabled */
+	write_sysreg(cnten_val, pmcntenset_el0);
+	preempt_enable();
+
+	return length;
+}
+
+static const struct file_operations perf_trace_fops = {
+	.write = perf_trace_write
+};
+
+static int __init init_perf_trace(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	unsigned int value = 1;
+
+	dir = debugfs_create_dir("msm_perf", NULL);
+	if (!dir)
+		return -ENOMEM;
+	file = debugfs_create_file("trace_marker", 0220, dir,
+		&value, &perf_trace_fops);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(init_perf_trace);
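perf_trace_write() above caps the copy at TRACE_USER_MAX_BUF_SIZE bytes and then either strips a trailing newline or NUL-terminates the buffer before stopping the counters and emitting the trace event. The string handling on its own, minus the uaccess and sysreg details, reduces to the userspace sketch below (the example marker text is invented).

#include <stdio.h>
#include <string.h>

#define MAX_BUF 100

static size_t prepare_marker(char *dst, const char *src, size_t len)
{
	size_t n = len > MAX_BUF ? MAX_BUF : len;

	memcpy(dst, src, n);
	if (n && dst[n - 1] == '\n')
		dst[n - 1] = '\0';	/* drop the trailing newline */
	else
		dst[n] = '\0';		/* otherwise just terminate */
	return n;
}

int main(void)
{
	char buf[MAX_BUF + 1];
	const char *msg = "frame_start\n";

	prepare_marker(buf, msg, strlen(msg));
	printf("trace marker: \"%s\"\n", buf);
	return 0;
}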
diff --git a/arch/arm64/kernel/perf_trace_user.h b/arch/arm64/kernel/perf_trace_user.h
new file mode 100644
index 0000000..d592392
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_user.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_PERF_TRACE_USER_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_USER_H_
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#include <linux/tracepoint.h>
+
+#define CNTENSET_CC    0x80000000
+#define NUM_L1_CTRS             6
+
+TRACE_EVENT(perf_trace_user,
+	TP_PROTO(char *string, u32 cnten_val),
+	TP_ARGS(string, cnten_val),
+
+	TP_STRUCT__entry(
+		__field(u32, cctr)
+		__field(u32, ctr0)
+		__field(u32, ctr1)
+		__field(u32, ctr2)
+		__field(u32, ctr3)
+		__field(u32, ctr4)
+		__field(u32, ctr5)
+		__string(user_string, string)
+		),
+
+	TP_fast_assign(
+		u32 cnt;
+		u32 l1_cnts[NUM_L1_CTRS];
+		int i;
+
+		if (cnten_val & CNTENSET_CC) {
+			/* Read value */
+			cnt = read_sysreg(pmccntr_el0);
+			__entry->cctr = cnt;
+		} else
+			__entry->cctr = 0;
+		for (i = 0; i < NUM_L1_CTRS; i++) {
+			if (cnten_val & (1 << i)) {
+				/* Select */
+				write_sysreg(i, pmselr_el0);
+				isb();
+				/* Read value */
+				cnt = read_sysreg(pmxevcntr_el0);
+				l1_cnts[i] = cnt;
+			} else {
+				l1_cnts[i] = 0;
+			}
+		}
+
+		__entry->ctr0 = l1_cnts[0];
+		__entry->ctr1 = l1_cnts[1];
+		__entry->ctr2 = l1_cnts[2];
+		__entry->ctr3 = l1_cnts[3];
+		__entry->ctr4 = l1_cnts[4];
+		__entry->ctr5 = l1_cnts[5];
+		__assign_str(user_string, string);
+		),
+
+		TP_printk("CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u, MSG=%s",
+				__entry->cctr,
+				__entry->ctr0, __entry->ctr1,
+				__entry->ctr2, __entry->ctr3,
+				__entry->ctr4, __entry->ctr5,
+				__get_str(user_string)
+			)
+	);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_user
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 8eb0d14..0c4a5ee 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -84,6 +84,16 @@
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
+void arch_cpu_idle_enter(void)
+{
+	idle_notifier_call_chain(IDLE_START);
+}
+
+void arch_cpu_idle_exit(void)
+{
+	idle_notifier_call_chain(IDLE_END);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void arch_cpu_idle_dead(void)
 {
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 88e2f2b..55889d0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -17,14 +17,62 @@
 
 #include <asm/kvm_hyp.h>
 
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+	u64 val;
+
+	/*
+	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+	 * most TLB operations target EL2/EL0. In order to affect the
+	 * guest TLBs (EL1/EL0), we need to change one of these two
+	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+	 * let's flip TGE before executing the TLB operation.
+	 */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	val = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	write_sysreg(val, hcr_el2);
+	isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+			    __tlb_switch_to_guest_nvhe,
+			    __tlb_switch_to_guest_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+	/*
+	 * We're done with the TLB operation, let's restore the host's
+	 * view of HCR_EL2.
+	 */
+	write_sysreg(0, vttbr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+	write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+			    __tlb_switch_to_host_nvhe,
+			    __tlb_switch_to_host_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -45,7 +93,7 @@
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -54,14 +102,13 @@
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	asm volatile("tlbi vmalls12e1is" : : );
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -69,14 +116,13 @@
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
 	/* Switch to requested VMID */
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	asm volatile("tlbi vmalle1" : : );
 	dsb(nsh);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
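The tlb.c rework above wraps the VMID switch in a pair of helpers chosen by ARM64_HAS_VIRT_HOST_EXTN: with VHE enabled the code must also clear HCR_EL2.TGE so the TLB instructions target the guest's EL1/EL0 translations. hyp_alternate_select() patches the call site at boot; the toy model below substitutes an ordinary function pointer purely to show the selection shape and is not how the alternatives framework works.

#include <stdbool.h>
#include <stdio.h>

static void switch_to_guest_nvhe(void) { puts("nVHE: write VTTBR_EL2, isb"); }
static void switch_to_guest_vhe(void)  { puts("VHE: write VTTBR_EL2, clear TGE, isb"); }

int main(void)
{
	bool has_vhe = true;	/* stand-in for the ARM64_HAS_VIRT_HOST_EXTN capability */
	void (*switch_to_guest)(void) =
		has_vhe ? switch_to_guest_vhe : switch_to_guest_nvhe;

	switch_to_guest();
	return 0;
}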
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d8e6635..40e775a 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -976,7 +976,7 @@
 	 * device, and allocated the default domain for that group.
 	 */
 	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
-		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+		pr_debug("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
 			dev_name(dev));
 		return false;
 	}
@@ -1743,7 +1743,11 @@
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1753,12 +1757,12 @@
 	prot = __get_iommu_pgprot(attrs, prot,
 				  is_dma_coherent(dev, attrs));
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;
@@ -1897,7 +1901,11 @@
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+							__GFP_NORETRY);
+	if (!mapping->bitmap)
+		mapping->bitmap = vzalloc(bitmap_size);
+
 	if (!mapping->bitmap)
 		goto err2;
 
@@ -1912,7 +1920,7 @@
 	kref_init(&mapping->kref);
 	return mapping;
 err3:
-	kfree(mapping->bitmap);
+	kvfree(mapping->bitmap);
 err2:
 	kfree(mapping);
 err:
@@ -1926,7 +1934,7 @@
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	kvfree(mapping->bitmap);
 	kfree(mapping);
 }
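The map_page change above splits the caller's offset into a page-aligned part, which is folded into the physical address handed to iommu_map(), and an intra-page remainder, which is added back to the returned DMA address. The arithmetic is easy to verify in isolation; a 4KB page size is assumed here for the example.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long offset = 0x2345, size = 0x3000;		/* invented request */
	unsigned long map_offset   = offset & ~PAGE_MASK;	/* 0x345 */
	unsigned long start_offset = offset & PAGE_MASK;	/* 0x2000 */
	unsigned long len          = PAGE_ALIGN(map_offset + size);	/* 0x4000 */

	printf("map phys+%#lx for %#lx bytes, return iova+%#lx\n",
	       start_offset, len, map_offset);
	return 0;
}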
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05615a3..f70b433 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -30,6 +30,8 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -59,6 +61,40 @@
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
 
+struct dma_contig_early_reserve {
+	phys_addr_t base;
+	unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
+static int dma_mmu_remap_num;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+	if (dma_mmu_remap_num >= ARRAY_SIZE(dma_mmu_remap)) {
+		pr_err("ARM64: Not enough slots for DMA fixup reserved regions!\n");
+		return;
+	}
+	dma_mmu_remap[dma_mmu_remap_num].base = base;
+	dma_mmu_remap[dma_mmu_remap_num].size = size;
+	dma_mmu_remap_num++;
+}
+
+static bool dma_overlap(phys_addr_t start, phys_addr_t end)
+{
+	int i;
+
+	for (i = 0; i < dma_mmu_remap_num; i++) {
+		phys_addr_t dma_base = dma_mmu_remap[i].base;
+		phys_addr_t dma_end = dma_mmu_remap[i].base +
+			dma_mmu_remap[i].size;
+
+		if ((dma_base < end) && (dma_end > start))
+			return true;
+	}
+	return false;
+}
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -149,7 +185,8 @@
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-		      allow_block_mappings) {
+		      allow_block_mappings &&
+		      !dma_overlap(phys, phys + next - addr)) {
 			pmd_t old_pmd =*pmd;
 			pmd_set_huge(pmd, phys, prot);
 			/*
@@ -209,7 +246,8 @@
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys) && allow_block_mappings) {
+		if (use_1G_block(addr, next, phys) && allow_block_mappings &&
+		    !dma_overlap(phys, phys + next - addr)) {
 			pud_t old_pud = *pud;
 			pud_set_huge(pud, phys, prot);
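dma_overlap() above is the usual half-open interval test: a candidate block mapping [start, end) collides with a reserved CMA window [base, base+size) exactly when each range starts before the other one ends. A quick userspace check of the predicate, with invented addresses:

#include <stdbool.h>
#include <stdio.h>

static bool ranges_overlap(unsigned long s1, unsigned long e1,
			   unsigned long s2, unsigned long e2)
{
	return s1 < e2 && s2 < e1;
}

int main(void)
{
	unsigned long cma_base = 0x80000000UL, cma_end = 0x88000000UL;

	/* straddles the start of the CMA window: prints 1 */
	printf("%d\n", ranges_overlap(0x7fe00000UL, 0x80200000UL, cma_base, cma_end));
	/* entirely above the CMA window: prints 0 */
	printf("%d\n", ranges_overlap(0x90000000UL, 0x90200000UL, cma_base, cma_end));
	return 0;
}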
 
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index 3c494e8..a511ac1 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -69,46 +69,6 @@
 				   0, sizeof(*regs));
 }
 
-static int gpr_set(struct task_struct *target,
-		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   const void *kbuf, const void __user *ubuf)
-{
-	int ret;
-	struct pt_regs *regs = task_pt_regs(target);
-
-	/* Don't copyin TSR or CSR */
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 0, PT_TSR * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-					PT_TSR * sizeof(long),
-					(PT_TSR + 1) * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 (PT_TSR + 1) * sizeof(long),
-				 PT_CSR * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-					PT_CSR * sizeof(long),
-					(PT_CSR + 1) * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 (PT_CSR + 1) * sizeof(long), -1);
-	return ret;
-}
-
 enum c6x_regset {
 	REGSET_GPR,
 };
@@ -120,7 +80,6 @@
 		.size = sizeof(u32),
 		.align = sizeof(u32),
 		.get = gpr_get,
-		.set = gpr_set
 	},
 };
 
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 9207554..0dc1c8f 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@
 	long *reg = (long *)&regs;
 
 	/* build user regs in buffer */
-	for (r = 0; r < ARRAY_SIZE(register_offset); r++)
+	BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+	for (r = 0; r < sizeof(regs) / sizeof(long); r++)
 		*reg++ = h8300_get_reg(target, r);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@
 	long *reg;
 
 	/* build user regs in buffer */
-	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+	BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+	for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
 		*reg++ = h8300_get_reg(target, r);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@
 		return ret;
 
 	/* write back to pt_regs */
-	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+	for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
 		h8300_put_reg(target, r, *reg++);
 	return 0;
 }
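The h8300 change above replaces a loop bound taken from register_offset[] with sizeof(regs)/sizeof(long), guarded by BUILD_BUG_ON() so the register block is provably a whole number of longs at compile time. Standard C11 expresses the same guard with static_assert; the struct below is an invented layout used only to show the idiom, not h8300's pt_regs.

#include <assert.h>
#include <stdio.h>

struct fake_regs { long er[8]; long pc; };	/* invented register block */

static_assert(sizeof(struct fake_regs) % sizeof(long) == 0,
	      "register block must be a whole number of longs");

int main(void)
{
	printf("%zu longs\n", sizeof(struct fake_regs) / sizeof(long));
	return 0;
}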
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 7563628..5e2dc7d 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -24,6 +24,16 @@
  * user_regset definitions.
  */
 
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+	unsigned long data = (unsigned long)regs->ctx.Flags;
+
+	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+	return data;
+}
+
 int metag_gp_regs_copyout(const struct pt_regs *regs,
 			  unsigned int pos, unsigned int count,
 			  void *kbuf, void __user *ubuf)
@@ -62,9 +72,7 @@
 	if (ret)
 		goto out;
 	/* TXSTATUS */
-	data = (unsigned long)regs->ctx.Flags;
-	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
-		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+	data = user_txstatus(regs);
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  &data, 4*25, 4*26);
 	if (ret)
@@ -119,6 +127,7 @@
 	if (ret)
 		goto out;
 	/* TXSTATUS */
+	data = user_txstatus(regs);
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &data, 4*25, 4*26);
 	if (ret)
@@ -244,6 +253,8 @@
 	unsigned long long *ptr;
 	int ret, i;
 
+	if (count < 4*13)
+		return -EINVAL;
 	/* Read the entire pipeline before making any changes */
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &rp, 0, 4*13);
@@ -303,7 +314,7 @@
 			const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	void __user *tls;
+	void __user *tls = target->thread.tls_ptr;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index a92994d..bf83dc1 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -485,7 +485,8 @@
 					  &target->thread.fpu,
 					  0, sizeof(elf_fpregset_t));
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
+	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					 &fpr_val, i * sizeof(elf_fpreg_t),
 					 (i + 1) * sizeof(elf_fpreg_t));
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7bd69bd..1d8c24d 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -45,28 +45,9 @@
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
 
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
-
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 977f0a4f..53ec75f 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -633,3 +633,25 @@
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 4063943..e81afc37 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -139,6 +139,8 @@
 
 	printk(KERN_EMERG "System shut down completed.\n"
 	       "Please power this system off now.");
+
+	for (;;);
 }
 
 void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e721..f080abf 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@
   }
 
 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+  . = ALIGN(256);
   .got :
   {
     __toc_start = .;
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d..4119945 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@
 {
 	u32 *key = crypto_tfm_ctx(tfm);
 
-	*key = 0;
+	*key = ~0;
 
 	return 0;
 }
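The one-character fix above restores the conventional CRC32C starting value: the shash default key (the seed) needs to be ~0 to stay compatible with the generic crc32c driver, not 0. The bit-at-a-time reference below shows how the two seeds diverge on the standard "123456789" test vector; it is a plain reference sketch, not the vpmsum implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char *msg = "123456789";

	/* seeded with ~0 and finalized with ~: the standard check value 0xe3069283 */
	printf("~0 seed: %#x\n", ~crc32c(~0u, msg, strlen(msg)));
	/* seeded with 0: a different, incompatible digest */
	printf(" 0 seed: %#x\n", ~crc32c(0u, msg, strlen(msg)));
	return 0;
}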
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 5c45114..b9e3f0a 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -19,16 +19,18 @@
 struct mm_iommu_table_group_mem_t;
 
 extern int isolate_lru_page(struct page *page);	/* from internal.h */
-extern bool mm_iommu_preregistered(void);
-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_get(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem);
-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
-extern void mm_iommu_init(mm_context_t *ctx);
-extern void mm_iommu_cleanup(mm_context_t *ctx);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries);
+extern long mm_iommu_put(struct mm_struct *mm,
+		struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(struct mm_struct *mm);
+extern void mm_iommu_cleanup(struct mm_struct *mm);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 72dac0b..b350ac5 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -439,9 +439,23 @@
 _GLOBAL(pnv_wakeup_tb_loss)
 	ld	r1,PACAR1(r13)
 	/*
-	 * Before entering any idle state, the NVGPRs are saved in the stack
-	 * and they are restored before switching to the process context. Hence
-	 * until they are restored, they are free to be used.
+	 * Before entering any idle state, the NVGPRs are saved on the stack.
+	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+	 * NVGPRs are restored. If we are here, state loss is likely but not
+	 * guaranteed -- neither the ISA207 nor the ISA300 test for reaching
+	 * this point is the same as the test used to restore the NVGPRs:
+	 * the PACA_THREAD_IDLE_STATE test for ISA207, the PSSCR test for
+	 * ISA300, and the SRR1 test for restoring NVGPRs.
+	 *
+	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+	 * guarantee they will always be restored. This might be tightened
+	 * with careful reading of specs (particularly for ISA300) but this
+	 * is already a slow wakeup path and it's simpler to be safe.
+	 */
+	li	r0,1
+	stb	r0,PACA_NAPSTATELOST(r13)
+
+	/*
 	 *
 	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
 	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 270ee30..f516ac5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -915,7 +915,7 @@
 	init_mm.context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&init_mm.context);
+	mm_iommu_init(&init_mm);
 #endif
 	irqstack_early_init();
 	exc_lvl_early_init();
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index b114f8b..73bf6e1 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -115,7 +115,7 @@
 	mm->context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&mm->context);
+	mm_iommu_init(mm);
 #endif
 	return 0;
 }
@@ -156,13 +156,11 @@
 }
 #endif
 
-
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_cleanup(&mm->context);
+	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
 #endif
-
 #ifdef CONFIG_PPC_ICSWX
 	drop_cop(mm->context.acop, mm);
 	kfree(mm->context.cop_lockp);
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e0f1c33..7de7124 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -56,7 +56,7 @@
 	}
 
 	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-			current->pid,
+			current ? current->pid : 0,
 			incr ? '+' : '-',
 			npages << PAGE_SHIFT,
 			mm->locked_vm << PAGE_SHIFT,
@@ -66,12 +66,9 @@
 	return ret;
 }
 
-bool mm_iommu_preregistered(void)
+bool mm_iommu_preregistered(struct mm_struct *mm)
 {
-	if (!current || !current->mm)
-		return false;
-
-	return !list_empty(&current->mm->context.iommu_group_mem_list);
+	return !list_empty(&mm->context.iommu_group_mem_list);
 }
 EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 
@@ -124,19 +121,16 @@
 	return 0;
 }
 
-long mm_iommu_get(unsigned long ua, unsigned long entries,
+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
 	struct page *page = NULL;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
-	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
 			next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			++mem->used;
@@ -154,7 +148,7 @@
 
 	}
 
-	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
+	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 	if (ret)
 		goto unlock_exit;
 
@@ -190,7 +184,7 @@
 		 * of the CMA zone if possible. NOTE: faulting in + migration
 		 * can be expensive. Batching can be considered later
 		 */
-		if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
+		if (is_migrate_cma_page(page)) {
 			if (mm_iommu_move_page_from_cma(page))
 				goto populate;
 			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
@@ -215,11 +209,11 @@
 	mem->entries = entries;
 	*pmem = mem;
 
-	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 unlock_exit:
 	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
+		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
 
 	mutex_unlock(&mem_list_mutex);
 
@@ -264,17 +258,13 @@
 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 {
 	list_del_rcu(&mem->next);
-	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
 	call_rcu(&mem->rcu, mm_iommu_free);
 }
 
-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
 	if (mem->used == 0) {
@@ -297,6 +287,8 @@
 	/* @mapped became 0 so now mappings are disabled, release the region */
 	mm_iommu_release(mem);
 
+	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
@@ -304,14 +296,12 @@
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size)
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua <= ua) &&
 				(ua + size <= mem->ua +
 				 (mem->entries << PAGE_SHIFT))) {
@@ -324,14 +314,12 @@
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries)
+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			ret = mem;
 			break;
@@ -373,17 +361,7 @@
 }
 EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
 
-void mm_iommu_init(mm_context_t *ctx)
+void mm_iommu_init(struct mm_struct *mm)
 {
-	INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
-}
-
-void mm_iommu_cleanup(mm_context_t *ctx)
-{
-	struct mm_iommu_table_group_mem_t *mem, *tmp;
-
-	list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
-		list_del_rcu(&mem->next);
-		mm_iommu_do_free(mem);
-	}
+	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
 }
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index ac082dd..7037ca3 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -313,7 +313,7 @@
 	}
 
 	if (!ret) {
-		unsigned long y;
+		unsigned long y = regs->y;
 
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					 &y,
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7fe88bb..38623e2 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2096,8 +2096,8 @@
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index c2b8d24..6226cb0e 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -35,6 +35,7 @@
 };
 
 void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
 				 struct kvm_memory_slot *dont);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 8f44c5a..f228f74 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -31,6 +31,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/reboot.h>
+#include <asm/nmi.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -158,6 +159,26 @@
 	return 0;
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016, Debug-VM sends NMIs to all CPUs, which makes it
+ * difficult to process CHANNELMSG_UNLOAD in case of a crash. Handle
+ * the unknown NMI only on the first CPU that receives it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+	if (!unknown_nmi_panic)
+		return NMI_DONE;
+
+	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+		return NMI_HANDLED;
+
+	return NMI_DONE;
+}
+#endif
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
@@ -183,6 +204,9 @@
 		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
 			lapic_timer_frequency);
 	}
+
+	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+			     "hv_nmi_unknown");
 #endif
 
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
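The Hyper-V handler above uses atomic_cmpxchg() on a -1 sentinel so that only the first CPU to observe the unknown NMI lets it propagate (and panic, when unknown_nmi_panic is set) while every other CPU swallows its copy. The claim logic maps directly onto C11 atomics, as in this userspace model:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nmi_cpu = ATOMIC_VAR_INIT(-1);

/* Returns 0 for the first claimant (process the NMI here), 1 otherwise (swallow it). */
static int swallow_unknown_nmi(int this_cpu)
{
	int expected = -1;

	if (atomic_compare_exchange_strong(&nmi_cpu, &expected, this_cpu))
		return 0;
	return 1;
}

int main(void)
{
	printf("cpu0 swallow? %d\n", swallow_unknown_nmi(0));	/* 0: first claimant */
	printf("cpu1 swallow? %d\n", swallow_unknown_nmi(1));	/* 1: already claimed */
	return 0;
}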
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372..b5785c1 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46b2f41..eea88fe 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1287,6 +1287,8 @@
 	 * exporting a reliable TSC.
 	 */
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		if (boot_cpu_has(X86_FEATURE_ART))
+			art_related_clocksource = &clocksource_tsc;
 		clocksource_register_khz(&clocksource_tsc, tsc_khz);
 		return 0;
 	}
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index b431539..85024e0 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -156,6 +156,14 @@
 	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
 
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+	cleanup_srcu_struct(&head->track_srcu);
+}
+
 void kvm_page_track_init(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 731044e..e5bc139 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7976,6 +7976,7 @@
 	kvm_free_vcpus(kvm);
 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 	kvm_mmu_uninit_vm(kvm);
+	kvm_page_track_cleanup(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0493c17..333362f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab9..a00a6c0 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -234,23 +234,14 @@
 		return 1;
 
 	for_each_pci_msi_entry(msidesc, dev) {
-		__pci_read_msi_msg(msidesc, &msg);
-		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
-			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-		if (msg.data != XEN_PIRQ_MSI_DATA ||
-		    xen_irq_from_pirq(pirq) < 0) {
-			pirq = xen_allocate_pirq_msi(dev, msidesc);
-			if (pirq < 0) {
-				irq = -ENODEV;
-				goto error;
-			}
-			xen_msi_compose_msg(dev, pirq, &msg);
-			__pci_write_msi_msg(msidesc, &msg);
-			dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
-		} else {
-			dev_dbg(&dev->dev,
-				"xen: msi already bound to pirq=%d\n", pirq);
+		pirq = xen_allocate_pirq_msi(dev, msidesc);
+		if (pirq < 0) {
+			irq = -ENODEV;
+			goto error;
 		}
+		xen_msi_compose_msg(dev, pirq, &msg);
+		__pci_write_msi_msg(msidesc, &msg);
+		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
 		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
 					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
 					       (type == PCI_CAP_ID_MSIX) ?
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 81caceb..ee54ad0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -629,17 +629,8 @@
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-		/*
-		 * If a request wasn't started before the queue was
-		 * marked dying, kill it here or it'll go unnoticed.
-		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-		}
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
-	}
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 0774799..c6fee74 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -182,6 +182,9 @@
 	__set_bit(WRITE_16, filter->write_ok);
 	__set_bit(WRITE_LONG, filter->write_ok);
 	__set_bit(WRITE_LONG_2, filter->write_ok);
+	__set_bit(WRITE_SAME, filter->write_ok);
+	__set_bit(WRITE_SAME_16, filter->write_ok);
+	__set_bit(WRITE_SAME_32, filter->write_ok);
 	__set_bit(ERASE, filter->write_ok);
 	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
 	__set_bit(MODE_SELECT, filter->write_ok);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index d19b09c..54fc90e 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -245,7 +245,7 @@
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 	struct ahash_request *req = &ctx->req;
-	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
 	struct sock *sk2;
 	struct alg_sock *ask2;
 	struct hash_ctx *ctx2;
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bdc67ba..4421f7c 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -160,6 +160,34 @@
 		      DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
 		},
 	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 5520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+		},
+	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 3520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+		},
+	},
+	/*
+	 * Resolves a quirk with the Dell Latitude 3350 that
+	 * causes its Ethernet adapter not to function.
+	 */
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Latitude 3350",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+		},
+	},
 #endif
 	{}
 };
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 1bd8401..d7eb419 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -406,6 +406,7 @@
 	struct files_struct *files = proc->files;
 	unsigned long rlim_cur;
 	unsigned long irqs;
+	int ret;
 
 	if (files == NULL)
 		return -ESRCH;
@@ -416,7 +417,11 @@
 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
 	unlock_task_sighand(proc->tsk, &irqs);
 
-	return __alloc_fd(files, 0, rlim_cur, flags);
+	preempt_enable_no_resched();
+	ret = __alloc_fd(files, 0, rlim_cur, flags);
+	preempt_disable();
+
+	return ret;
 }
 
 /*
@@ -425,8 +430,11 @@
 static void task_fd_install(
 	struct binder_proc *proc, unsigned int fd, struct file *file)
 {
-	if (proc->files)
+	if (proc->files) {
+		preempt_enable_no_resched();
 		__fd_install(proc->files, fd, file);
+		preempt_disable();
+	}
 }
 
 /*
@@ -454,6 +462,7 @@
 {
 	trace_binder_lock(tag);
 	mutex_lock(&binder_main_lock);
+	preempt_disable();
 	trace_binder_locked(tag);
 }
 
@@ -461,8 +470,62 @@
 {
 	trace_binder_unlock(tag);
 	mutex_unlock(&binder_main_lock);
+	preempt_enable();
 }
 
+static inline void *kzalloc_preempt_disabled(size_t size)
+{
+	void *ptr;
+
+	ptr = kzalloc(size, GFP_NOWAIT);
+	if (ptr)
+		return ptr;
+
+	preempt_enable_no_resched();
+	ptr = kzalloc(size, GFP_KERNEL);
+	preempt_disable();
+
+	return ptr;
+}
+
+static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
+{
+	long ret;
+
+	preempt_enable_no_resched();
+	ret = copy_to_user(to, from, n);
+	preempt_disable();
+	return ret;
+}
+
+static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+{
+	long ret;
+
+	preempt_enable_no_resched();
+	ret = copy_from_user(to, from, n);
+	preempt_disable();
+	return ret;
+}
+
+#define get_user_preempt_disabled(x, ptr)	\
+({						\
+	int __ret;				\
+	preempt_enable_no_resched();		\
+	__ret = get_user(x, ptr);		\
+	preempt_disable();			\
+	__ret;					\
+})
+
+#define put_user_preempt_disabled(x, ptr)	\
+({						\
+	int __ret;				\
+	preempt_enable_no_resched();		\
+	__ret = put_user(x, ptr);		\
+	preempt_disable();			\
+	__ret;					\
+})
+
 static void binder_set_nice(long nice)
 {
 	long min_nice;
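The binder changes in this patch take the global binder_main_lock with preemption disabled, so every helper that may sleep (kzalloc with GFP_KERNEL, copy_to/from_user, get_user/put_user, __alloc_fd, __fd_install) is bracketed by preempt_enable_no_resched()/preempt_disable(). Reduced to its shape, the wrapper pattern is a GCC statement expression around a possibly-sleeping call; the fake_preempt_* helpers below only model the counter and are not the kernel API.

#include <stdio.h>

static int preempt_count = 1;		/* pretend we entered with preemption disabled */

static void fake_preempt_enable(void)  { preempt_count--; }
static void fake_preempt_disable(void) { preempt_count++; }

static int maybe_sleeping_call(int x)  { return x * 2; }

#define call_preempt_enabled(expr)		\
({						\
	int __ret;				\
	fake_preempt_enable();			\
	__ret = (expr);				\
	fake_preempt_disable();			\
	__ret;					\
})

int main(void)
{
	int v = call_preempt_enabled(maybe_sleeping_call(21));

	printf("result=%d, preempt_count=%d\n", v, preempt_count);
	return 0;
}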
@@ -595,6 +658,8 @@
 	else
 		mm = get_task_mm(proc->tsk);
 
+	preempt_enable_no_resched();
+
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = proc->vma;
@@ -649,6 +714,9 @@
 		up_write(&mm->mmap_sem);
 		mmput(mm);
 	}
+
+	preempt_disable();
+
 	return 0;
 
 free_range:
@@ -671,6 +739,9 @@
 		up_write(&mm->mmap_sem);
 		mmput(mm);
 	}
+
+	preempt_disable();
+
 	return -ENOMEM;
 }
 
@@ -939,7 +1010,7 @@
 			return NULL;
 	}
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc_preempt_disabled(sizeof(*node));
 	if (node == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_NODE);
@@ -1083,7 +1154,7 @@
 		else
 			return ref;
 	}
-	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	new_ref = kzalloc_preempt_disabled(sizeof(*ref));
 	if (new_ref == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_REF);
@@ -1955,14 +2026,14 @@
 	e->to_proc = target_proc->pid;
 
 	/* TODO: reuse incoming transaction for reply */
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	t = kzalloc_preempt_disabled(sizeof(*t));
 	if (t == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_t_failed;
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 
-	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+	tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
 	if (tcomplete == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_tcomplete_failed;
@@ -2023,14 +2094,14 @@
 				      ALIGN(tr->data_size, sizeof(void *)));
 	offp = off_start;
 
-	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+	if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
 			   tr->data.ptr.buffer, tr->data_size)) {
 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
 				proc->pid, thread->pid);
 		return_error = BR_FAILED_REPLY;
 		goto err_copy_data_failed;
 	}
-	if (copy_from_user(offp, (const void __user *)(uintptr_t)
+	if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
 			   tr->data.ptr.offsets, tr->offsets_size)) {
 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 				proc->pid, thread->pid);
@@ -2148,9 +2219,10 @@
 				return_error = BR_FAILED_REPLY;
 				goto err_bad_offset;
 			}
-			if (copy_from_user(sg_bufp,
-					   (const void __user *)(uintptr_t)
-					   bp->buffer, bp->length)) {
+			if (copy_from_user_preempt_disabled(
+					sg_bufp,
+					(const void __user *)(uintptr_t)
+					bp->buffer, bp->length)) {
 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 						  proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
@@ -2257,7 +2329,7 @@
 	void __user *end = buffer + size;
 
 	while (ptr < end && thread->return_error == BR_OK) {
-		if (get_user(cmd, (uint32_t __user *)ptr))
+		if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		trace_binder_command(cmd);
@@ -2275,7 +2347,7 @@
 			struct binder_ref *ref;
 			const char *debug_string;
 
-			if (get_user(target, (uint32_t __user *)ptr))
+			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 			if (target == 0 && context->binder_context_mgr_node &&
@@ -2327,10 +2399,10 @@
 			binder_uintptr_t cookie;
 			struct binder_node *node;
 
-			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			node = binder_get_node(proc, node_ptr);
@@ -2388,7 +2460,7 @@
 			binder_uintptr_t data_ptr;
 			struct binder_buffer *buffer;
 
-			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 
@@ -2430,7 +2502,8 @@
 		case BC_REPLY_SG: {
 			struct binder_transaction_data_sg tr;
 
-			if (copy_from_user(&tr, ptr, sizeof(tr)))
+			if (copy_from_user_preempt_disabled(&tr, ptr,
+							    sizeof(tr)))
 				return -EFAULT;
 			ptr += sizeof(tr);
 			binder_transaction(proc, thread, &tr.transaction_data,
@@ -2441,7 +2514,7 @@
 		case BC_REPLY: {
 			struct binder_transaction_data tr;
 
-			if (copy_from_user(&tr, ptr, sizeof(tr)))
+			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
 				return -EFAULT;
 			ptr += sizeof(tr);
 			binder_transaction(proc, thread, &tr,
@@ -2492,10 +2565,10 @@
 			struct binder_ref *ref;
 			struct binder_ref_death *death;
 
-			if (get_user(target, (uint32_t __user *)ptr))
+			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			ref = binder_get_ref(proc, target, false);
@@ -2524,7 +2597,7 @@
 						proc->pid, thread->pid);
 					break;
 				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				death = kzalloc_preempt_disabled(sizeof(*death));
 				if (death == NULL) {
 					thread->return_error = BR_ERROR;
 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2578,8 +2651,7 @@
 			struct binder_work *w;
 			binder_uintptr_t cookie;
 			struct binder_ref_death *death = NULL;
-
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 
 			ptr += sizeof(cookie);
@@ -2611,7 +2683,8 @@
 					wake_up_interruptible(&proc->wait);
 				}
 			}
-		} break;
+		}
+		break;
 
 		default:
 			pr_err("%d:%d unknown command %d\n",
@@ -2660,7 +2733,7 @@
 	int wait_for_proc_work;
 
 	if (*consumed == 0) {
-		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 	}
@@ -2671,7 +2744,7 @@
 
 	if (thread->return_error != BR_OK && ptr < end) {
 		if (thread->return_error2 != BR_OK) {
-			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 			binder_stat_br(proc, thread, thread->return_error2);
@@ -2679,7 +2752,7 @@
 				goto done;
 			thread->return_error2 = BR_OK;
 		}
-		if (put_user(thread->return_error, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		binder_stat_br(proc, thread, thread->return_error);
@@ -2757,7 +2830,7 @@
 		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
 			cmd = BR_TRANSACTION_COMPLETE;
-			if (put_user(cmd, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 
@@ -2799,14 +2872,14 @@
 				node->has_weak_ref = 0;
 			}
 			if (cmd != BR_NOOP) {
-				if (put_user(cmd, (uint32_t __user *)ptr))
+				if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
 					return -EFAULT;
 				ptr += sizeof(uint32_t);
-				if (put_user(node->ptr,
+				if (put_user_preempt_disabled(node->ptr,
 					     (binder_uintptr_t __user *)ptr))
 					return -EFAULT;
 				ptr += sizeof(binder_uintptr_t);
-				if (put_user(node->cookie,
+				if (put_user_preempt_disabled(node->cookie,
 					     (binder_uintptr_t __user *)ptr))
 					return -EFAULT;
 				ptr += sizeof(binder_uintptr_t);
@@ -2850,11 +2923,10 @@
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
-			if (put_user(cmd, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
-			if (put_user(death->cookie,
-				     (binder_uintptr_t __user *)ptr))
+			if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			binder_stat_br(proc, thread, cmd);
@@ -2921,10 +2993,10 @@
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
 
-		if (put_user(cmd, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr)))
+		if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
 			return -EFAULT;
 		ptr += sizeof(tr);
 
@@ -2966,7 +3038,7 @@
 		binder_debug(BINDER_DEBUG_THREADS,
 			     "%d:%d BR_SPAWN_LOOPER\n",
 			     proc->pid, thread->pid);
-		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+		if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
 			return -EFAULT;
 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
 	}
@@ -3041,7 +3113,7 @@
 			break;
 	}
 	if (*p == NULL) {
-		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		thread = kzalloc_preempt_disabled(sizeof(*thread));
 		if (thread == NULL)
 			return NULL;
 		binder_stats_created(BINDER_STAT_THREAD);
@@ -3145,7 +3217,7 @@
 		ret = -EINVAL;
 		goto out;
 	}
-	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+	if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3163,7 +3235,7 @@
 		trace_binder_write_done(ret);
 		if (ret < 0) {
 			bwr.read_consumed = 0;
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
 			goto out;
 		}
@@ -3177,7 +3249,7 @@
 		if (!list_empty(&proc->todo))
 			wake_up_interruptible(&proc->wait);
 		if (ret < 0) {
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
 			goto out;
 		}
@@ -3187,7 +3259,7 @@
 		     proc->pid, thread->pid,
 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
-	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+	if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3271,7 +3343,7 @@
 			goto err;
 		break;
 	case BINDER_SET_MAX_THREADS:
-		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+		if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
 			ret = -EINVAL;
 			goto err;
 		}
@@ -3294,9 +3366,8 @@
 			ret = -EINVAL;
 			goto err;
 		}
-		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
-			     &ver->protocol_version)) {
-			ret = -EINVAL;
+		if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
+			ret = -EINVAL;
 			goto err;
 		}
 		break;
@@ -3357,12 +3428,13 @@
 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	int ret;
+
 	struct vm_struct *area;
 	struct binder_proc *proc = filp->private_data;
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
-	if (proc->tsk != current)
+	if (proc->tsk != current->group_leader)
 		return -EINVAL;
 
 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
@@ -3417,7 +3489,11 @@
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
 
-	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+	/* binder_update_page_range assumes preemption is disabled */
+	preempt_disable();
+	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+	preempt_enable_no_resched();
+	if (ret) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;
@@ -3464,9 +3540,9 @@
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
-	get_task_struct(current);
-	proc->tsk = current;
-	proc->vma_vm_mm = current->mm;
+	get_task_struct(current->group_leader);
+	proc->tsk = current->group_leader;
+	proc->vma_vm_mm = current->group_leader->mm;
 	INIT_LIST_HEAD(&proc->todo);
 	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
@@ -3703,8 +3779,12 @@
 	int defer;
 
 	do {
-		binder_lock(__func__);
+		trace_binder_lock(__func__);
+		mutex_lock(&binder_main_lock);
+		trace_binder_locked(__func__);
+
 		mutex_lock(&binder_deferred_lock);
+		preempt_disable();
 		if (!hlist_empty(&binder_deferred_list)) {
 			proc = hlist_entry(binder_deferred_list.first,
 					struct binder_proc, deferred_work_node);
@@ -3730,7 +3810,9 @@
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
 
-		binder_unlock(__func__);
+		trace_binder_unlock(__func__);
+		mutex_unlock(&binder_main_lock);
+		preempt_enable_no_resched();
 		if (files)
 			put_files_struct(files);
 	} while (proc);
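
Note on the binder.c hunks above: the patch converts the bare get_user()/put_user()/copy_from_user()/copy_to_user()/kzalloc(GFP_KERNEL) calls on these paths to *_preempt_disabled() wrappers because binder now runs these sections with preemption disabled under binder_main_lock (see the preempt_disable()/preempt_enable_no_resched() pairs in binder_mmap() and binder_deferred_func()). The user-copy and allocation helpers may sleep, so each wrapper has to give preemption back around the underlying call. The wrapper definitions live in binder.c outside the hunks shown here; the following is only a rough sketch of the idea, with the exact bodies assumed rather than taken from this excerpt:

/*
 * Hedged sketch only -- the real definitions are introduced elsewhere in
 * binder.c by this patch.  The pattern: re-enable preemption, perform the
 * possibly-sleeping operation, then disable preemption again before
 * returning to the caller, which still expects to hold binder_main_lock
 * with preemption off.
 */
#define get_user_preempt_disabled(x, ptr)		\
({							\
	int __ret;					\
	preempt_enable_no_resched();			\
	__ret = get_user(x, ptr);			\
	preempt_disable();				\
	__ret;						\
})

static void *kzalloc_preempt_disabled(size_t size)
{
	void *ptr;

	/* kzalloc(GFP_KERNEL) may sleep, so give preemption back first */
	preempt_enable_no_resched();
	ptr = kzalloc(size, GFP_KERNEL);
	preempt_disable();

	return ptr;
}
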
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index bf43b5d..83f1439 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -218,6 +218,7 @@
 	{ .compatible = "img,boston-lcd", .data = &boston_config },
 	{ .compatible = "mti,malta-lcd", .data = &malta_config },
 	{ .compatible = "mti,sead3-lcd", .data = &sead3_config },
+	{ /* sentinel */ }
 };
 
 /**
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index d82ce17..4609244 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -416,6 +416,7 @@
 	_CPU_ATTR(online, &__cpu_online_mask),
 	_CPU_ATTR(possible, &__cpu_possible_mask),
 	_CPU_ATTR(present, &__cpu_present_mask),
+	_CPU_ATTR(core_ctl_isolated, &__cpu_isolated_mask),
 };
 
 /*
@@ -651,6 +652,7 @@
 	&cpu_attrs[0].attr.attr,
 	&cpu_attrs[1].attr.attr,
 	&cpu_attrs[2].attr.attr,
+	&cpu_attrs[3].attr.attr,
 	&dev_attr_kernel_max.attr,
 	&dev_attr_offline.attr,
 	&dev_attr_isolated.attr,
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 26cf6b9..a95e1e5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -294,8 +294,7 @@
 	"/lib/firmware/updates/" UTS_RELEASE,
 	"/lib/firmware/updates",
 	"/lib/firmware/" UTS_RELEASE,
-	"/lib/firmware",
-	"/firmware/image"
+	"/lib/firmware"
 };
 
 /*
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 4a99ac7..9959c76 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -55,6 +55,7 @@
 struct amd768_priv {
 	void __iomem *iobase;
 	struct pci_dev *pcidev;
+	u32 pmbase;
 };
 
 static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@
 	if (pmbase == 0)
 		return -EIO;
 
-	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
-				PMBASE_SIZE, DRV_NAME)) {
+	if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
 		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
 			pmbase + 0xF0);
-		return -EBUSY;
+		err = -EBUSY;
+		goto out;
 	}
 
-	priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
-			PMBASE_SIZE);
+	priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
 	if (!priv->iobase) {
 		pr_err(DRV_NAME "Cannot map ioport\n");
-		return -ENOMEM;
+		err = -EINVAL;
+		goto err_iomap;
 	}
 
 	amd_rng.priv = (unsigned long)priv;
+	priv->pmbase = pmbase;
 	priv->pcidev = pdev;
 
 	pr_info(DRV_NAME " detected\n");
-	return devm_hwrng_register(&pdev->dev, &amd_rng);
+	err = hwrng_register(&amd_rng);
+	if (err) {
+		pr_err(DRV_NAME " registering failed (%d)\n", err);
+		goto err_hwrng;
+	}
+	return 0;
+
+err_hwrng:
+	ioport_unmap(priv->iobase);
+err_iomap:
+	release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+	kfree(priv);
+	return err;
 }
 
 static void __exit mod_exit(void)
 {
+	struct amd768_priv *priv;
+
+	priv = (struct amd768_priv *)amd_rng.priv;
+
+	hwrng_unregister(&amd_rng);
+
+	ioport_unmap(priv->iobase);
+
+	release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+	kfree(priv);
 }
 
 module_init(mod_init);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e7a2459..e1d421a 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -31,6 +31,9 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+
+#define PFX	KBUILD_MODNAME ": "
+
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54
 
@@ -82,6 +85,7 @@
 
 static int __init mod_init(void)
 {
+	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	void __iomem *mem;
@@ -89,27 +93,43 @@
 
 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
-		if (ent) {
-			rng_base = pci_resource_start(pdev, 0);
-			if (rng_base == 0)
-				return -ENODEV;
-
-			mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
-			if (!mem)
-				return -ENOMEM;
-			geode_rng.priv = (unsigned long)mem;
-
-			pr_info("AMD Geode RNG detected\n");
-			return devm_hwrng_register(&pdev->dev, &geode_rng);
-		}
+		if (ent)
+			goto found;
 	}
-
 	/* Device not found. */
-	return -ENODEV;
+	goto out;
+
+found:
+	rng_base = pci_resource_start(pdev, 0);
+	if (rng_base == 0)
+		goto out;
+	err = -ENOMEM;
+	mem = ioremap(rng_base, 0x58);
+	if (!mem)
+		goto out;
+	geode_rng.priv = (unsigned long)mem;
+
+	pr_info("AMD Geode RNG detected\n");
+	err = hwrng_register(&geode_rng);
+	if (err) {
+		pr_err(PFX "RNG registering failed (%d)\n",
+		       err);
+		goto err_unmap;
+	}
+out:
+	return err;
+
+err_unmap:
+	iounmap(mem);
+	goto out;
 }
 
 static void __exit mod_exit(void)
 {
+	void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+	hwrng_unregister(&geode_rng);
+	iounmap(mem);
 }
 
 module_init(mod_init);
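
Note on the amd-rng and geode-rng hunks above: both drivers move off the devm_* managed allocators and back to manual resource management, so every error path in mod_init() and the whole of mod_exit() must now release resources explicitly, in reverse order of acquisition, via the usual goto-unwind idiom. A minimal, hedged illustration of that idiom follows; the function, constants, and device name are placeholders and mirror the amd-rng probe flow rather than adding anything to either driver:

/* needs <linux/slab.h>, <linux/ioport.h>, <linux/io.h> */
static int __init example_rng_init(void)
{
	void __iomem *base;
	void *priv;
	int err;

	priv = kzalloc(64, GFP_KERNEL);			/* placeholder size */
	if (!priv)
		return -ENOMEM;

	if (!request_region(0x1000, 8, "example-rng")) {	/* placeholder I/O range */
		err = -EBUSY;
		goto out_free;
	}

	base = ioport_map(0x1000, 8);
	if (!base) {
		err = -EINVAL;
		goto out_release;
	}

	/* ... register with the subsystem here; unwind through the labels on failure ... */
	return 0;

out_release:
	release_region(0x1000, 8);
out_free:
	kfree(priv);
	return err;
}
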
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 3bbd2a5..2acaa77 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1598,7 +1598,7 @@
 		.a2w_reg = A2W_PLLH_AUX,
 		.load_mask = CM_PLLH_LOADAUX,
 		.hold_mask = 0,
-		.fixed_divider = 10),
+		.fixed_divider = 1),
 	[BCM2835_PLLH_PIX]	= REGISTER_PLL_DIV(
 		.name = "pllh_pix",
 		.source_pll = "pllh",
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 83db1416..ece2f00 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -2101,7 +2101,7 @@
 	/* prevent racing with updates to the clock topology */
 	clk_prepare_lock();
 
-	if (core->parent == parent)
+	if (core->parent == parent && !(core->flags & CLK_IS_MEASURE))
 		goto out;
 
 	/* verify ops for for multi-parent clks */
@@ -2599,7 +2599,7 @@
 	.release	= seq_release,
 };
 
-static void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
+void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
 {
 	if (IS_ERR_OR_NULL(clk))
 		return;
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 331e086..f0db049 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -23,6 +23,7 @@
 
 /* Debugfs API to print the enabled clocks */
 void clock_debug_print_enabled(void);
+void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f);
 
 #else
 /* All these casts to avoid ifdefs in clkdev... */
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 13f747a..7226dd3 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -169,6 +169,24 @@
 	  Say Y if you want to support video devices and functionality such as
 	  video encode/decode.
 
+config MSM_CAMCC_SDM845
+	tristate "SDM845 Camera Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the camera clock controller on Qualcomm Technologies, Inc.
+	  sdm845 devices.
+	  Say Y if you want to support camera devices and functionality such as
+	  capturing pictures.
+
+config MSM_DISPCC_SDM845
+	tristate "SDM845 Display Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the display clock controller on Qualcomm Technologies, Inc.
+	  sdm845 devices.
+	  Say Y if you want to support display devices and functionality such as
+	  splash screen.
+
 config CLOCK_QPNP_DIV
 	tristate "QPNP PMIC clkdiv driver"
 	depends on COMMON_CLK_QCOM && SPMI
@@ -186,3 +204,25 @@
 	  SoCs. It accepts requests from other hardware subsystems via RSC.
 	  Say Y to support the clocks managed by RPMh VRM/ARC on platforms
 	  such as sdm845.
+
+config CLOCK_CPU_OSM
+	tristate "OSM CPU Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the OSM clock controller.
+	  The Operating State Manager (OSM) is a hardware engine used by
+	  some Qualcomm Technologies, Inc. (QTI) SoCs to manage frequency
+	  and voltage scaling in hardware.  OSM can control frequency and
+	  voltage requests for multiple clusters through multiple OSM
+	  domains.
+	  Say Y if you want to support OSM clocks.
+
+config MSM_GPUCC_SDM845
+	tristate "SDM845 Graphics Clock Controller"
+	depends on MSM_GCC_SDM845
+	help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
+	  sdm845 devices.
+	  Say Y if you want to support graphics controller devices.
+
+source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 321587b..1d042cd 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -10,27 +10,33 @@
 clk-qcom-y += clk-regmap-divider.o
 clk-qcom-y += clk-regmap-mux.o
 clk-qcom-y += reset.o clk-voter.o
-clk-qcom-y += clk-dummy.o
+clk-qcom-y += clk-dummy.o clk-debug.o
 clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
 
 # Keep alphabetically sorted by config
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_CLOCK_CPU_OSM) += clk-cpu-osm.o
+obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
 obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
 obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
+obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
+obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
-obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o
-obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
+obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o debugcc-sdm845.o
+obj-$(CONFIG_MSM_GPUCC_SDM845) += gpucc-sdm845.o
 obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
-obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
-obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
+
+obj-y += mdss/
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
new file mode 100644
index 0000000..a274975
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -0,0 +1,1943 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_CX_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CAM_CC_PLL0_OUT_EVEN,
+	P_CAM_CC_PLL1_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_ODD,
+	P_CAM_CC_PLL3_OUT_EVEN,
+	P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL2_OUT_EVEN, 1 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 5 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_even",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll3_out_even",
+	"cam_cc_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL2_OUT_EVEN, 1 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL2_OUT_ODD, 4 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 5 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_even",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll2_out_odd",
+	"cam_cc_pll3_out_even",
+	"cam_cc_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco fabia_vco[] = {
+	{ 250000000, 2000000000, 0 },
+	{ 125000000, 1000000000, 1 },
+};
+
+static const struct pll_config cam_cc_pll0_config = {
+	.l = 0x1f,
+	.frac = 0x4000,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+	{ 0x0, 1 },
+	{ 0x1, 2 },
+	{ 0x3, 4 },
+	{ 0x7, 8 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll0" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll1_config = {
+	.l = 0x2a,
+	.frac = 0x1556,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+	.offset = 0x1000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+	.offset = 0x1000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll1_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll1" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll2_config = {
+	.l = 0x32,
+	.frac = 0x0,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+	.offset = 0x2000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll2",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_MX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = {
+	.offset = 0x2000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_odd[] = {
+	{ 0x0, 1 },
+	{ 0x3, 3 },
+	{ 0x5, 5 },
+	{ 0x7, 7 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_odd = {
+	.offset = 0x2000,
+	.post_div_shift = 12,
+	.post_div_table = post_div_table_fabia_odd,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_odd),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_odd",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll3_config = {
+	.l = 0x14,
+	.frac = 0x0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+	.offset = 0x3000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll3",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+	.offset = 0x3000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll3_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll3" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+	.cmd_rcgr = 0x600c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_bps_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	{ }
+};
+
+
+static struct clk_rcg2 cam_cc_cci_clk_src = {
+	.cmd_rcgr = 0xb0d8,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_cci_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cci_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 37500000,
+			LOW, 50000000,
+			NOMINAL, 100000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_ODD, 3, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+	.cmd_rcgr = 0x9060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cphy_rx_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 300000000,
+			LOW, 320000000,
+			HIGH, 384000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(240000000, P_CAM_CC_PLL2_OUT_EVEN, 2, 0, 0),
+	F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x5004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi0phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x5028,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi1phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x504c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi2phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+	.cmd_rcgr = 0x6038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fast_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 100000000,
+			LOW, 200000000,
+			LOW_L1, 300000000,
+			NOMINAL, 400000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+	.cmd_rcgr = 0xb0b0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fd_core_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 400000000,
+			LOW_L1, 538666667,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+	.cmd_rcgr = 0xb088,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_icp_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 400000000,
+			LOW_L1, 538666667,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+	.cmd_rcgr = 0x900c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+	F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+	.cmd_rcgr = 0x9038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+	.cmd_rcgr = 0xa00c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+	.cmd_rcgr = 0xa030,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+	.cmd_rcgr = 0xb004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+	.cmd_rcgr = 0xb024,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+	.cmd_rcgr = 0x700c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP6(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 538666667,
+			HIGH, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ipe_1_clk_src = {
+	.cmd_rcgr = 0x800c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP6(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 538666667,
+			HIGH, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+	.cmd_rcgr = 0xb04c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_jpeg_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+	.cmd_rcgr = 0xb0f8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.enable_safe_config = true,
+	.freq_tbl = ftbl_cam_cc_lrme_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_lrme_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 384000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+	F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
+	F(34285714, P_CAM_CC_PLL2_OUT_EVEN, 14, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+	.cmd_rcgr = 0x4004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+	.cmd_rcgr = 0x4024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+	.cmd_rcgr = 0x4044,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk2_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+	.cmd_rcgr = 0x4064,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk3_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(60000000, P_CAM_CC_PLL0_OUT_EVEN, 10, 0, 0),
+	F(66666667, P_CAM_CC_PLL0_OUT_EVEN, 9, 0, 0),
+	F(73846154, P_CAM_CC_PLL2_OUT_EVEN, 6.5, 0, 0),
+	F(80000000, P_CAM_CC_PLL2_OUT_EVEN, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+	.cmd_rcgr = 0x6054,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_slow_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 60000000,
+			LOW, 66666667,
+			LOW_L1, 73846154,
+			NOMINAL, 80000000),
+	},
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+	.halt_reg = 0x606c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x606c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+	.halt_reg = 0x6050,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x6050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+	.halt_reg = 0x6034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+	.halt_reg = 0x6024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_bps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+	.halt_reg = 0xb12c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb12c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+	.halt_reg = 0xb124,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb124,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cci_clk = {
+	.halt_reg = 0xb0f0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0f0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cci_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cci_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+	.halt_reg = 0xb11c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xb11c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cpas_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+	.halt_reg = 0x501c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x501c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi0phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi0phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+	.halt_reg = 0x5040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi1phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi1phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+	.halt_reg = 0x5064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi2phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi2phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+	.halt_reg = 0x5020,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+	.halt_reg = 0x5044,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+	.halt_reg = 0x5068,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_debug_clk = {
+	.halt_reg = 0xc008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_debug_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+	.halt_reg = 0xb0c8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+	.halt_reg = 0xb0d0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0d0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_uar_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_apb_clk = {
+	.halt_reg = 0xb084,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_apb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+	.halt_reg = 0xb078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+	.halt_reg = 0xb0a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_icp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+	.halt_reg = 0xb07c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb07c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_cti_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+	.halt_reg = 0xb080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_ts_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+	.halt_reg = 0x907c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x907c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+	.halt_reg = 0x9024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+	.halt_reg = 0x9078,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x9078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+	.halt_reg = 0x9050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+	.halt_reg = 0x9034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+	.halt_reg = 0xa054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+	.halt_reg = 0xa024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+	.halt_reg = 0xa050,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xa050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+	.halt_reg = 0xa048,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+	.halt_reg = 0xa02c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa02c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+	.halt_reg = 0xb01c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+	.halt_reg = 0xb044,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xb044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+	.halt_reg = 0xb03c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb03c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+	.halt_reg = 0x703c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x703c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+	.halt_reg = 0x7038,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x7038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+	.halt_reg = 0x7034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+	.halt_reg = 0x7024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+	.halt_reg = 0x803c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x803c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+	.halt_reg = 0x8038,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x8038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+	.halt_reg = 0x8034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+	.halt_reg = 0x8024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+	.halt_reg = 0xb064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_jpeg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_jpeg_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+	.halt_reg = 0xb110,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb110,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_lrme_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_lrme_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+	.halt_reg = 0x401c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x401c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+	.halt_reg = 0x403c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x403c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+	.halt_reg = 0x405c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x405c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+	.halt_reg = 0x407c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x407c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_pll_test_clk = {
+	.halt_reg = 0xc014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll_test_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+	.halt_reg = 0xb13c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb13c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_soc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+	.halt_reg = 0xb0a8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_sys_tmr_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *cam_cc_sdm845_clocks[] = {
+	[CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+	[CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+	[CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+	[CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+	[CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+	[CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+	[CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+	[CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr,
+	[CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr,
+	[CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+	[CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+	[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+	[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+	[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+	[CAM_CC_DEBUG_CLK] = &cam_cc_debug_clk.clkr,
+	[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+	[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+	[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+	[CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+	[CAM_CC_ICP_APB_CLK] = &cam_cc_icp_apb_clk.clkr,
+	[CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+	[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+	[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+	[CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+	[CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+	[CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+	[CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+	[CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+	[CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+	[CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+	[CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+	[CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+	[CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+	[CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+	[CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+	[CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+	[CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+	[CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+	[CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+	[CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+	[CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+	[CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+	[CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+	[CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+	[CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+	[CAM_CC_IPE_1_CLK_SRC] = &cam_cc_ipe_1_clk_src.clkr,
+	[CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+	[CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+	[CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+	[CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+	[CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+	[CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+	[CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+	[CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+	[CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+	[CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+	[CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+	[CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+	[CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+	[CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+	[CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+	[CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+	[CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+	[CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr,
+	[CAM_CC_PLL2_OUT_ODD] = &cam_cc_pll2_out_odd.clkr,
+	[CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+	[CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+	[CAM_CC_PLL_TEST_CLK] = &cam_cc_pll_test_clk.clkr,
+	[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+	[CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+	[CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static const struct qcom_reset_map cam_cc_sdm845_resets[] = {
+	[TITAN_CAM_CC_BPS_BCR] = { 0x6000 },
+	[TITAN_CAM_CC_CAMNOC_BCR] = { 0xb120 },
+	[TITAN_CAM_CC_CCI_BCR] = { 0xb0d4 },
+	[TITAN_CAM_CC_CPAS_BCR] = { 0xb118 },
+	[TITAN_CAM_CC_CSI0PHY_BCR] = { 0x5000 },
+	[TITAN_CAM_CC_CSI1PHY_BCR] = { 0x5024 },
+	[TITAN_CAM_CC_CSI2PHY_BCR] = { 0x5048 },
+	[TITAN_CAM_CC_FD_BCR] = { 0xb0ac },
+	[TITAN_CAM_CC_ICP_BCR] = { 0xb074 },
+	[TITAN_CAM_CC_IFE_0_BCR] = { 0x9000 },
+	[TITAN_CAM_CC_IFE_1_BCR] = { 0xa000 },
+	[TITAN_CAM_CC_IFE_LITE_BCR] = { 0xb000 },
+	[TITAN_CAM_CC_IPE_0_BCR] = { 0x7000 },
+	[TITAN_CAM_CC_IPE_1_BCR] = { 0x8000 },
+	[TITAN_CAM_CC_JPEG_BCR] = { 0xb048 },
+	[TITAN_CAM_CC_LRME_BCR] = { 0xb0f4 },
+	[TITAN_CAM_CC_MCLK0_BCR] = { 0x4000 },
+	[TITAN_CAM_CC_MCLK1_BCR] = { 0x4020 },
+	[TITAN_CAM_CC_MCLK2_BCR] = { 0x4040 },
+	[TITAN_CAM_CC_MCLK3_BCR] = { 0x4060 },
+	[TITAN_CAM_CC_TITAN_TOP_BCR] = { 0xb130 },
+};
+
+static const struct regmap_config cam_cc_sdm845_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0xd004,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc cam_cc_sdm845_desc = {
+	.config = &cam_cc_sdm845_regmap_config,
+	.clks = cam_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(cam_cc_sdm845_clocks),
+	.resets = cam_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(cam_cc_sdm845_resets),
+};
+
+static const struct of_device_id cam_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,cam_cc-sdm845" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sdm845_match_table);
+
+static int cam_cc_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret = 0;
+
+	regmap = qcom_cc_map(pdev, &cam_cc_sdm845_desc);
+	if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "Failed to map the Camera CC registers\n");
+		return PTR_ERR(regmap);
+	}
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+	clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+	clk_fabia_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+	clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+
+	ret = qcom_cc_really_probe(pdev, &cam_cc_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register Camera CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Camera CC clocks\n");
+	return ret;
+}
+
+static struct platform_driver cam_cc_sdm845_driver = {
+	.probe		= cam_cc_sdm845_probe,
+	.driver		= {
+		.name	= "cam_cc-sdm845",
+		.of_match_table = cam_cc_sdm845_match_table,
+	},
+};
+
+static int __init cam_cc_sdm845_init(void)
+{
+	return platform_driver_register(&cam_cc_sdm845_driver);
+}
+core_initcall(cam_cc_sdm845_init);
+
+static void __exit cam_cc_sdm845_exit(void)
+{
+	platform_driver_unregister(&cam_cc_sdm845_driver);
+}
+module_exit(cam_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cam_cc-sdm845");
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 53f736c..51a5e0b 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -23,6 +23,7 @@
 
 #include "clk-branch.h"
 #include "clk-regmap.h"
+#include "clk-debug.h"
 
 static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
 {
@@ -338,6 +339,7 @@
 	.recalc_rate = clk_branch2_recalc_rate,
 	.set_flags = clk_branch_set_flags,
 	.list_registers = clk_branch2_list_registers,
+	.debug_init = clk_debug_measure_add,
 };
 EXPORT_SYMBOL_GPL(clk_branch2_ops);
 
@@ -393,6 +395,7 @@
 	.disable = clk_gate2_disable,
 	.is_enabled = clk_is_enabled_regmap,
 	.list_registers = clk_gate2_list_registers,
+	.debug_init = clk_debug_measure_add,
 };
 EXPORT_SYMBOL_GPL(clk_gate2_ops);
 
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
new file mode 100644
index 0000000..d5e2be6
--- /dev/null
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -0,0 +1,2619 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <soc/qcom/scm.h>
+#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+#include "clk-voter.h"
+
+#define OSM_TABLE_SIZE			40
+#define SINGLE_CORE			1
+#define MAX_CLUSTER_CNT			3
+#define MAX_MEM_ACC_VAL_PER_LEVEL	3
+#define MAX_CORE_COUNT			4
+#define CORE_COUNT_VAL(val)		(((val) & GENMASK(18, 16)) >> 16)
+
+#define OSM_CYCLE_COUNTER_CTRL_REG		0x760
+#define OSM_CYCLE_COUNTER_USE_XO_EDGE_EN	BIT(8)
+
+#define OSM_REG_SIZE			32
+
+#define L3_EFUSE_SHIFT			0
+#define L3_EFUSE_MASK			0
+#define PWRCL_EFUSE_SHIFT		0
+#define PWRCL_EFUSE_MASK		0
+#define PERFCL_EFUSE_SHIFT		29
+#define PERFCL_EFUSE_MASK		0x7
+
+#define ENABLE_REG			0x0
+#define FREQ_REG			0x110
+#define VOLT_REG			0x114
+#define OVERRIDE_REG			0x118
+#define SPM_CC_INC_HYSTERESIS		0x1c
+#define SPM_CC_DEC_HYSTERESIS		0x20
+#define SPM_CORE_INACTIVE_MAPPING	0x28
+#define CC_ZERO_BEHAV_CTRL		0xc
+#define ENABLE_OVERRIDE			BIT(0)
+#define SPM_CC_DCVS_DISABLE		0x24
+#define LLM_FREQ_VOTE_INC_HYSTERESIS	0x30
+#define LLM_FREQ_VOTE_DEC_HYSTERESIS	0x34
+#define LLM_INTF_DCVS_DISABLE		0x40
+#define LLM_VOLTAGE_VOTE_INC_HYSTERESIS	0x38
+#define LLM_VOLTAGE_VOTE_DEC_HYSTERESIS	0x3c
+#define VMIN_REDUCTION_ENABLE_REG	0x48
+#define VMIN_REDUCTION_TIMER_REG	0x4c
+#define PDN_FSM_CTRL_REG		0x54
+#define DELTA_DEX_VAL			BVAL(31, 23, 0xa)
+#define IGNORE_PLL_LOCK			BIT(15)
+#define CC_BOOST_FSM_EN			BIT(0)
+#define CC_BOOST_FSM_TIMERS_REG0	0x58
+#define CC_BOOST_FSM_TIMERS_REG1	0x5c
+#define CC_BOOST_FSM_TIMERS_REG2	0x60
+#define DCVS_BOOST_FSM_EN_MASK		BIT(2)
+#define DCVS_BOOST_FSM_TIMERS_REG0	0x64
+#define DCVS_BOOST_FSM_TIMERS_REG1	0x68
+#define DCVS_BOOST_FSM_TIMERS_REG2	0x6c
+#define PS_BOOST_FSM_EN_MASK		BIT(1)
+#define PS_BOOST_FSM_TIMERS_REG0	0x74
+#define PS_BOOST_FSM_TIMERS_REG1	0x78
+#define PS_BOOST_FSM_TIMERS_REG2	0x7c
+#define BOOST_PROG_SYNC_DELAY_REG	0x80
+#define DCVS_DROOP_FSM_EN_MASK		BIT(5)
+#define DROOP_PROG_SYNC_DELAY_REG	0x9c
+#define DROOP_RELEASE_TIMER_CTRL	0x88
+#define DROOP_CTRL_REG			0x84
+#define DCVS_DROOP_TIMER_CTRL		0x98
+#define PLL_SW_OVERRIDE_ENABLE		0xa0
+#define PLL_SW_OVERRIDE_DROOP_EN	BIT(0)
+#define SPM_CORE_COUNT_CTRL		0x2c
+#define CORE_DCVS_CTRL			0xbc
+#define OVERRIDE_CLUSTER_IDLE_ACK	0x800
+#define REQ_GEN_FSM_STATUS		0x70c
+
+#define PLL_MIN_LVAL			0x21
+#define PLL_MIN_FREQ_REG		0x94
+#define PLL_POST_DIV1			0x1F
+#define PLL_POST_DIV2			0x11F
+#define PLL_MODE			0x0
+#define PLL_L_VAL			0x4
+#define PLL_USER_CTRL			0xc
+#define PLL_CONFIG_CTL_LO		0x10
+#define PLL_CONFIG_CTL_HI		0x14
+#define MIN_VCO_VAL			0x2b
+
+#define MAX_VC				63
+#define MAX_MEM_ACC_LEVELS		3
+#define MAX_MEM_ACC_VALUES		(MAX_MEM_ACC_LEVELS * \
+					MAX_MEM_ACC_VAL_PER_LEVEL)
+#define MEM_ACC_ADDRS			3
+
+#define ISENSE_ON_DATA			0xf
+#define ISENSE_OFF_DATA			0x0
+#define CONSTANT_32			0x20
+
+#define APM_MX_MODE			0x0
+#define APM_APC_MODE			0x2
+#define APM_READ_DATA_MASK		0xc
+#define APM_MX_MODE_VAL			0x4
+#define APM_APC_READ_VAL		0x8
+#define APM_MX_READ_VAL			0x4
+#define APM_CROSSOVER_VC		0xb0
+
+#define MEM_ACC_SEQ_CONST(n)		(n)
+#define MEM_ACC_APM_READ_MASK		0xff
+#define MEMACC_CROSSOVER_VC		0xb8
+
+#define PLL_WAIT_LOCK_TIME_US		10
+#define PLL_WAIT_LOCK_TIME_NS		(PLL_WAIT_LOCK_TIME_US * 1000)
+#define SAFE_FREQ_WAIT_NS		5000
+#define DEXT_DECREMENT_WAIT_NS		1000
+
+#define DATA_MEM(n)			(0x400 + (n) * 4)
+
+#define DCVS_PERF_STATE_DESIRED_REG_0	0x780
+#define DCVS_PERF_STATE_DESIRED_REG(n) (DCVS_PERF_STATE_DESIRED_REG_0 + \
+					(4 * (n)))
+#define OSM_CYCLE_COUNTER_STATUS_REG_0	0x7d0
+#define OSM_CYCLE_COUNTER_STATUS_REG(n)	(OSM_CYCLE_COUNTER_STATUS_REG_0 + \
+					(4 * (n)))
+
+static const struct regmap_config osm_qcom_regmap_config = {
+	.reg_bits       = 32,
+	.reg_stride     = 4,
+	.val_bits       = 32,
+	.fast_io	= true,
+};
+
+enum clk_osm_bases {
+	OSM_BASE,
+	PLL_BASE,
+	EFUSE_BASE,
+	SEQ_BASE,
+	NUM_BASES,
+};
+
+enum clk_osm_lut_data {
+	FREQ,
+	FREQ_DATA,
+	PLL_OVERRIDES,
+	MEM_ACC_LEVEL,
+	VIRTUAL_CORNER,
+	NUM_FIELDS,
+};
+
+struct osm_entry {
+	u16 virtual_corner;
+	u16 open_loop_volt;
+	u32 freq_data;
+	u32 override_data;
+	u32 mem_acc_level;
+	long frequency;
+};
+
+struct clk_osm {
+	struct clk_hw hw;
+	struct osm_entry osm_table[OSM_TABLE_SIZE];
+	struct dentry *debugfs;
+	struct regulator *vdd_reg;
+	struct platform_device *vdd_dev;
+	void *vbases[NUM_BASES];
+	unsigned long pbases[NUM_BASES];
+	spinlock_t lock;
+
+	u32 cpu_reg_mask;
+	u32 num_entries;
+	u32 cluster_num;
+	u32 core_num;
+	u32 apm_crossover_vc;
+	u32 apm_threshold_vc;
+	u32 mem_acc_crossover_vc;
+	u32 mem_acc_threshold_vc;
+	u32 min_cpr_vc;
+	u32 cycle_counter_reads;
+	u32 cycle_counter_delay;
+	u32 cycle_counter_factor;
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	u32 l_val_base;
+	u32 apcs_pll_user_ctl;
+	u32 apcs_pll_min_freq;
+	u32 cfg_gfmux_addr;
+	u32 apcs_cbc_addr;
+	u32 speedbin;
+	u32 mem_acc_crossover_vc_addr;
+	u32 mem_acc_addr[MEM_ACC_ADDRS];
+	u32 ramp_ctl_addr;
+	u32 apm_mode_ctl;
+	u32 apm_status_ctl;
+	u32 osm_clk_rate;
+	u32 xo_clk_rate;
+	bool secure_init;
+	bool red_fsm_en;
+	bool boost_fsm_en;
+	bool safe_fsm_en;
+	bool ps_fsm_en;
+	bool droop_fsm_en;
+
+	struct notifier_block panic_notifier;
+	u32 trace_periodic_timer;
+	bool trace_en;
+	bool wdog_trace_en;
+};
+
+static struct regulator *vdd_l3;
+static struct regulator *vdd_pwrcl;
+static struct regulator *vdd_perfcl;
+
+static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
+{
+	return container_of(_hw, struct clk_osm, hw);
+}
+
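+/*
+ * Read-modify-write only the bits covered by @mask; the register write is
+ * skipped when the resulting value is unchanged.
+ */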
+static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val,
+					    u32 offset, u32 mask)
+{
+	u32 val2, orig_val;
+
+	val2 = orig_val = readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+	val2 &= ~mask;
+	val2 |= val & mask;
+
+	if (val2 != orig_val)
+		writel_relaxed(val2, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline void clk_osm_write_seq_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[SEQ_BASE] + offset);
+}
+
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_mb(struct clk_osm *c, int base)
+{
+	return readl_relaxed_no_log((char *)c->vbases[base] + ENABLE_REG);
+}
+
+static long clk_osm_list_rate(struct clk_hw *hw, unsigned int n,
+					unsigned long rate_max)
+{
+	if (n >= hw->init->num_rate_max)
+		return -ENXIO;
+	return hw->init->rate_max[n];
+}
+
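+/*
+ * A candidate rate is "better" if it satisfies the request and is lower than
+ * the current best, or if the current best is below the request and the
+ * candidate is higher.
+ */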
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+			unsigned long new)
+{
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	return (req <= new && new < best) || (best < req && best < new);
+}
+
+static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	int i;
+	unsigned long rrate = 0;
+
+	/*
+	 * If the rate passed in is 0, return the first frequency in the
+	 * FMAX table.
+	 */
+	if (!rate)
+		return hw->init->rate_max[0];
+
+	for (i = 0; i < hw->init->num_rate_max; i++) {
+		if (is_better_rate(rate, rrate, hw->init->rate_max[i])) {
+			rrate = hw->init->rate_max[i];
+			if (rate == rrate)
+				break;
+		}
+	}
+
+	pr_debug("%s: rate %lu, rrate %lu\n", __func__, rate, rrate);
+
+	return rrate;
+}
+
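+/*
+ * Find the LUT index for @rate, preferring the entry programmed for the
+ * maximum core count and falling back to the single-core entry when that is
+ * the only match.
+ */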
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+	int quad_core_index, single_core_index = 0;
+	int core_count;
+
+	for (quad_core_index = 0; quad_core_index < entries;
+						quad_core_index++) {
+		core_count = CORE_COUNT_VAL(table[quad_core_index].freq_data);
+		if (rate == table[quad_core_index].frequency &&
+					core_count == SINGLE_CORE) {
+			single_core_index = quad_core_index;
+			continue;
+		}
+		if (rate == table[quad_core_index].frequency &&
+					core_count == MAX_CORE_COUNT)
+			return quad_core_index;
+	}
+	if (single_core_index)
+		return single_core_index;
+
+	return -EINVAL;
+}
+
+static int clk_osm_enable(struct clk_hw *hw)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+
+	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	/* Wait for 5us for OSM hardware to enable */
+	udelay(5);
+
+	pr_debug("OSM clk enabled for cluster=%d\n", cpuclk->cluster_num);
+
+	return 0;
+}
+
+const struct clk_ops clk_ops_cpu_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+};
+
+static struct clk_ops clk_ops_core;
+
+static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+	unsigned long r_rate;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	r_rate = clk_osm_round_rate(p_hw, rate, NULL);
+
+	if (rate != r_rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(parent->osm_table,
+				     parent->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+	/*
+	 * Choose index and send request to OSM hardware.
+	 * TODO: Program INACTIVE_OS_REQUEST if needed.
+	 */
+	clk_osm_write_reg(parent, index,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(parent, OSM_BASE);
+
+	return 0;
+}
+
+static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	int index = 0;
+	unsigned long r_rate;
+
+	if (!cpuclk)
+		return -EINVAL;
+
+	r_rate = clk_osm_round_rate(hw, rate, NULL);
+
+	if (rate != r_rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(cpuclk->osm_table,
+				     cpuclk->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+
+	clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	return 0;
+}
+
+static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(parent,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				parent->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return parent->osm_table[index].frequency;
+}
+
+static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	int index = 0;
+
+	if (!cpuclk)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				cpuclk->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return cpuclk->osm_table[index].frequency;
+}
+
+const struct clk_ops clk_ops_l3_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+	.recalc_rate = l3_clk_recalc_rate,
+	.set_rate = l3_clk_set_rate,
+};
+
+enum {
+	P_XO,
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+	{ P_XO, 0 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+	"xo",
+};
+
+static struct clk_init_data osm_clks_init[] = {
+	[0] = {
+		.name = "l3_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_l3_osm,
+	},
+	[1] = {
+		.name = "pwrcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+	[2] = {
+		.name = "perfcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+};
+
+static struct clk_osm l3_clk = {
+	.cluster_num = 0,
+	.cpu_reg_mask = 0x0,
+	.hw.init = &osm_clks_init[0],
+};
+
+static DEFINE_CLK_VOTER(l3_cluster0_vote_clk, l3_clk, 0);
+static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0);
+
+static struct clk_osm pwrcl_clk = {
+	.cluster_num = 1,
+	.cpu_reg_mask = 0x300,
+	.hw.init = &osm_clks_init[1],
+};
+
+static struct clk_osm cpu0_pwrcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu0_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu1_pwrcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu1_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu2_pwrcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu2_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu3_pwrcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu3_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm perfcl_clk = {
+	.cluster_num = 2,
+	.cpu_reg_mask = 0x700,
+	.hw.init = &osm_clks_init[2],
+};
+
+static struct clk_osm cpu4_perfcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu4_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu5_perfcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu5_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu6_perfcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu6_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu7_perfcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu7_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+/*
+ * Use the cpu* clocks only for writing to the PERF_STATE_DESIRED registers.
+ * Note that we are currently NOT programming the APSS_LMH_GFMUX_CFG &
+ * APSS_OSM_GFMUX_CFG registers.
+ */
+
+static struct clk_hw *osm_qcom_clk_hws[] = {
+	[L3_CLK] = &l3_clk.hw,
+	[L3_CLUSTER0_VOTE_CLK] = &l3_cluster0_vote_clk.hw,
+	[L3_CLUSTER1_VOTE_CLK] = &l3_cluster1_vote_clk.hw,
+	[PWRCL_CLK] = &pwrcl_clk.hw,
+	[CPU0_PWRCL_CLK] = &cpu0_pwrcl_clk.hw,
+	[CPU1_PWRCL_CLK] = &cpu1_pwrcl_clk.hw,
+	[CPU2_PWRCL_CLK] = &cpu2_pwrcl_clk.hw,
+	[CPU3_PWRCL_CLK] = &cpu3_pwrcl_clk.hw,
+	[PERFCL_CLK] = &perfcl_clk.hw,
+	[CPU4_PERFCL_CLK] = &cpu4_perfcl_clk.hw,
+	[CPU5_PERFCL_CLK] = &cpu5_perfcl_clk.hw,
+	[CPU6_PERFCL_CLK] = &cpu6_perfcl_clk.hw,
+	[CPU7_PERFCL_CLK] = &cpu7_perfcl_clk.hw,
+};
+
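+/*
+ * Map a logical CPU to its per-core clock using the MPIDR value read from the
+ * CPU's "reg" devicetree property; the result is cached for later lookups.
+ */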
+static struct clk_osm *logical_cpu_to_clk(int cpu)
+{
+	struct device_node *cpu_node;
+	const u32 *cell;
+	u64 hwid;
+	static struct clk_osm *cpu_clk_map[NR_CPUS];
+
+	if (cpu_clk_map[cpu])
+		return cpu_clk_map[cpu];
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		goto fail;
+
+	cell = of_get_property(cpu_node, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", cpu_node->full_name);
+		goto fail;
+	}
+
+	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 0:
+			cpu_clk_map[cpu] = &cpu0_pwrcl_clk;
+			break;
+		case 1:
+			cpu_clk_map[cpu] = &cpu1_pwrcl_clk;
+			break;
+		case 2:
+			cpu_clk_map[cpu] = &cpu2_pwrcl_clk;
+			break;
+		case 3:
+			cpu_clk_map[cpu] = &cpu3_pwrcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for power cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+	if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 4:
+			cpu_clk_map[cpu] = &cpu4_perfcl_clk;
+			break;
+		case 5:
+			cpu_clk_map[cpu] = &cpu5_perfcl_clk;
+			break;
+		case 6:
+			cpu_clk_map[cpu] = &cpu6_perfcl_clk;
+			break;
+		case 7:
+			cpu_clk_map[cpu] = &cpu7_perfcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for perf cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+fail:
+	return NULL;
+}
+
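+/* Convert a duration in nanoseconds to a count of OSM clock cycles. */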
+static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
+{
+	u64 temp;
+
+	temp = (u64)c->osm_clk_rate * nsec;
+	do_div(temp, 1000000000);
+
+	return temp;
+}
+
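+/*
+ * Record the virtual corners at which the MEM-ACC level changes and program
+ * them into the OSM sequencer, writing the sequencer memory directly when
+ * there is no TZ (c->secure_init) and through scm_io_write() otherwise.
+ */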
+static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
+{
+	int curr_level, i, j = 0;
+	int mem_acc_level_map[MAX_MEM_ACC_LEVELS] = {MAX_VC, MAX_VC, MAX_VC};
+
+	curr_level = c->osm_table[0].mem_acc_level;
+	for (i = 0; i < c->num_entries; i++) {
+		if (curr_level == MAX_MEM_ACC_LEVELS)
+			break;
+
+		if (c->osm_table[i].mem_acc_level != curr_level) {
+			mem_acc_level_map[j++] =
+				c->osm_table[i].virtual_corner;
+			curr_level = c->osm_table[i].mem_acc_level;
+		}
+	}
+
+	if (c->secure_init) {
+		clk_osm_write_seq_reg(c,
+				c->pbases[OSM_BASE] + MEMACC_CROSSOVER_VC,
+				DATA_MEM(57));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[0], DATA_MEM(48));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[1], DATA_MEM(49));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[2], DATA_MEM(50));
+		clk_osm_write_seq_reg(c, c->mem_acc_crossover_vc,
+							DATA_MEM(78));
+		clk_osm_write_seq_reg(c, mem_acc_level_map[0], DATA_MEM(79));
+		if (c == &perfcl_clk)
+			clk_osm_write_seq_reg(c, c->mem_acc_threshold_vc,
+								DATA_MEM(80));
+		else
+			clk_osm_write_seq_reg(c, mem_acc_level_map[1],
+								DATA_MEM(80));
+		/*
+		 * Note that DATA_MEM[81] -> DATA_MEM[89] values will be
+		 * confirmed post-si. Use a value of 1 for DATA_MEM[89] and
+		 * leave the rest of them as 0.
+		 */
+		clk_osm_write_seq_reg(c, 1, DATA_MEM(89));
+	} else {
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(78),
+						c->mem_acc_crossover_vc);
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(79),
+						mem_acc_level_map[0]);
+		if (c == &perfcl_clk)
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						c->mem_acc_threshold_vc);
+		else
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						mem_acc_level_map[1]);
+	}
+}
+
+static void clk_osm_program_apm_regs(struct clk_osm *c)
+{
+	if (c == &l3_clk || c == &pwrcl_clk)
+		return;
+
+	/*
+	 * Program address of the control register used to configure
+	 * the Array Power Mux controller
+	 */
+	clk_osm_write_seq_reg(c, c->apm_mode_ctl, DATA_MEM(41));
+
+	/* Program address of controller status register */
+	clk_osm_write_seq_reg(c, c->apm_status_ctl, DATA_MEM(43));
+
+	/* Program address of crossover register */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + APM_CROSSOVER_VC,
+						DATA_MEM(44));
+
+	/* Program mode value to switch APM to VDD_APC */
+	clk_osm_write_seq_reg(c, APM_APC_MODE, DATA_MEM(72));
+
+	/* Program mode value to switch APM to VDD_MX */
+	clk_osm_write_seq_reg(c, APM_MX_MODE, DATA_MEM(73));
+
+	/* Program mask used to move into read_mask port */
+	clk_osm_write_seq_reg(c, APM_READ_DATA_MASK, DATA_MEM(74));
+
+	/* Value used to move into read_exp port */
+	clk_osm_write_seq_reg(c, APM_APC_READ_VAL, DATA_MEM(75));
+	clk_osm_write_seq_reg(c, APM_MX_READ_VAL, DATA_MEM(76));
+}
+
+static void clk_osm_do_additional_setup(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	if (!c->secure_init)
+		return;
+
+	dev_info(&pdev->dev, "Performing additional OSM setup due to lack of TZ for cluster=%d\n",
+						 c->cluster_num);
+
+	/* PLL L_VAL & post-div programming */
+	clk_osm_write_seq_reg(c, c->apcs_pll_min_freq, DATA_MEM(32));
+	clk_osm_write_seq_reg(c, c->l_val_base, DATA_MEM(33));
+	clk_osm_write_seq_reg(c, c->apcs_pll_user_ctl, DATA_MEM(34));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV1, DATA_MEM(35));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV2, DATA_MEM(36));
+
+	/* APM Programming */
+	clk_osm_program_apm_regs(c);
+
+	/* GFMUX Programming */
+	clk_osm_write_seq_reg(c, c->cfg_gfmux_addr, DATA_MEM(37));
+	clk_osm_write_seq_reg(c, 0x1, DATA_MEM(65));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(66));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(67));
+	clk_osm_write_seq_reg(c, 0x40000000, DATA_MEM(68));
+	clk_osm_write_seq_reg(c, 0x20000000, DATA_MEM(69));
+	clk_osm_write_seq_reg(c, 0x10000000, DATA_MEM(70));
+	clk_osm_write_seq_reg(c, 0x70000000, DATA_MEM(71));
+
+	/* Override programming */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] +
+			OVERRIDE_CLUSTER_IDLE_ACK, DATA_MEM(54));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(55));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + PDN_FSM_CTRL_REG,
+					DATA_MEM(40));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + REQ_GEN_FSM_STATUS,
+					DATA_MEM(60));
+	clk_osm_write_seq_reg(c, 0x10, DATA_MEM(61));
+	clk_osm_write_seq_reg(c, 0x70, DATA_MEM(62));
+	clk_osm_write_seq_reg(c, c->apcs_cbc_addr, DATA_MEM(112));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(113));
+
+	if (c == &perfcl_clk) {
+		int rc;
+		u32 isense_addr;
+
+		/* Performance cluster isense programming */
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perfcl-isense-addr", &isense_addr);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,perfcl-isense-addr property, rc=%d\n",
+				rc);
+			return;
+		}
+		clk_osm_write_seq_reg(c, isense_addr, DATA_MEM(45));
+		clk_osm_write_seq_reg(c, ISENSE_ON_DATA, DATA_MEM(46));
+		clk_osm_write_seq_reg(c, ISENSE_OFF_DATA, DATA_MEM(47));
+	}
+
+	clk_osm_write_seq_reg(c, c->ramp_ctl_addr, DATA_MEM(105));
+	clk_osm_write_seq_reg(c, CONSTANT_32, DATA_MEM(92));
+
+	/* Enable/disable CPR ramp settings */
+	clk_osm_write_seq_reg(c, 0x101C031, DATA_MEM(106));
+	clk_osm_write_seq_reg(c, 0x1010031, DATA_MEM(107));
+}
+
+static void clk_osm_setup_fsms(struct clk_osm *c)
+{
+	u32 val;
+
+	/* Voltage Reduction FSM */
+	if (c->red_fsm_en) {
+		val = clk_osm_read_reg(c, VMIN_REDUCTION_ENABLE_REG) | BIT(0);
+		val |= BVAL(6, 1, c->min_cpr_vc);
+		clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG);
+
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 10000),
+				  VMIN_REDUCTION_TIMER_REG);
+	}
+
+	/* Boost FSM */
+	if (c->boost_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		val |= DELTA_DEX_VAL | CC_BOOST_FSM_EN | IGNORE_PLL_LOCK;
+		clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* Safe Freq FSM */
+	if (c->safe_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_BOOST_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* Pulse Swallowing FSM */
+	if (c->ps_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | PS_BOOST_FSM_EN_MASK,
+							PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* PLL signal timing control */
+	if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
+		clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG);
+
+	/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
+	if (c->droop_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_DROOP_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+	}
+
+	if (c->ps_fsm_en || c->droop_fsm_en) {
+		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 100),
+				  DROOP_RELEASE_TIMER_CTRL);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 150),
+				  DCVS_DROOP_TIMER_CTRL);
+		/*
+		 * TODO: Check if DCVS_DROOP_CODE used is correct. Also check
+		 * if RESYNC_CTRL should be set for L3.
+		 */
+		val = BIT(31) | BVAL(22, 16, 0x2) | BVAL(6, 0, 0x8);
+		clk_osm_write_reg(c, val, DROOP_CTRL_REG);
+	}
+}
+
+static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM voltage increase request is received.
+	 * The time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-up-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM voltage up timer value, rc=%d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM voltage decrease request is received.
+	 * The time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Voltage down timer value: %d\n",
+									rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM Voltage requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-volt-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Voltage requests\n");
+		val = 0;
+	} else {
+		val = 1;
+	}
+
+	/* Enable or disable LLM VOLT DVCS */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static int clk_osm_set_llm_freq_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM frequency increase request is received.
+	 * The time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM frequency up timer value, rc=%d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Set up the timer that controls how long the OSM waits before
+	 * performing DCVS when an LLM frequency decrease request is received.
+	 * The time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Frequency down timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM frequency requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-freq-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Frequency requests\n");
+		val = 0;
+	} else {
+		val = BIT(1);
+	}
+
+	/* Enable or disable LLM FREQ DVCS */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the write to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static int clk_osm_set_cc_policy(struct platform_device *pdev)
+{
+	int rc = 0, val;
+	u32 *array;
+	struct device_node *of = pdev->dev.of_node;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No up timer value, rc=%d\n",
+			 rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+					array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS);
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No down timer value, rc=%d\n", rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+				       array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+				       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+				       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+	}
+
+	/* OSM index override for cluster PC */
+	rc = of_property_read_u32_array(of, "qcom,pc-override-index",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
+			rc);
+		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+	} else {
+		val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
+		val = BVAL(6, 1, array[perfcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
+	}
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c3-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C3 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c2-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C2 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Disabling CC based DCVS\n");
+		val = 1;
+	} else {
+		val = 0;
+	}
+
+	clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static void clk_osm_setup_cluster_pll(struct clk_osm *c)
+{
+	writel_relaxed(0x0, c->vbases[PLL_BASE] + PLL_MODE);
+	writel_relaxed(0x26, c->vbases[PLL_BASE] + PLL_L_VAL);
+	writel_relaxed(0x8, c->vbases[PLL_BASE] +
+			PLL_USER_CTRL);
+	writel_relaxed(0x20000AA8, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_LO);
+	writel_relaxed(0x000003D2, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_HI);
+	writel_relaxed(0x2, c->vbases[PLL_BASE] +
+			PLL_MODE);
+
+	/* Ensure writes complete before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	udelay(PLL_WAIT_LOCK_TIME_US);
+
+	writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE);
+
+	/* Ensure write completes before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	usleep_range(50, 75);
+
+	writel_relaxed(0x7, c->vbases[PLL_BASE] + PLL_MODE);
+}
+
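+/*
+ * Program core-count control, the PLL minimum frequency, the PLL-lock
+ * override patterns and the L-value of the first corner at the highest
+ * MEM-ACC level into the OSM and sequencer registers.
+ */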
+static void clk_osm_misc_programming(struct clk_osm *c)
+{
+	u32 lval = 0xFF, val;
+	int i;
+
+	clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL);
+	clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG);
+
+	/* Pattern to set/clear PLL lock in PDN_FSM_CTRL_REG */
+	val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+	if (c->secure_init) {
+		val |= IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(108));
+		val &= ~IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(109));
+		clk_osm_write_seq_reg(c, MIN_VCO_VAL, DATA_MEM(110));
+	} else {
+		val |= IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(108), val);
+		val &= ~IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(109), val);
+	}
+
+	/* Program LVAL corresponding to first turbo VC */
+	for (i = 0; i < c->num_entries; i++) {
+		if (c->osm_table[i].mem_acc_level == MAX_MEM_ACC_LEVELS) {
+			lval = c->osm_table[i].freq_data & GENMASK(7, 0);
+			break;
+		}
+	}
+
+	if (c->secure_init)
+		clk_osm_write_seq_reg(c, lval, DATA_MEM(114));
+	else
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(114), lval);
+}
+
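+/*
+ * Write the frequency, voltage and override words for every LUT row into the
+ * OSM hardware table; rows beyond num_entries are padded with the values of
+ * the last valid entry.
+ */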
+static int clk_osm_setup_hw_table(struct clk_osm *c)
+{
+	struct osm_entry *entry = c->osm_table;
+	int i;
+	u32 freq_val = 0, volt_val = 0, override_val = 0;
+	u32 table_entry_offset, last_mem_acc_level, last_virtual_corner = 0;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		if (i < c->num_entries) {
+			freq_val = entry[i].freq_data;
+			volt_val = BVAL(27, 24, entry[i].mem_acc_level)
+				| BVAL(21, 16, entry[i].virtual_corner)
+				| BVAL(11, 0, entry[i].open_loop_volt);
+			override_val = entry[i].override_data;
+
+			if (last_virtual_corner && last_virtual_corner ==
+			    entry[i].virtual_corner && last_mem_acc_level !=
+			    entry[i].mem_acc_level) {
+				pr_err("invalid LUT entry at row=%d virtual_corner=%d, mem_acc_level=%d\n",
+				       i, entry[i].virtual_corner,
+				       entry[i].mem_acc_level);
+				return -EINVAL;
+			}
+			last_virtual_corner = entry[i].virtual_corner;
+			last_mem_acc_level = entry[i].mem_acc_level;
+		}
+
+		table_entry_offset = i * OSM_REG_SIZE;
+		clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
+		clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+		clk_osm_write_reg(c, override_val, OVERRIDE_REG +
+				  table_entry_offset);
+	}
+
+	/* Make sure all writes go through */
+	clk_osm_mb(c, OSM_BASE);
+
+	return 0;
+}
+
+static void clk_osm_print_osm_table(struct clk_osm *c)
+{
+	int i;
+	struct osm_entry *table = c->osm_table;
+	u32 pll_src, pll_div, lval, core_count;
+
+	pr_debug("Index, Frequency, VC, OLV (mv), Core Count, PLL Src, PLL Div, L-Val, ACC Level\n");
+	for (i = 0; i < c->num_entries; i++) {
+		pll_src = (table[i].freq_data & GENMASK(31, 30)) >> 30;
+		pll_div = (table[i].freq_data & GENMASK(29, 28)) >> 28;
+		lval = table[i].freq_data & GENMASK(7, 0);
+		core_count = (table[i].freq_data & GENMASK(18, 16)) >> 16;
+
+		pr_debug("%3d, %11lu, %2u, %5u, %2u, %6u, %8u, %7u, %5u\n",
+			i,
+			table[i].frequency,
+			table[i].virtual_corner,
+			table[i].open_loop_volt,
+			core_count,
+			pll_src,
+			pll_div,
+			lval,
+			table[i].mem_acc_level);
+	}
+	pr_debug("APM threshold corner=%d, crossover corner=%d\n",
+			c->apm_threshold_vc, c->apm_crossover_vc);
+	pr_debug("MEM-ACC threshold corner=%d, crossover corner=%d\n",
+			c->mem_acc_threshold_vc, c->mem_acc_crossover_vc);
+}
+
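+/* Return the open-loop voltage in uV for @rate, or -EINVAL if not found. */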
+static int find_voltage(struct clk_osm *c, unsigned long rate)
+{
+	struct osm_entry *table = c->osm_table;
+	int entries = c->num_entries, i;
+
+	for (i = 0; i < entries; i++) {
+		if (rate == table[i].frequency) {
+			/* OPP table voltages have units of mV */
+			return table[i].open_loop_volt * 1000;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int add_opp(struct clk_osm *c, struct device *dev)
+{
+	unsigned long rate = 0;
+	int uv;
+	long rc;
+	int j = 0;
+	unsigned long min_rate = c->hw.init->rate_max[0];
+	unsigned long max_rate =
+			c->hw.init->rate_max[c->hw.init->num_rate_max - 1];
+
+	while (1) {
+		rate = c->hw.init->rate_max[j++];
+		uv = find_voltage(c, rate);
+		if (uv <= 0) {
+			pr_warn("No voltage for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		rc = dev_pm_opp_add(dev, rate, uv);
+		if (rc) {
+			pr_warn("failed to add OPP for %lu\n", rate);
+			return rc;
+		}
+
+		/*
+		 * Print the OPP pair for the lowest and highest frequency for
+		 * each device that we're populating. This is important since
+		 * this information will be used by thermal mitigation and the
+		 * scheduler.
+		 */
+		if (rate == min_rate)
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+
+		if (rate == max_rate && max_rate != min_rate) {
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+			break;
+		}
+
+		if (min_rate == max_rate)
+			break;
+	}
+	return 0;
+}
+
+static void populate_opp_table(struct platform_device *pdev)
+{
+	int cpu;
+	struct device *cpu_dev;
+	struct clk_osm *c, *parent;
+	struct clk_hw *hw_parent;
+
+	for_each_possible_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return;
+		}
+
+		hw_parent = clk_hw_get_parent(&c->hw);
+		parent = to_clk_osm(hw_parent);
+		cpu_dev = get_cpu_device(cpu);
+		if (cpu_dev)
+			if (add_opp(parent, cpu_dev))
+				pr_err("Failed to add OPP levels for %s\n",
+					dev_name(cpu_dev));
+	}
+
+	/* TODO: Figure out which device to tag the L3 table to */
+}
+
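+/*
+ * Return a monotonically increasing cycle count for @cpu, accumulating the
+ * 32-bit hardware counter across wrap-arounds.
+ */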
+static u64 clk_osm_get_cpu_cycle_counter(int cpu)
+{
+	u32 val;
+	unsigned long flags;
+	struct clk_osm *parent, *c = logical_cpu_to_clk(cpu);
+
+	if (IS_ERR_OR_NULL(c)) {
+		pr_err("no clock device for CPU=%d\n", cpu);
+		return 0;
+	}
+
+	parent = to_clk_osm(clk_hw_get_parent(&c->hw));
+
+	spin_lock_irqsave(&parent->lock, flags);
+	val = clk_osm_read_reg_no_log(parent,
+			OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
+
+	if (val < c->prev_cycle_counter) {
+		/* Handle counter overflow */
+		c->total_cycle_counter += UINT_MAX -
+			c->prev_cycle_counter + val;
+		c->prev_cycle_counter = val;
+	} else {
+		c->total_cycle_counter += val - c->prev_cycle_counter;
+		c->prev_cycle_counter = val;
+	}
+	spin_unlock_irqrestore(&parent->lock, flags);
+
+	return c->total_cycle_counter;
+}
+
+static void clk_osm_setup_cycle_counters(struct clk_osm *c)
+{
+	u32 ratio = c->osm_clk_rate;
+	u32 val = 0;
+
+	/* Enable cycle counter */
+	val = BIT(0);
+	/* Setup OSM clock to XO ratio */
+	do_div(ratio, c->xo_clk_rate);
+	val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
+
+	clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+	pr_debug("OSM to XO clock ratio: %d\n", ratio);
+}
+
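+/*
+ * Derive the APM and MEM-ACC threshold corners for the performance cluster by
+ * comparing the regulator's corner voltages against the DT-specified
+ * threshold voltages; the crossover corners are set to the last two corners
+ * the regulator supports.
+ */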
+static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	struct regulator *regulator = c->vdd_reg;
+	int count, vc, i, memacc_threshold, apm_threshold;
+	int rc = 0;
+	u32 corner_volt;
+
+	if (c == &l3_clk || c == &pwrcl_clk)
+		return rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,perfcl-apcs-apm-threshold-voltage",
+				  &apm_threshold);
+	if (rc) {
+		pr_err("qcom,perfcl-apcs-apm-threshold-voltage property not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,perfcl-apcs-mem-acc-threshold-voltage",
+				  &memacc_threshold);
+	if (rc) {
+		pr_err("qcom,perfcl-apcs-mem-acc-threshold-voltage property not specified\n");
+		return rc;
+	}
+
+	/*
+	 * Initialize VC settings in case none of them go above the voltage
+	 * limits
+	 */
+	c->apm_threshold_vc = c->apm_crossover_vc = c->mem_acc_crossover_vc =
+				c->mem_acc_threshold_vc = MAX_VC;
+
+	count = regulator_count_voltages(regulator);
+	if (count < 0) {
+		pr_err("Failed to get the number of virtual corners supported\n");
+		return count;
+	}
+
+	c->apm_crossover_vc = count - 2;
+	c->mem_acc_crossover_vc = count - 1;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		corner_volt = regulator_list_corner_voltage(regulator, vc);
+
+		if (c->apm_threshold_vc == MAX_VC &&
+				corner_volt >= apm_threshold)
+			c->apm_threshold_vc = c->osm_table[i].virtual_corner;
+
+		if (c->mem_acc_threshold_vc == MAX_VC &&
+				corner_volt >= memacc_threshold)
+			c->mem_acc_threshold_vc =
+				c->osm_table[i].virtual_corner;
+	}
+
+	return rc;
+}
+
+static int clk_osm_resolve_open_loop_voltages(struct clk_osm *c)
+{
+	struct regulator *regulator = c->vdd_reg;
+	u32 vc, mv;
+	int i;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		/* Voltage is in uv. Convert to mv */
+		mv = regulator_list_corner_voltage(regulator, vc) / 1000;
+		c->osm_table[i].open_loop_volt = mv;
+	}
+
+	return 0;
+}
+
+static int clk_osm_get_lut(struct platform_device *pdev,
+			   struct clk_osm *c, char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, total_elems, num_rows, i, j, k;
+	int rc = 0;
+	u32 *array;
+	u32 *fmax_temp;
+	u32 data;
+	unsigned long abs_fmax = 0;
+	bool last_entry = false;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	total_elems = prop_len / sizeof(u32);
+	if (total_elems % NUM_FIELDS) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	num_rows = total_elems / NUM_FIELDS;
+
+	fmax_temp = devm_kzalloc(&pdev->dev, num_rows * sizeof(*fmax_temp),
+					GFP_KERNEL);
+	if (!fmax_temp)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev, prop_len, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, prop_name, array, total_elems);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse OSM table, rc=%d\n", rc);
+		goto exit;
+	}
+
+	pr_debug("%s: Entries in Table: %d\n", __func__, num_rows);
+	c->num_entries = num_rows;
+	if (c->num_entries > OSM_TABLE_SIZE) {
+		pr_err("LUT entries %d exceed maximum size %d\n",
+		       c->num_entries, OSM_TABLE_SIZE);
+		return -EINVAL;
+	}
+
+	for (i = 0, j = 0, k = 0; j < OSM_TABLE_SIZE; j++) {
+		c->osm_table[j].frequency = array[i + FREQ];
+		c->osm_table[j].freq_data = array[i + FREQ_DATA];
+		c->osm_table[j].override_data = array[i + PLL_OVERRIDES];
+		c->osm_table[j].mem_acc_level = array[i + MEM_ACC_LEVEL];
+		/* Voltage corners are 0 based in the OSM LUT */
+		c->osm_table[j].virtual_corner = array[i + VIRTUAL_CORNER] - 1;
+		pr_debug("index=%d freq=%ld virtual_corner=%d freq_data=0x%x override_data=0x%x mem_acc_level=0x%x\n",
+			 j, c->osm_table[j].frequency,
+			 c->osm_table[j].virtual_corner,
+			 c->osm_table[j].freq_data,
+			 c->osm_table[j].override_data,
+			 c->osm_table[j].mem_acc_level);
+
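+		/*
+		 * Rows whose core-count field (bits 18:16 of freq_data)
+		 * equals MAX_CORE_COUNT contribute candidate fmax values;
+		 * the final row always supplies the absolute fmax.
+		 */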
+		data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
+		if (!last_entry && data == MAX_CORE_COUNT) {
+			fmax_temp[k] = array[i];
+			k++;
+		}
+
+		if (i < total_elems - NUM_FIELDS)
+			i += NUM_FIELDS;
+		else {
+			abs_fmax = array[i];
+			last_entry = true;
+		}
+	}
+	fmax_temp[k] = abs_fmax;
+
+	osm_clks_init[c->cluster_num].rate_max = devm_kzalloc(&pdev->dev,
+						 k * sizeof(unsigned long),
+						       GFP_KERNEL);
+	if (!osm_clks_init[c->cluster_num].rate_max) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for (i = 0; i < k; i++)
+		osm_clks_init[c->cluster_num].rate_max[i] = fmax_temp[i];
+
+	osm_clks_init[c->cluster_num].num_rate_max = k;
+exit:
+	devm_kfree(&pdev->dev, fmax_temp);
+	devm_kfree(&pdev->dev, array);
+	return rc;
+}
+
+static int clk_osm_parse_dt_configs(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0;
+	struct resource *res;
+	char l3_min_cpr_vc_str[] = "qcom,l3-min-cpr-vc-bin0";
+	char pwrcl_min_cpr_vc_str[] = "qcom,pwrcl-min-cpr-vc-bin0";
+	char perfcl_min_cpr_vc_str[] = "qcom,perfcl-min-cpr-vc-bin0";
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,l-val-base",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l-val-base property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.l_val_base = array[l3_clk.cluster_num];
+	pwrcl_clk.l_val_base = array[pwrcl_clk.cluster_num];
+	perfcl_clk.l_val_base = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-user-ctl",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-user-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_pll_user_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_pll_user_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_user_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-min-freq",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-min-freq property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_pll_min_freq = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_pll_min_freq = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_min_freq = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-mode-ctl",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-mode-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apm_mode_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apm_mode_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_mode_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-status-ctrl",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-status-ctrl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apm_status_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apm_status_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_status_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,cfg-gfmux-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cfg-gfmux-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.cfg_gfmux_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.cfg_gfmux_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.cfg_gfmux_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-cbc-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-cbc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_cbc_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_cbc_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_cbc_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-ramp-ctl-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-ramp-ctl-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.ramp_ctl_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.ramp_ctl_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.ramp_ctl_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32(of, "qcom,xo-clk-rate",
+				  &pwrcl_clk.xo_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,xo-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.xo_clk_rate = perfcl_clk.xo_clk_rate = pwrcl_clk.xo_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,osm-clk-rate",
+				  &pwrcl_clk.osm_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,osm-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	l3_clk.osm_clk_rate = perfcl_clk.osm_clk_rate = pwrcl_clk.osm_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,cc-reads",
+				  &pwrcl_clk.cycle_counter_reads);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cc-reads property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	l3_clk.cycle_counter_reads = perfcl_clk.cycle_counter_reads =
+			pwrcl_clk.cycle_counter_reads;
+
+	rc = of_property_read_u32(of, "qcom,cc-delay",
+				  &pwrcl_clk.cycle_counter_delay);
+	if (rc)
+		dev_dbg(&pdev->dev, "no delays between cycle counter reads\n");
+	else
+		l3_clk.cycle_counter_delay = perfcl_clk.cycle_counter_delay =
+			pwrcl_clk.cycle_counter_delay;
+
+	rc = of_property_read_u32(of, "qcom,cc-factor",
+				  &pwrcl_clk.cycle_counter_factor);
+	if (rc)
+		dev_dbg(&pdev->dev, "no factor specified for cycle counter estimation\n");
+	else
+		l3_clk.cycle_counter_factor = perfcl_clk.cycle_counter_factor =
+			pwrcl_clk.cycle_counter_factor;
+
+	l3_clk.red_fsm_en = perfcl_clk.red_fsm_en = pwrcl_clk.red_fsm_en =
+		of_property_read_bool(of, "qcom,red-fsm-en");
+
+	l3_clk.boost_fsm_en = perfcl_clk.boost_fsm_en =
+		pwrcl_clk.boost_fsm_en =
+		of_property_read_bool(of, "qcom,boost-fsm-en");
+
+	l3_clk.safe_fsm_en = perfcl_clk.safe_fsm_en = pwrcl_clk.safe_fsm_en =
+		of_property_read_bool(of, "qcom,safe-fsm-en");
+
+	l3_clk.ps_fsm_en = perfcl_clk.ps_fsm_en = pwrcl_clk.ps_fsm_en =
+		of_property_read_bool(of, "qcom,ps-fsm-en");
+
+	l3_clk.droop_fsm_en = perfcl_clk.droop_fsm_en =
+		pwrcl_clk.droop_fsm_en =
+		of_property_read_bool(of, "qcom,droop-fsm-en");
+
+	devm_kfree(&pdev->dev, array);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"l3_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for l3_sequencer\n");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	l3_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!l3_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in l3_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"pwrcl_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for pwrcl_sequencer\n");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!pwrcl_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in pwrcl_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"perfcl_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for perfcl_sequencer\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	perfcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!perfcl_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in perfcl_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	snprintf(l3_min_cpr_vc_str, ARRAY_SIZE(l3_min_cpr_vc_str),
+			"qcom,l3-min-cpr-vc-bin%d", l3_clk.speedbin);
+	rc = of_property_read_u32(of, l3_min_cpr_vc_str, &l3_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			l3_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	snprintf(pwrcl_min_cpr_vc_str, ARRAY_SIZE(pwrcl_min_cpr_vc_str),
+			"qcom,pwrcl-min-cpr-vc-bin%d", pwrcl_clk.speedbin);
+	rc = of_property_read_u32(of, pwrcl_min_cpr_vc_str,
+						&pwrcl_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			pwrcl_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	snprintf(perfcl_min_cpr_vc_str, ARRAY_SIZE(perfcl_min_cpr_vc_str),
+			"qcom,perfcl-min-cpr-vc-bin%d", perfcl_clk.speedbin);
+	rc = of_property_read_u32(of, perfcl_min_cpr_vc_str,
+						&perfcl_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			perfcl_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	l3_clk.secure_init = perfcl_clk.secure_init = pwrcl_clk.secure_init =
+		of_property_read_bool(pdev->dev.of_node, "qcom,osm-no-tz");
+
+	if (!pwrcl_clk.secure_init)
+		return rc;
+
+	rc = of_property_read_u32_array(of, "qcom,l3-mem-acc-addr",
+					l3_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l3-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,pwrcl-mem-acc-addr",
+					pwrcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,pwrcl-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,perfcl-mem-acc-addr",
+					perfcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,perfcl-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int clk_osm_resources_init(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct resource *res;
+	unsigned long pbase;
+	int rc = 0;
+	void *vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_l3_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_l3_base");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	l3_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!l3_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_l3_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_pwrcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_pwrcl_base");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+	if (!pwrcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_pwrcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_perfcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_perfcl_base");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	perfcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!perfcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_perfcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l3_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for l3_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map l3_pll base\n");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[PLL_BASE] = pbase;
+	l3_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for pwrcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map pwrcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[PLL_BASE] = pbase;
+	pwrcl_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "perfcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for perfcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map perfcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[PLL_BASE] = pbase;
+	perfcl_clk.vbases[PLL_BASE] = vbase;
+
+	/* efuse speed bin fuses are optional */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "pwrcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+			return -ENOMEM;
+		}
+		pwrcl_clk.pbases[EFUSE_BASE] = pbase;
+		pwrcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "perfcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+			return -ENOMEM;
+		}
+		perfcl_clk.pbases[EFUSE_BASE] = pbase;
+		perfcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	vdd_l3 = devm_regulator_get(&pdev->dev, "vdd-l3");
+	if (IS_ERR(vdd_l3)) {
+		rc = PTR_ERR(vdd_l3);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the l3 vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	l3_clk.vdd_reg = vdd_l3;
+
+	vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
+	if (IS_ERR(vdd_pwrcl)) {
+		rc = PTR_ERR(vdd_pwrcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the pwrcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	pwrcl_clk.vdd_reg = vdd_pwrcl;
+
+	vdd_perfcl = devm_regulator_get(&pdev->dev, "vdd-perfcl");
+	if (IS_ERR(vdd_perfcl)) {
+		rc = PTR_ERR(vdd_perfcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the perfcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	perfcl_clk.vdd_reg = vdd_perfcl;
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-l3-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-l3-supply\n");
+		return -EINVAL;
+	}
+
+	l3_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!l3_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-l3-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-pwrcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-pwrcl-supply\n");
+		return -EINVAL;
+	}
+
+	pwrcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!pwrcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-pwrcl-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-perfcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	perfcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!perfcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long init_rate = 300000000;
+
+static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
+{
+	int rc = 0, cpu, i;
+	int speedbin = 0, pvs_ver = 0;
+	u32 pte_efuse, val;
+	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
+	struct clk *ext_xo_clk, *clk;
+	struct clk_osm *c;
+	struct device *dev = &pdev->dev;
+	struct clk_onecell_data *clk_data;
+	struct resource *res;
+	void *vbase;
+	char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
+	char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
+	char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
+	struct cpu_cycle_counter_cb cb = {
+		.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
+	};
+
+	/*
+	 * Require the RPM-XO clock to be registered before OSM.
+	 * The cpuss_gpll0_clk_src is expected to be configured by the
+	 * bootloader.
+	 */
+	ext_xo_clk = devm_clk_get(dev, "xo_ao");
+	if (IS_ERR(ext_xo_clk)) {
+		if (PTR_ERR(ext_xo_clk) != -EPROBE_DEFER)
+			dev_err(dev, "Unable to get xo clock\n");
+		return PTR_ERR(ext_xo_clk);
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+								GFP_KERNEL);
+	if (!clk_data) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+					sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		rc = -ENOMEM;
+		goto clk_err;
+	}
+
+	clk_data->clk_num = num_clks;
+
+	rc = clk_osm_parse_dt_configs(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+		return rc;
+	}
+
+	rc = clk_osm_resources_init(pdev);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "resources init failed, rc=%d\n",
+									rc);
+		return rc;
+	}
+
+	if (l3_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
+		l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
+						    L3_EFUSE_MASK);
+		snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
+			 "qcom,l3-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (pwrcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+		pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+						    PWRCL_EFUSE_MASK);
+		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+			 "qcom,pwrcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (perfcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+		perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
+							PERFCL_EFUSE_MASK);
+		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
+			 "qcom,perfcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for L3\n");
+
+	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for pwrcl_clk\n");
+
+	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+
+	clk_osm_setup_cycle_counters(&l3_clk);
+	clk_osm_setup_cycle_counters(&pwrcl_clk);
+	clk_osm_setup_cycle_counters(&perfcl_clk);
+
+	clk_osm_print_osm_table(&l3_clk);
+	clk_osm_print_osm_table(&pwrcl_clk);
+	clk_osm_print_osm_table(&perfcl_clk);
+
+	rc = clk_osm_setup_hw_table(&l3_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&pwrcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&perfcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+		goto exit;
+	}
+
+	/* Policy tuning */
+	rc = clk_osm_set_cc_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "cc policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Freq Policy Tuning */
+	rc = clk_osm_set_llm_freq_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Voltage Policy Tuning */
+	rc = clk_osm_set_llm_volt_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+		goto exit;
+	}
+
+	clk_osm_setup_fsms(&l3_clk);
+	clk_osm_setup_fsms(&pwrcl_clk);
+	clk_osm_setup_fsms(&perfcl_clk);
+
+	/* Program VC at which the array power supply needs to be switched */
+	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
+					APM_CROSSOVER_VC);
+	if (perfcl_clk.secure_init) {
+		clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
+				DATA_MEM(77));
+		clk_osm_write_seq_reg(&perfcl_clk,
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)),
+				DATA_MEM(111));
+	} else {
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
+				perfcl_clk.apm_crossover_vc);
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(111),
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"apps_itm_ctl");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for apps_itm_ctl\n");
+		return -ENOMEM;
+	}
+
+	vbase = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!vbase) {
+		dev_err(&pdev->dev,
+				"Unable to map in apps_itm_ctl base\n");
+		return -ENOMEM;
+	}
+
+	val = readl_relaxed(vbase + 0x0);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x0);
+
+	val = readl_relaxed(vbase + 0x4);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x4);
+
+	/*
+	 * Perform typical secure-world HW initialization
+	 * as necessary.
+	 */
+	clk_osm_do_additional_setup(&l3_clk, pdev);
+	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+
+	/* MEM-ACC Programming */
+	clk_osm_program_mem_acc_regs(&l3_clk);
+	clk_osm_program_mem_acc_regs(&pwrcl_clk);
+	clk_osm_program_mem_acc_regs(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,osm-pll-setup")) {
+		clk_osm_setup_cluster_pll(&l3_clk);
+		clk_osm_setup_cluster_pll(&pwrcl_clk);
+		clk_osm_setup_cluster_pll(&perfcl_clk);
+	}
+
+	/* Misc programming */
+	clk_osm_misc_programming(&l3_clk);
+	clk_osm_misc_programming(&pwrcl_clk);
+	clk_osm_misc_programming(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,enable-per-core-dcvs")) {
+		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
+
+		val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL);
+	}
+
+	clk_ops_core = clk_dummy_ops;
+	clk_ops_core.set_rate = cpu_clk_set_rate;
+	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
+
+	spin_lock_init(&l3_clk.lock);
+	spin_lock_init(&pwrcl_clk.lock);
+	spin_lock_init(&perfcl_clk.lock);
+
+	/* Register OSM l3, pwr and perf clocks with Clock Framework */
+	for (i = 0; i < num_clks; i++) {
+		clk = devm_clk_register(&pdev->dev, osm_qcom_clk_hws[i]);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register CPU clock at index %d\n",
+				i);
+			return PTR_ERR(clk);
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+								clk_data);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to register CPU clocks\n");
+		goto provider_err;
+	}
+
+	get_online_cpus();
+
+	/* Enable OSM */
+	for_each_online_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return -EINVAL;
+		}
+
+		rc = clk_set_rate(c->hw.clk, init_rate);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set init rate on CPU %d, rc=%d\n",
+				cpu, rc);
+			goto provider_err;
+		}
+		WARN(clk_prepare_enable(c->hw.clk),
+		     "Failed to enable clock for cpu %d\n", cpu);
+		udelay(300);
+	}
+
+	rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
+			rc);
+		goto provider_err;
+	}
+	WARN(clk_prepare_enable(l3_clk.hw.clk),
+		     "Failed to enable clock for L3\n");
+	udelay(300);
+
+	populate_opp_table(pdev);
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	register_cpu_cycle_counter_cb(&cb);
+	pr_info("OSM driver inited\n");
+	put_online_cpus();
+
+	return 0;
+provider_err:
+	if (clk_data)
+		devm_kfree(&pdev->dev, clk_data->clks);
+clk_err:
+	devm_kfree(&pdev->dev, clk_data);
+exit:
+	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
+	panic("Unable to Setup OSM");
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,clk-cpu-osm" },
+	{}
+};
+
+static struct platform_driver clk_cpu_osm_driver = {
+	.probe = clk_cpu_osm_driver_probe,
+	.driver = {
+		.name = "clk-cpu-osm",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init clk_cpu_osm_init(void)
+{
+	return platform_driver_register(&clk_cpu_osm_driver);
+}
+arch_initcall(clk_cpu_osm_init);
+
+static void __exit clk_cpu_osm_exit(void)
+{
+	platform_driver_unregister(&clk_cpu_osm_driver);
+}
+module_exit(clk_cpu_osm_exit);
+
+MODULE_DESCRIPTION("QTI CPU clock driver for OSM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c
new file mode 100644
index 0000000..53288f7
--- /dev/null
+++ b/drivers/clk/qcom/clk-debug.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include "clk-regmap.h"
+#include "clk-debug.h"
+#include "common.h"
+
+static struct clk_hw *measure;
+
+static DEFINE_SPINLOCK(clk_reg_lock);
+static DEFINE_MUTEX(clk_debug_lock);
+
+#define TCXO_DIV_4_HZ		4800000
+#define SAMPLE_TICKS_1_MS	0x1000
+#define SAMPLE_TICKS_14_MS	0x10000
+
+#define XO_DIV4_CNT_DONE	BIT(25)
+#define CNT_EN			BIT(20)
+#define MEASURE_CNT		BM(24, 0)
+
+/* Sample clock for 'ticks' reference clock ticks. */
+static u32 run_measurement(unsigned int ticks, struct regmap *regmap,
+		u32 ctl_reg, u32 status_reg)
+{
+	u32 regval;
+
+	/* Stop counters and set the XO4 counter start value. */
+	regmap_write(regmap, ctl_reg, ticks);
+
+	regmap_read(regmap, status_reg, &regval);
+
+	/* Wait for timer to become ready. */
+	while ((regval & XO_DIV4_CNT_DONE) != 0) {
+		cpu_relax();
+		regmap_read(regmap, status_reg, &regval);
+	}
+
+	/* Run measurement and wait for completion. */
+	regmap_write(regmap, ctl_reg, (CNT_EN|ticks));
+
+	regmap_read(regmap, status_reg, &regval);
+
+	while ((regval & XO_DIV4_CNT_DONE) == 0) {
+		cpu_relax();
+		regmap_read(regmap, status_reg, &regval);
+	}
+
+	/* Return measured ticks. */
+	regmap_read(regmap, status_reg, &regval);
+	regval &= MEASURE_CNT;
+
+	/* Stop the counters */
+	regmap_write(regmap, ctl_reg, ticks);
+
+	return regval;
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ */
+static unsigned long clk_debug_mux_measure_rate(struct clk_hw *hw)
+{
+	unsigned long flags, ret = 0;
+	u32 gcc_xo4_reg, multiplier = 1;
+	u64 raw_count_short, raw_count_full;
+	struct clk_debug_mux *meas = to_clk_measure(hw);
+	struct measure_clk_data *data = meas->priv;
+
+	clk_prepare_enable(data->cxo);
+
+	spin_lock_irqsave(&clk_reg_lock, flags);
+
+	/* Enable CXO/4 and RINGOSC branch. */
+	regmap_read(meas->regmap[GCC], data->xo_div4_cbcr, &gcc_xo4_reg);
+	gcc_xo4_reg |= BIT(0);
+	regmap_write(meas->regmap[GCC], data->xo_div4_cbcr, gcc_xo4_reg);
+
+	/*
+	 * The ring oscillator counter will not reset if the measured clock
+	 * is not running.  To detect this, run a short measurement before
+	 * the full measurement.  If the raw results of the two are the same
+	 * then the clock must be off.
+	 */
+
+	/* Run a short measurement. (~1 ms) */
+	raw_count_short = run_measurement(SAMPLE_TICKS_1_MS, meas->regmap[GCC],
+				data->ctl_reg, data->status_reg);
+
+	/* Run a full measurement. (~14 ms) */
+	raw_count_full = run_measurement(SAMPLE_TICKS_14_MS, meas->regmap[GCC],
+				data->ctl_reg, data->status_reg);
+
+	gcc_xo4_reg &= ~BIT(0);
+	regmap_write(meas->regmap[GCC], data->xo_div4_cbcr, gcc_xo4_reg);
+
+	/* Return 0 if the clock is off. */
+	if (raw_count_full == raw_count_short)
+		ret = 0;
+	else {
+		/* Compute rate in Hz. */
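+		/*
+		 * rate ~= measured ticks * (XO/4 rate) / sample ticks; the
+		 * factor of 10 provides fixed-point headroom, and the +15/+35
+		 * offsets appear to account for the extra reference cycles
+		 * spent starting and stopping the counters.
+		 */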
+		raw_count_full = ((raw_count_full * 10) + 15) * TCXO_DIV_4_HZ;
+		do_div(raw_count_full, ((SAMPLE_TICKS_14_MS * 10) + 35));
+		ret = (raw_count_full * multiplier);
+	}
+
+	spin_unlock_irqrestore(&clk_reg_lock, flags);
+
+	clk_disable_unprepare(data->cxo);
+
+	return ret;
+}
+
+static u8 clk_debug_mux_get_parent(struct clk_hw *hw)
+{
+	struct clk_debug_mux *meas = to_clk_measure(hw);
+	int i, num_parents = clk_hw_get_num_parents(hw);
+
+	for (i = 0; i < num_parents; i++) {
+		if (!strcmp(meas->parent[i].parents,
+					hw->init->parent_names[i])) {
+			pr_debug("%s: Clock name %s index %d\n", __func__,
+					hw->init->name, i);
+			return i;
+		}
+	}
+
+	return 0;
+}
+
+static int clk_debug_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_debug_mux *meas = to_clk_measure(hw);
+	u32 regval = 0;
+	int dbg_cc = 0;
+
+	dbg_cc = meas->parent[index].dbg_cc;
+
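+	/*
+	 * Clocks owned by a sub-controller are routed through that
+	 * controller's local debug mux and post divider first; the GCC
+	 * debug mux then selects the sub-controller's output.
+	 */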
+	if (dbg_cc != GCC) {
+		/* Update the recursive debug mux */
+		regmap_read(meas->regmap[dbg_cc],
+				meas->parent[index].mux_offset, &regval);
+		regval &= ~meas->parent[index].mux_sel_mask <<
+				meas->parent[index].mux_sel_shift;
+		regval |= (meas->parent[index].dbg_cc_mux_sel &
+				meas->parent[index].mux_sel_mask) <<
+				meas->parent[index].mux_sel_shift;
+		regmap_write(meas->regmap[dbg_cc],
+				meas->parent[index].mux_offset, regval);
+
+		regmap_read(meas->regmap[dbg_cc],
+				meas->parent[index].post_div_offset, &regval);
+		regval &= ~meas->parent[index].post_div_mask <<
+				meas->parent[index].post_div_shift;
+		regval |= ((meas->parent[index].post_div_val - 1) &
+				meas->parent[index].post_div_mask) <<
+				meas->parent[index].post_div_shift;
+		regmap_write(meas->regmap[dbg_cc],
+				meas->parent[index].post_div_offset, regval);
+
+		regmap_read(meas->regmap[dbg_cc],
+				meas->parent[index].cbcr_offset, &regval);
+		regval |= BIT(0);
+		regmap_write(meas->regmap[dbg_cc],
+				meas->parent[index].cbcr_offset, regval);
+	}
+
+	/* Update the debug sel for GCC */
+	regmap_read(meas->regmap[GCC], meas->debug_offset, &regval);
+	regval &= ~meas->src_sel_mask << meas->src_sel_shift;
+	regval |= (meas->parent[index].prim_mux_sel & meas->src_sel_mask) <<
+			meas->src_sel_shift;
+	regmap_write(meas->regmap[GCC], meas->debug_offset, regval);
+
+	/* Set the GCC mux's post divider bits */
+	regmap_read(meas->regmap[GCC], meas->post_div_offset, &regval);
+	regval &= ~meas->post_div_mask << meas->post_div_shift;
+	regval |= ((meas->parent[index].prim_mux_div_val - 1) &
+			meas->post_div_mask) << meas->post_div_shift;
+	regmap_write(meas->regmap[GCC], meas->post_div_offset, regval);
+
+	/* Turn on the GCC_DEBUG_CBCR */
+	regmap_read(meas->regmap[GCC], meas->cbcr_offset, &regval);
+	regval |= BIT(0);
+	regmap_write(meas->regmap[GCC], meas->cbcr_offset, regval);
+
+	return 0;
+}
+
+const struct clk_ops clk_debug_mux_ops = {
+	.get_parent = clk_debug_mux_get_parent,
+	.set_parent = clk_debug_mux_set_parent,
+};
+EXPORT_SYMBOL(clk_debug_mux_ops);
+
+static int clk_debug_measure_get(void *data, u64 *val)
+{
+	struct clk_hw *hw = data, *par;
+	struct clk_debug_mux *meas = to_clk_measure(measure);
+	int index;
+	int ret = 0;
+	unsigned long meas_rate, sw_rate;
+
+	mutex_lock(&clk_debug_lock);
+
+	ret = clk_set_parent(measure->clk, hw->clk);
+	if (!ret) {
+		par = measure;
+		index =  clk_debug_mux_get_parent(measure);
+		while (par && par != hw) {
+			if (par->init->ops->enable)
+				par->init->ops->enable(par);
+			par = clk_hw_get_parent(par);
+		}
+		*val = clk_debug_mux_measure_rate(measure);
+		if (meas->parent[index].dbg_cc != GCC)
+			*val *= meas->parent[index].post_div_val;
+		*val *= meas->parent[index].prim_mux_div_val;
+	}
+
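+	/*
+	 * The debug path may run at a divided rate relative to the clock's
+	 * software-cached rate; if so, scale the measurement back up by the
+	 * implied ratio.
+	 */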
+	meas_rate = clk_get_rate(hw->clk);
+	par = clk_hw_get_parent(measure);
+	if (!par) {
+		mutex_unlock(&clk_debug_lock);
+		return -EINVAL;
+	}
+
+	sw_rate = clk_get_rate(par->clk);
+	if (sw_rate && meas_rate >= (sw_rate * 2))
+		*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
+
+	mutex_unlock(&clk_debug_lock);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clk_measure_fops, clk_debug_measure_get,
+							NULL, "%lld\n");
+
+int clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry)
+{
+	if (IS_ERR_OR_NULL(measure)) {
+		pr_err_once("Please check if `measure` clk is registered.\n");
+		return 0;
+	}
+
+	if (clk_set_parent(measure->clk, hw->clk))
+		return 0;
+
+	debugfs_create_file("clk_measure", 0x444, dentry, hw,
+					&clk_measure_fops);
+	return 0;
+}
+EXPORT_SYMBOL(clk_debug_measure_add);
+
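+/*
+ * Record the clock flagged CLK_IS_MEASURE as the global debug/measure mux;
+ * all rate measurements are routed through it.
+ */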
+int clk_debug_measure_register(struct clk_hw *hw)
+{
+	if (IS_ERR_OR_NULL(measure)) {
+		if (hw->init->flags & CLK_IS_MEASURE) {
+			measure = hw;
+			return 0;
+		}
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(clk_debug_measure_register);
diff --git a/drivers/clk/qcom/clk-debug.h b/drivers/clk/qcom/clk-debug.h
new file mode 100644
index 0000000..280704e
--- /dev/null
+++ b/drivers/clk/qcom/clk-debug.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_DEBUG_H__
+#define __QCOM_CLK_DEBUG_H__
+
+#include "../clk.h"
+
+/* Debugfs Measure Clocks */
+
+/**
+ * struct measure_clk_data - Structure of clk measure
+ *
+ * @cxo:		XO clock.
+ * @xo_div4_cbcr:	offset of debug XO/4 div register.
+ * @ctl_reg:		offset of debug control register.
+ * @status_reg:		offset of debug status register.
+ * @cbcr_offset:	branch register to turn on debug mux.
+ */
+struct measure_clk_data {
+	struct clk *cxo;
+	u32 ctl_reg;
+	u32 status_reg;
+	u32 xo_div4_cbcr;
+};
+
+/**
+ * List of Debug clock controllers.
+ */
+enum debug_cc {
+	GCC,
+	CAM_CC,
+	DISP_CC,
+	GPU_CC,
+	VIDEO_CC,
+	CPU,
+};
+
+/**
+ * struct clk_src - Structure of clock source for debug mux
+ *
+ * @parents:		clock name to be used as parent for debug mux.
+ * @prim_mux_sel:	debug mux index at global clock controller.
+ * @prim_mux_div_val:	PLL post-divider setting for the primary mux.
+ * @dbg_cc:		indicates the clock controller for recursive debug
+ *			clock controllers.
+ * @dbg_cc_mux_sel:	indicates the debug mux index at recursive debug mux.
+ * @mux_sel_mask:	indicates the mask for the mux selection.
+ * @mux_sel_shift:	indicates the shift required for mux selection.
+ * @post_div_mask:	indicates the post div mask to be used at recursive
+ *			debug mux.
+ * @post_div_shift:	indicates the shift required for post divider
+ *			configuration.
+ * @post_div_val:	indicates the post div value to be used at recursive
+ *			debug mux.
+ * @mux_offset:		the debug mux offset.
+ * @post_div_offset:	register with post-divider settings for the debug mux.
+ * @cbcr_offset:	branch register to turn on debug mux.
+ */
+struct clk_src {
+	const char *parents;
+	int prim_mux_sel;
+	u32 prim_mux_div_val;
+	enum debug_cc dbg_cc;
+	int dbg_cc_mux_sel;
+	u32 mux_sel_mask;
+	u32 mux_sel_shift;
+	u32 post_div_mask;
+	u32 post_div_shift;
+	u32 post_div_val;
+	u32 mux_offset;
+	u32 post_div_offset;
+	u32 cbcr_offset;
+};
+
+#define MUX_SRC_LIST(...) \
+	.parent = (struct clk_src[]){__VA_ARGS__}, \
+	.num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
+
+/**
+ * struct clk_debug_mux - Structure of clock debug mux
+ *
+ * @parent:		structure of clk_src
+ * @num_parents:	number of parents
+ * @regmap:		regmaps of debug mux
+ * @priv:		private measure_clk_data to be used by debug mux
+ * @debug_offset:	debug mux offset.
+ * @post_div_offset:	register with post-divider settings for the debug mux.
+ * @cbcr_offset:	branch register to turn on debug mux.
+ * @src_sel_mask:	indicates the mask to be used for src selection in
+ *			primary mux.
+ * @src_sel_shift:	indicates the shift required for source selection in
+ *			primary mux.
+ * @post_div_mask:	indicates the post div mask to be used for the primary
+ *			mux.
+ * @post_div_shift:	indicates the shift required for post divider
+ *			selection in primary mux.
+ * @hw:			handle between common and hardware-specific interfaces.
+ */
+struct clk_debug_mux {
+	struct clk_src *parent;
+	int num_parents;
+	struct regmap **regmap;
+	void *priv;
+	u32 debug_offset;
+	u32 post_div_offset;
+	u32 cbcr_offset;
+	u32 src_sel_mask;
+	u32 src_sel_shift;
+	u32 post_div_mask;
+	u32 post_div_shift;
+	struct clk_hw hw;
+};
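+
+/*
+ * Illustrative declaration only (offsets and selector values here are
+ * placeholders; see debugcc-sdm845.c for a real instance):
+ *
+ *	static struct clk_debug_mux cc_debug_mux = {
+ *		.priv = &debug_mux_priv,
+ *		.debug_offset = 0x0,
+ *		.src_sel_mask = 0x3FF,
+ *		.src_sel_shift = 0,
+ *		MUX_SRC_LIST(
+ *			{ "some_cc_clk", 0x0, 1, GCC,
+ *				0x0, 0xFF, 0, 0x3, 0, 1, 0x0, 0x0, 0x0 },
+ *		),
+ *	};
+ */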
+
+#define to_clk_measure(_hw) container_of((_hw), struct clk_debug_mux, hw)
+
+extern const struct clk_ops clk_debug_mux_ops;
+
+int clk_debug_measure_register(struct clk_hw *hw);
+int clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry);
+
+#endif
diff --git a/drivers/clk/qcom/clk-dummy.c b/drivers/clk/qcom/clk-dummy.c
index e2465c4..3435999 100644
--- a/drivers/clk/qcom/clk-dummy.c
+++ b/drivers/clk/qcom/clk-dummy.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 
 #include "common.h"
+#include "clk-debug.h"
 
 #define to_clk_dummy(_hw)	container_of(_hw, struct clk_dummy, hw)
 
@@ -60,6 +61,7 @@
 	.round_rate = dummy_clk_round_rate,
 	.recalc_rate = dummy_clk_recalc_rate,
 	.set_flags = dummy_clk_set_flags,
+	.debug_init = clk_debug_measure_add,
 };
 EXPORT_SYMBOL_GPL(clk_dummy_ops);
 
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 0c0ddf9..3a38d37 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -161,7 +161,7 @@
  * @current_freq: last cached frequency when using branches with shared RCGs
  * @enable_safe_config: When set, the RCG is parked at CXO when it's disabled
  * @clkr: regmap clock handle
- *
+ * @flags: additional flag parameters for the RCG
  */
 struct clk_rcg2 {
 	u32			cmd_rcgr;
@@ -172,6 +172,8 @@
 	unsigned long		current_freq;
 	bool			enable_safe_config;
 	struct clk_regmap	clkr;
+	u8			flags;
+#define FORCE_ENABLE_RCG	BIT(0)
 };
 
 #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a13a45e..8484b57 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -164,6 +164,47 @@
 					CMD_ROOT_EN, 0);
 }
 
+static int prepare_enable_rcg_srcs(struct clk *curr, struct clk *new)
+{
+	int rc = 0;
+
+	rc = clk_prepare(curr);
+	if (rc)
+		return rc;
+
+	rc = clk_prepare(new);
+	if (rc)
+		goto err_new_src_prepare;
+
+	rc = clk_enable(curr);
+	if (rc)
+		goto err_curr_src_enable;
+
+	rc = clk_enable(new);
+	if (rc)
+		goto err_new_src_enable;
+
+	return rc;
+
+err_new_src_enable:
+	clk_disable(curr);
+err_curr_src_enable:
+	clk_unprepare(new);
+err_new_src_prepare:
+	clk_unprepare(curr);
+
+	return rc;
+}
+
+static void disable_unprepare_rcg_srcs(struct clk *curr, struct clk *new)
+{
+	clk_disable(new);
+	clk_disable(curr);
+
+	clk_unprepare(new);
+	clk_unprepare(curr);
+}
+
 /*
  * Calculate m/n:d rate
  *
@@ -378,7 +419,8 @@
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	const struct freq_tbl *f;
-	int ret;
+	int ret, curr_src_index, new_src_index;
+	struct clk_hw *curr_src = NULL, *new_src = NULL;
 
 	f = qcom_find_freq(rcg->freq_tbl, rate);
 	if (!f)
@@ -393,10 +435,38 @@
 		return 0;
 	}
 
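+	/*
+	 * For force-enabled RCGs, keep both the current and the new source
+	 * clocks prepared and enabled, and force the root on while the new
+	 * configuration is programmed.
+	 */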
+	if (rcg->flags & FORCE_ENABLE_RCG) {
+		if (!rcg->current_freq)
+			rcg->current_freq = cxo_f.freq;
+
+		if (rcg->current_freq == cxo_f.freq)
+			curr_src_index = 0;
+		else {
+			f = qcom_find_freq(rcg->freq_tbl, rcg->current_freq);
+			curr_src_index = qcom_find_src_index(hw,
+						rcg->parent_map, f->src);
+		}
+
+		new_src_index = qcom_find_src_index(hw, rcg->parent_map,
+							f->src);
+
+		curr_src = clk_hw_get_parent_by_index(hw, curr_src_index);
+		new_src = clk_hw_get_parent_by_index(hw, new_src_index);
+
+		/* The RCG could currently be disabled. Enable its parents. */
+		ret = prepare_enable_rcg_srcs(curr_src->clk, new_src->clk);
+		clk_rcg2_set_force_enable(hw);
+	}
+
 	ret = clk_rcg2_configure(rcg, f);
 	if (ret)
 		return ret;
 
+	if (rcg->flags & FORCE_ENABLE_RCG) {
+		clk_rcg2_clear_force_enable(hw);
+		disable_unprepare_rcg_srcs(curr_src->clk, new_src->clk);
+	}
+
 	/* Update current frequency with the requested frequency. */
 	rcg->current_freq = rate;
 	return ret;
@@ -420,6 +490,11 @@
 	unsigned long rate;
 	const struct freq_tbl *f;
 
+	if (rcg->flags & FORCE_ENABLE_RCG) {
+		clk_rcg2_set_force_enable(hw);
+		return 0;
+	}
+
 	if (!rcg->enable_safe_config)
 		return 0;
 
@@ -456,6 +531,11 @@
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 
+	if (rcg->flags & FORCE_ENABLE_RCG) {
+		clk_rcg2_clear_force_enable(hw);
+		return;
+	}
+
 	if (!rcg->enable_safe_config)
 		return;
 	/*
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index eface18..e728dec 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -57,4 +57,8 @@
 extern int qcom_cc_probe(struct platform_device *pdev,
 			 const struct qcom_cc_desc *desc);
 extern struct clk_ops clk_dummy_ops;
+
+#define BM(msb, lsb)	(((((uint32_t)-1) << (31-msb)) >> (31-msb+lsb)) << lsb)
+#define BVAL(msb, lsb, val)	(((val) << lsb) & BM(msb, lsb))
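+/* e.g. BM(5, 1) == 0x3e and BVAL(5, 1, 3) == (3 << 1) == 0x6 */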
+
 #endif
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
new file mode 100644
index 0000000..d74db61
--- /dev/null
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -0,0 +1,885 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-debug.h"
+
+static struct measure_clk_data debug_mux_priv = {
+	.ctl_reg = 0x62024,
+	.status_reg = 0x62028,
+	.xo_div4_cbcr = 0x43008,
+};
+
+static const char *const debug_mux_parent_names[] = {
+	"cam_cc_bps_ahb_clk",
+	"cam_cc_bps_areg_clk",
+	"cam_cc_bps_axi_clk",
+	"cam_cc_bps_clk",
+	"cam_cc_camnoc_atb_clk",
+	"cam_cc_camnoc_axi_clk",
+	"cam_cc_cci_clk",
+	"cam_cc_cpas_ahb_clk",
+	"cam_cc_csi0phytimer_clk",
+	"cam_cc_csi1phytimer_clk",
+	"cam_cc_csi2phytimer_clk",
+	"cam_cc_csiphy0_clk",
+	"cam_cc_csiphy1_clk",
+	"cam_cc_csiphy2_clk",
+	"cam_cc_fd_core_clk",
+	"cam_cc_fd_core_uar_clk",
+	"cam_cc_icp_apb_clk",
+	"cam_cc_icp_atb_clk",
+	"cam_cc_icp_clk",
+	"cam_cc_icp_cti_clk",
+	"cam_cc_icp_ts_clk",
+	"cam_cc_ife_0_axi_clk",
+	"cam_cc_ife_0_clk",
+	"cam_cc_ife_0_cphy_rx_clk",
+	"cam_cc_ife_0_csid_clk",
+	"cam_cc_ife_0_dsp_clk",
+	"cam_cc_ife_1_axi_clk",
+	"cam_cc_ife_1_clk",
+	"cam_cc_ife_1_cphy_rx_clk",
+	"cam_cc_ife_1_csid_clk",
+	"cam_cc_ife_1_dsp_clk",
+	"cam_cc_ife_lite_clk",
+	"cam_cc_ife_lite_cphy_rx_clk",
+	"cam_cc_ife_lite_csid_clk",
+	"cam_cc_ipe_0_ahb_clk",
+	"cam_cc_ipe_0_areg_clk",
+	"cam_cc_ipe_0_axi_clk",
+	"cam_cc_ipe_0_clk",
+	"cam_cc_ipe_1_ahb_clk",
+	"cam_cc_ipe_1_areg_clk",
+	"cam_cc_ipe_1_axi_clk",
+	"cam_cc_ipe_1_clk",
+	"cam_cc_jpeg_clk",
+	"cam_cc_lrme_clk",
+	"cam_cc_mclk0_clk",
+	"cam_cc_mclk1_clk",
+	"cam_cc_mclk2_clk",
+	"cam_cc_mclk3_clk",
+	"cam_cc_soc_ahb_clk",
+	"cam_cc_sys_tmr_clk",
+	"disp_cc_mdss_ahb_clk",
+	"disp_cc_mdss_axi_clk",
+	"disp_cc_mdss_byte0_clk",
+	"disp_cc_mdss_byte0_intf_clk",
+	"disp_cc_mdss_byte1_clk",
+	"disp_cc_mdss_byte1_intf_clk",
+	"disp_cc_mdss_dp_aux_clk",
+	"disp_cc_mdss_dp_crypto_clk",
+	"disp_cc_mdss_dp_link_clk",
+	"disp_cc_mdss_dp_link_intf_clk",
+	"disp_cc_mdss_dp_pixel1_clk",
+	"disp_cc_mdss_dp_pixel_clk",
+	"disp_cc_mdss_esc0_clk",
+	"disp_cc_mdss_esc1_clk",
+	"disp_cc_mdss_mdp_clk",
+	"disp_cc_mdss_mdp_lut_clk",
+	"disp_cc_mdss_pclk0_clk",
+	"disp_cc_mdss_pclk1_clk",
+	"disp_cc_mdss_qdss_at_clk",
+	"disp_cc_mdss_qdss_tsctr_div8_clk",
+	"disp_cc_mdss_rot_clk",
+	"disp_cc_mdss_rscc_ahb_clk",
+	"disp_cc_mdss_rscc_vsync_clk",
+	"disp_cc_mdss_spdm_debug_clk",
+	"disp_cc_mdss_spdm_dp_crypto_clk",
+	"disp_cc_mdss_spdm_dp_pixel1_clk",
+	"disp_cc_mdss_spdm_dp_pixel_clk",
+	"disp_cc_mdss_spdm_mdp_clk",
+	"disp_cc_mdss_spdm_pclk0_clk",
+	"disp_cc_mdss_spdm_pclk1_clk",
+	"disp_cc_mdss_spdm_rot_clk",
+	"disp_cc_mdss_vsync_clk",
+	"gcc_aggre_noc_pcie_tbu_clk",
+	"gcc_aggre_ufs_card_axi_clk",
+	"gcc_aggre_ufs_phy_axi_clk",
+	"gcc_aggre_usb3_prim_axi_clk",
+	"gcc_aggre_usb3_sec_axi_clk",
+	"gcc_boot_rom_ahb_clk",
+	"gcc_camera_ahb_clk",
+	"gcc_camera_axi_clk",
+	"gcc_camera_xo_clk",
+	"gcc_ce1_ahb_clk",
+	"gcc_ce1_axi_clk",
+	"gcc_ce1_clk",
+	"gcc_cfg_noc_usb3_prim_axi_clk",
+	"gcc_cfg_noc_usb3_sec_axi_clk",
+	"gcc_cpuss_ahb_clk",
+	"gcc_cpuss_dvm_bus_clk",
+	"gcc_cpuss_gnoc_clk",
+	"gcc_cpuss_rbcpr_clk",
+	"gcc_ddrss_gpu_axi_clk",
+	"gcc_disp_ahb_clk",
+	"gcc_disp_axi_clk",
+	"gcc_disp_gpll0_clk_src",
+	"gcc_disp_gpll0_div_clk_src",
+	"gcc_disp_xo_clk",
+	"gcc_gp1_clk",
+	"gcc_gp2_clk",
+	"gcc_gp3_clk",
+	"gcc_gpu_cfg_ahb_clk",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
+	"gcc_gpu_memnoc_gfx_clk",
+	"gcc_gpu_snoc_dvm_gfx_clk",
+	"gcc_mss_axis2_clk",
+	"gcc_mss_cfg_ahb_clk",
+	"gcc_mss_gpll0_div_clk_src",
+	"gcc_mss_mfab_axis_clk",
+	"gcc_mss_q6_memnoc_axi_clk",
+	"gcc_mss_snoc_axi_clk",
+	"gcc_pcie_0_aux_clk",
+	"gcc_pcie_0_cfg_ahb_clk",
+	"gcc_pcie_0_mstr_axi_clk",
+	"gcc_pcie_0_pipe_clk",
+	"gcc_pcie_0_slv_axi_clk",
+	"gcc_pcie_0_slv_q2a_axi_clk",
+	"gcc_pcie_1_aux_clk",
+	"gcc_pcie_1_cfg_ahb_clk",
+	"gcc_pcie_1_mstr_axi_clk",
+	"gcc_pcie_1_pipe_clk",
+	"gcc_pcie_1_slv_axi_clk",
+	"gcc_pcie_1_slv_q2a_axi_clk",
+	"gcc_pcie_phy_aux_clk",
+	"gcc_pcie_phy_refgen_clk",
+	"gcc_pdm2_clk",
+	"gcc_pdm_ahb_clk",
+	"gcc_pdm_xo4_clk",
+	"gcc_prng_ahb_clk",
+	"gcc_qmip_camera_ahb_clk",
+	"gcc_qmip_disp_ahb_clk",
+	"gcc_qmip_video_ahb_clk",
+	"gcc_qupv3_wrap0_core_2x_clk",
+	"gcc_qupv3_wrap0_core_clk",
+	"gcc_qupv3_wrap0_s0_clk",
+	"gcc_qupv3_wrap0_s1_clk",
+	"gcc_qupv3_wrap0_s2_clk",
+	"gcc_qupv3_wrap0_s3_clk",
+	"gcc_qupv3_wrap0_s4_clk",
+	"gcc_qupv3_wrap0_s5_clk",
+	"gcc_qupv3_wrap0_s6_clk",
+	"gcc_qupv3_wrap0_s7_clk",
+	"gcc_qupv3_wrap1_core_2x_clk",
+	"gcc_qupv3_wrap1_core_clk",
+	"gcc_qupv3_wrap1_s0_clk",
+	"gcc_qupv3_wrap1_s1_clk",
+	"gcc_qupv3_wrap1_s2_clk",
+	"gcc_qupv3_wrap1_s3_clk",
+	"gcc_qupv3_wrap1_s4_clk",
+	"gcc_qupv3_wrap1_s5_clk",
+	"gcc_qupv3_wrap1_s6_clk",
+	"gcc_qupv3_wrap1_s7_clk",
+	"gcc_qupv3_wrap_0_m_ahb_clk",
+	"gcc_qupv3_wrap_0_s_ahb_clk",
+	"gcc_qupv3_wrap_1_m_ahb_clk",
+	"gcc_qupv3_wrap_1_s_ahb_clk",
+	"gcc_sdcc2_ahb_clk",
+	"gcc_sdcc2_apps_clk",
+	"gcc_sdcc4_ahb_clk",
+	"gcc_sdcc4_apps_clk",
+	"gcc_sys_noc_cpuss_ahb_clk",
+	"gcc_tsif_ahb_clk",
+	"gcc_tsif_inactivity_timers_clk",
+	"gcc_tsif_ref_clk",
+	"gcc_ufs_card_ahb_clk",
+	"gcc_ufs_card_axi_clk",
+	"gcc_ufs_card_ice_core_clk",
+	"gcc_ufs_card_phy_aux_clk",
+	"gcc_ufs_card_rx_symbol_0_clk",
+	"gcc_ufs_card_rx_symbol_1_clk",
+	"gcc_ufs_card_tx_symbol_0_clk",
+	"gcc_ufs_card_unipro_core_clk",
+	"gcc_ufs_phy_ahb_clk",
+	"gcc_ufs_phy_axi_clk",
+	"gcc_ufs_phy_ice_core_clk",
+	"gcc_ufs_phy_phy_aux_clk",
+	"gcc_ufs_phy_rx_symbol_0_clk",
+	"gcc_ufs_phy_rx_symbol_1_clk",
+	"gcc_ufs_phy_tx_symbol_0_clk",
+	"gcc_ufs_phy_unipro_core_clk",
+	"gcc_usb30_prim_master_clk",
+	"gcc_usb30_prim_mock_utmi_clk",
+	"gcc_usb30_prim_sleep_clk",
+	"gcc_usb30_sec_master_clk",
+	"gcc_usb30_sec_mock_utmi_clk",
+	"gcc_usb30_sec_sleep_clk",
+	"gcc_usb3_prim_phy_aux_clk",
+	"gcc_usb3_prim_phy_com_aux_clk",
+	"gcc_usb3_prim_phy_pipe_clk",
+	"gcc_usb3_sec_phy_aux_clk",
+	"gcc_usb3_sec_phy_com_aux_clk",
+	"gcc_usb3_sec_phy_pipe_clk",
+	"gcc_usb_phy_cfg_ahb2phy_clk",
+	"gcc_video_ahb_clk",
+	"gcc_video_axi_clk",
+	"gcc_video_xo_clk",
+	"gpu_cc_acd_cxo_clk",
+	"gpu_cc_ahb_clk",
+	"gpu_cc_crc_ahb_clk",
+	"gpu_cc_cx_apb_clk",
+	"gpu_cc_cx_gfx3d_clk",
+	"gpu_cc_cx_gfx3d_slv_clk",
+	"gpu_cc_cx_gmu_clk",
+	"gpu_cc_cx_qdss_at_clk",
+	"gpu_cc_cx_qdss_trig_clk",
+	"gpu_cc_cx_qdss_tsctr_clk",
+	"gpu_cc_cx_snoc_dvm_clk",
+	"gpu_cc_cxo_aon_clk",
+	"gpu_cc_cxo_clk",
+	"gpu_cc_gx_cxo_clk",
+	"gpu_cc_gx_gmu_clk",
+	"gpu_cc_gx_qdss_tsctr_clk",
+	"gpu_cc_gx_vsense_clk",
+	"gpu_cc_rbcpr_ahb_clk",
+	"gpu_cc_rbcpr_clk",
+	"gpu_cc_sleep_clk",
+	"gpu_cc_spdm_gx_gfx3d_div_clk",
+	"video_cc_apb_clk",
+	"video_cc_at_clk",
+	"video_cc_qdss_trig_clk",
+	"video_cc_qdss_tsctr_div8_clk",
+	"video_cc_vcodec0_axi_clk",
+	"video_cc_vcodec0_core_clk",
+	"video_cc_vcodec1_axi_clk",
+	"video_cc_vcodec1_core_clk",
+	"video_cc_venus_ahb_clk",
+	"video_cc_venus_ctl_axi_clk",
+	"video_cc_venus_ctl_core_clk",
+};
+
+static struct clk_debug_mux gcc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x62008,
+	.post_div_offset = 0x62000,
+	.cbcr_offset = 0x62004,
+	.src_sel_mask = 0x3FF,
+	.src_sel_shift = 0,
+	.post_div_mask = 0xF,
+	.post_div_shift = 0,
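+	/*
+	 * Each entry below is a positional struct clk_src initializer:
+	 * { parents, prim_mux_sel, prim_mux_div_val, dbg_cc, dbg_cc_mux_sel,
+	 *   mux_sel_mask, mux_sel_shift, post_div_mask, post_div_shift,
+	 *   post_div_val, mux_offset, post_div_offset, cbcr_offset }
+	 */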
+	MUX_SRC_LIST(
+		{ "cam_cc_bps_ahb_clk", 0x46, 4, CAM_CC,
+			0xE, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_bps_areg_clk", 0x46, 4, CAM_CC,
+			0xD, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_bps_axi_clk", 0x46, 4, CAM_CC,
+			0xC, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_bps_clk", 0x46, 4, CAM_CC,
+			0xB, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_camnoc_atb_clk", 0x46, 4, CAM_CC,
+			0x34, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_camnoc_axi_clk", 0x46, 4, CAM_CC,
+			0x2D, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_cci_clk", 0x46, 4, CAM_CC,
+			0x2A, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_cpas_ahb_clk", 0x46, 4, CAM_CC,
+			0x2C, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_csi0phytimer_clk", 0x46, 4, CAM_CC,
+			0x5, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_csi1phytimer_clk", 0x46, 4, CAM_CC,
+			0x7, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_csi2phytimer_clk", 0x46, 4, CAM_CC,
+			0x9, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_csiphy0_clk", 0x46, 4, CAM_CC,
+			0x6, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_csiphy1_clk", 0x46, 4, CAM_CC,
+			0x8, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_csiphy2_clk", 0x46, 4, CAM_CC,
+			0xA, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_fd_core_clk", 0x46, 4, CAM_CC,
+			0x28, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_fd_core_uar_clk", 0x46, 4, CAM_CC,
+			0x29, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_icp_apb_clk", 0x46, 4, CAM_CC,
+			0x32, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_icp_atb_clk", 0x46, 4, CAM_CC,
+			0x2F, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_icp_clk", 0x46, 4, CAM_CC,
+			0x26, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_icp_cti_clk", 0x46, 4, CAM_CC,
+			0x30, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_icp_ts_clk", 0x46, 4, CAM_CC,
+			0x31, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_0_axi_clk", 0x46, 4, CAM_CC,
+			0x1B, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_0_clk", 0x46, 4, CAM_CC,
+			0x17, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_0_cphy_rx_clk", 0x46, 4, CAM_CC,
+			0x1A, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_0_csid_clk", 0x46, 4, CAM_CC,
+			0x19, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_0_dsp_clk", 0x46, 4, CAM_CC,
+			0x18, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_1_axi_clk", 0x46, 4, CAM_CC,
+			0x21, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_1_clk", 0x46, 4, CAM_CC,
+			0x1D, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_1_cphy_rx_clk", 0x46, 4, CAM_CC,
+			0x20, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_1_csid_clk", 0x46, 4, CAM_CC,
+			0x1F, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_1_dsp_clk", 0x46, 4, CAM_CC,
+			0x1E, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_lite_clk", 0x46, 4, CAM_CC,
+			0x22, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_lite_cphy_rx_clk", 0x46, 4, CAM_CC,
+			0x24, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ife_lite_csid_clk", 0x46, 4, CAM_CC,
+			0x23, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_0_ahb_clk", 0x46, 4, CAM_CC,
+			0x12, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_0_areg_clk", 0x46, 4, CAM_CC,
+			0x11, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_0_axi_clk", 0x46, 4, CAM_CC,
+			0x10, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_0_clk", 0x46, 4, CAM_CC,
+			0xF, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_1_ahb_clk", 0x46, 4, CAM_CC,
+			0x16, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_1_areg_clk", 0x46, 4, CAM_CC,
+			0x15, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_1_axi_clk", 0x46, 4, CAM_CC,
+			0x14, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_ipe_1_clk", 0x46, 4, CAM_CC,
+			0x13, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_jpeg_clk", 0x46, 4, CAM_CC,
+			0x25, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_lrme_clk", 0x46, 4, CAM_CC,
+			0x2B, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_mclk0_clk", 0x46, 4, CAM_CC,
+			0x1, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_mclk1_clk", 0x46, 4, CAM_CC,
+			0x2, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_mclk2_clk", 0x46, 4, CAM_CC,
+			0x3, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_mclk3_clk", 0x46, 4, CAM_CC,
+			0x4, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_soc_ahb_clk", 0x46, 4, CAM_CC,
+			0x2E, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_sys_tmr_clk", 0x46, 4, CAM_CC,
+			0x33, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "disp_cc_mdss_ahb_clk", 0x47, 4, DISP_CC,
+			0x13, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_axi_clk", 0x47, 4, DISP_CC,
+			0x14, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_byte0_clk", 0x47, 4, DISP_CC,
+			0x7, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_byte0_intf_clk", 0x47, 4, DISP_CC,
+			0x8, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_byte1_clk", 0x47, 4, DISP_CC,
+			0x9, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_byte1_intf_clk", 0x47, 4, DISP_CC,
+			0xA, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_dp_aux_clk", 0x47, 4, DISP_CC,
+			0x12, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_dp_crypto_clk", 0x47, 4, DISP_CC,
+			0xF, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_dp_link_clk", 0x47, 4, DISP_CC,
+			0xD, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_dp_link_intf_clk", 0x47, 4, DISP_CC,
+			0xE, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_dp_pixel1_clk", 0x47, 4, DISP_CC,
+			0x11, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_dp_pixel_clk", 0x47, 4, DISP_CC,
+			0x10, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_esc0_clk", 0x47, 4, DISP_CC,
+			0xB, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_esc1_clk", 0x47, 4, DISP_CC,
+			0xC, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_mdp_clk", 0x47, 4, DISP_CC,
+			0x3, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_mdp_lut_clk", 0x47, 4, DISP_CC,
+			0x5, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_pclk0_clk", 0x47, 4, DISP_CC,
+			0x1, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_pclk1_clk", 0x47, 4, DISP_CC,
+			0x2, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_qdss_at_clk", 0x47, 4, DISP_CC,
+			0x15, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_qdss_tsctr_div8_clk", 0x47, 4, DISP_CC,
+			0x16, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_rot_clk", 0x47, 4, DISP_CC,
+			0x4, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_rscc_ahb_clk", 0x47, 4, DISP_CC,
+			0x17, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_rscc_vsync_clk", 0x47, 4, DISP_CC,
+			0x18, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_debug_clk", 0x47, 4, DISP_CC,
+			0x20, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_dp_crypto_clk", 0x47, 4, DISP_CC,
+			0x1D, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_dp_pixel1_clk", 0x47, 4, DISP_CC,
+			0x1F, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_dp_pixel_clk", 0x47, 4, DISP_CC,
+			0x1E, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_mdp_clk", 0x47, 4, DISP_CC,
+			0x1B, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_pclk0_clk", 0x47, 4, DISP_CC,
+			0x19, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_pclk1_clk", 0x47, 4, DISP_CC,
+			0x1A, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_spdm_rot_clk", 0x47, 4, DISP_CC,
+			0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
+			0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "gcc_aggre_noc_pcie_tbu_clk", 0x2D, 4, GCC,
+			0x2D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_aggre_ufs_card_axi_clk", 0x11E, 4, GCC,
+			0x11E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_aggre_ufs_phy_axi_clk", 0x11D, 4, GCC,
+			0x11D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_aggre_usb3_prim_axi_clk", 0x11B, 4, GCC,
+			0x11B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_aggre_usb3_sec_axi_clk", 0x11C, 4, GCC,
+			0x11C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_boot_rom_ahb_clk", 0x94, 4, GCC,
+			0x94, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_camera_ahb_clk", 0x3A, 4, GCC,
+			0x3A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_camera_axi_clk", 0x40, 4, GCC,
+			0x40, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_camera_xo_clk", 0x43, 4, GCC,
+			0x43, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ce1_ahb_clk", 0xA9, 4, GCC,
+			0xA9, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ce1_axi_clk", 0xA8, 4, GCC,
+			0xA8, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ce1_clk", 0xA7, 4, GCC,
+			0xA7, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_cfg_noc_usb3_prim_axi_clk", 0x1D, 4, GCC,
+			0x1D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_cfg_noc_usb3_sec_axi_clk", 0x1E, 4, GCC,
+			0x1E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_cpuss_ahb_clk", 0xCE, 4, GCC,
+			0xCE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_cpuss_dvm_bus_clk", 0xD3, 4, GCC,
+			0xD3, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_cpuss_gnoc_clk", 0xCF, 4, GCC,
+			0xCF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_cpuss_rbcpr_clk", 0xD0, 4, GCC,
+			0xD0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ddrss_gpu_axi_clk", 0xBB, 4, GCC,
+			0xBB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_disp_ahb_clk", 0x3B, 4, GCC,
+			0x3B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_disp_axi_clk", 0x41, 4, GCC,
+			0x41, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_disp_gpll0_clk_src", 0x4C, 4, GCC,
+			0x4C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_disp_gpll0_div_clk_src", 0x4D, 4, GCC,
+			0x4D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_disp_xo_clk", 0x44, 4, GCC,
+			0x44, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gp1_clk", 0xDE, 4, GCC,
+			0xDE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gp2_clk", 0xDF, 4, GCC,
+			0xDF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gp3_clk", 0xE0, 4, GCC,
+			0xE0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gpu_cfg_ahb_clk", 0x142, 4, GCC,
+			0x142, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gpu_gpll0_clk_src", 0x148, 4, GCC,
+			0x148, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gpu_gpll0_div_clk_src", 0x149, 4, GCC,
+			0x149, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gpu_memnoc_gfx_clk", 0x145, 4, GCC,
+			0x145, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gpu_snoc_dvm_gfx_clk", 0x147, 4, GCC,
+			0x147, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_axis2_clk", 0x12F, 4, GCC,
+			0x12F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_cfg_ahb_clk", 0x12D, 4, GCC,
+			0x12D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_gpll0_div_clk_src", 0x133, 4, GCC,
+			0x133, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_mfab_axis_clk", 0x12E, 4, GCC,
+			0x12E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_q6_memnoc_axi_clk", 0x135, 4, GCC,
+			0x135, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_snoc_axi_clk", 0x134, 4, GCC,
+			0x134, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_0_aux_clk", 0xE5, 4, GCC,
+			0xE5, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_0_cfg_ahb_clk", 0xE4, 4, GCC,
+			0xE4, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_0_mstr_axi_clk", 0xE3, 4, GCC,
+			0xE3, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_0_pipe_clk", 0xE6, 4, GCC,
+			0xE6, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_0_slv_axi_clk", 0xE2, 4, GCC,
+			0xE2, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_0_slv_q2a_axi_clk", 0xE1, 4, GCC,
+			0xE1, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_1_aux_clk", 0xEC, 4, GCC,
+			0xEC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_1_cfg_ahb_clk", 0xEB, 4, GCC,
+			0xEB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_1_mstr_axi_clk", 0xEA, 4, GCC,
+			0xEA, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_1_pipe_clk", 0xED, 4, GCC,
+			0xED, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_1_slv_axi_clk", 0xE9, 4, GCC,
+			0xE9, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_1_slv_q2a_axi_clk", 0xE8, 4, GCC,
+			0xE8, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_phy_aux_clk", 0xEF, 4, GCC,
+			0xEF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pcie_phy_refgen_clk", 0x160, 4, GCC,
+			0x160, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pdm2_clk", 0x8E, 4, GCC,
+			0x8E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pdm_ahb_clk", 0x8C, 4, GCC,
+			0x8C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_pdm_xo4_clk", 0x8D, 4, GCC,
+			0x8D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_prng_ahb_clk", 0x8F, 4, GCC,
+			0x8F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qmip_camera_ahb_clk", 0x3D, 4, GCC,
+			0x3D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qmip_disp_ahb_clk", 0x3E, 4, GCC,
+			0x3E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qmip_video_ahb_clk", 0x3C, 4, GCC,
+			0x3C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_core_2x_clk", 0x77, 4, GCC,
+			0x77, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_core_clk", 0x76, 4, GCC,
+			0x76, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s0_clk", 0x78, 4, GCC,
+			0x78, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s1_clk", 0x79, 4, GCC,
+			0x79, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s2_clk", 0x7A, 4, GCC,
+			0x7A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s3_clk", 0x7B, 4, GCC,
+			0x7B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s4_clk", 0x7C, 4, GCC,
+			0x7C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s5_clk", 0x7D, 4, GCC,
+			0x7D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s6_clk", 0x7E, 4, GCC,
+			0x7E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap0_s7_clk", 0x7F, 4, GCC,
+			0x7F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_core_2x_clk", 0x80, 4, GCC,
+			0x80, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_core_clk", 0x81, 4, GCC,
+			0x81, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s0_clk", 0x84, 4, GCC,
+			0x84, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s1_clk", 0x85, 4, GCC,
+			0x85, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s2_clk", 0x86, 4, GCC,
+			0x86, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s3_clk", 0x87, 4, GCC,
+			0x87, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s4_clk", 0x88, 4, GCC,
+			0x88, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s5_clk", 0x89, 4, GCC,
+			0x89, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s6_clk", 0x8A, 4, GCC,
+			0x8A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap1_s7_clk", 0x8B, 4, GCC,
+			0x8B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap_0_m_ahb_clk", 0x74, 4, GCC,
+			0x74, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap_0_s_ahb_clk", 0x75, 4, GCC,
+			0x75, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap_1_m_ahb_clk", 0x82, 4, GCC,
+			0x82, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_qupv3_wrap_1_s_ahb_clk", 0x83, 4, GCC,
+			0x83, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_sdcc2_ahb_clk", 0x71, 4, GCC,
+			0x71, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_sdcc2_apps_clk", 0x70, 4, GCC,
+			0x70, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_sdcc4_ahb_clk", 0x73, 4, GCC,
+			0x73, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_sdcc4_apps_clk", 0x72, 4, GCC,
+			0x72, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_sys_noc_cpuss_ahb_clk", 0xC, 4, GCC,
+			0xC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_tsif_ahb_clk", 0x90, 4, GCC,
+			0x90, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_tsif_inactivity_timers_clk", 0x92, 4, GCC,
+			0x92, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_tsif_ref_clk", 0x91, 4, GCC,
+			0x91, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_ahb_clk", 0xF1, 4, GCC,
+			0xF1, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_axi_clk", 0xF0, 4, GCC,
+			0xF0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_ice_core_clk", 0xF7, 4, GCC,
+			0xF7, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_phy_aux_clk", 0xF8, 4, GCC,
+			0xF8, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_rx_symbol_0_clk", 0xF3, 4, GCC,
+			0xF3, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_rx_symbol_1_clk", 0xF9, 4, GCC,
+			0xF9, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_tx_symbol_0_clk", 0xF2, 4, GCC,
+			0xF2, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_card_unipro_core_clk", 0xF6, 4, GCC,
+			0xF6, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_ahb_clk", 0xFC, 4, GCC,
+			0xFC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_axi_clk", 0xFB, 4, GCC,
+			0xFB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_ice_core_clk", 0x102, 4, GCC,
+			0x102, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_phy_aux_clk", 0x103, 4, GCC,
+			0x103, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_rx_symbol_0_clk", 0xFE, 4, GCC,
+			0xFE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_rx_symbol_1_clk", 0x104, 4, GCC,
+			0x104, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_tx_symbol_0_clk", 0xFD, 4, GCC,
+			0xFD, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_ufs_phy_unipro_core_clk", 0x101, 4, GCC,
+			0x101, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb30_prim_master_clk", 0x5F, 4, GCC,
+			0x5F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb30_prim_mock_utmi_clk", 0x61, 4, GCC,
+			0x61, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb30_prim_sleep_clk", 0x60, 4, GCC,
+			0x60, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb30_sec_master_clk", 0x65, 4, GCC,
+			0x65, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb30_sec_mock_utmi_clk", 0x67, 4, GCC,
+			0x67, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb30_sec_sleep_clk", 0x66, 4, GCC,
+			0x66, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb3_prim_phy_aux_clk", 0x62, 4, GCC,
+			0x62, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb3_prim_phy_com_aux_clk", 0x63, 4, GCC,
+			0x63, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb3_prim_phy_pipe_clk", 0x64, 4, GCC,
+			0x64, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb3_sec_phy_aux_clk", 0x68, 4, GCC,
+			0x68, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb3_sec_phy_com_aux_clk", 0x69, 4, GCC,
+			0x69, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb3_sec_phy_pipe_clk", 0x6A, 4, GCC,
+			0x6A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_usb_phy_cfg_ahb2phy_clk", 0x6F, 4, GCC,
+			0x6F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_video_ahb_clk", 0x39, 4, GCC,
+			0x39, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_video_axi_clk", 0x3F, 4, GCC,
+			0x3F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_video_xo_clk", 0x42, 4, GCC,
+			0x42, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gpu_cc_acd_cxo_clk", 0x144, 4, GPU_CC,
+			0x1F, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_ahb_clk", 0x144, 4, GPU_CC,
+			0x11, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_crc_ahb_clk", 0x144, 4, GPU_CC,
+			0x12, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_apb_clk", 0x144, 4, GPU_CC,
+			0x15, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_gfx3d_clk", 0x144, 4, GPU_CC,
+			0x1A, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_gfx3d_slv_clk", 0x144, 4, GPU_CC,
+			0x1B, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_gmu_clk", 0x144, 4, GPU_CC,
+			0x19, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_qdss_at_clk", 0x144, 4, GPU_CC,
+			0x13, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_qdss_trig_clk", 0x144, 4, GPU_CC,
+			0x18, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_qdss_tsctr_clk", 0x144, 4, GPU_CC,
+			0x14, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cx_snoc_dvm_clk", 0x144, 4, GPU_CC,
+			0x16, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cxo_aon_clk", 0x144, 4, GPU_CC,
+			0xB, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_cxo_clk", 0x144, 4, GPU_CC,
+			0xA, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_gx_cxo_clk", 0x144, 4, GPU_CC,
+			0xF, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_gx_gmu_clk", 0x144, 4, GPU_CC,
+			0x10, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_gx_qdss_tsctr_clk", 0x144, 4, GPU_CC,
+			0xE, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_gx_vsense_clk", 0x144, 4, GPU_CC,
+			0xD, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_rbcpr_ahb_clk", 0x144, 4, GPU_CC,
+			0x1D, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_rbcpr_clk", 0x144, 4, GPU_CC,
+			0x1C, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_sleep_clk", 0x144, 4, GPU_CC,
+			0x17, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_spdm_gx_gfx3d_div_clk", 0x144, 4, GPU_CC,
+			0x1E, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "video_cc_apb_clk", 0x48, 4, VIDEO_CC,
+			0x8, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_at_clk", 0x48, 4, VIDEO_CC,
+			0xB, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_qdss_trig_clk", 0x48, 4, VIDEO_CC,
+			0x7, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_qdss_tsctr_div8_clk", 0x48, 4, VIDEO_CC,
+			0xA, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_vcodec0_axi_clk", 0x48, 4, VIDEO_CC,
+			0x5, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_vcodec0_core_clk", 0x48, 4, VIDEO_CC,
+			0x2, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_vcodec1_axi_clk", 0x48, 4, VIDEO_CC,
+			0x6, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_vcodec1_core_clk", 0x48, 4, VIDEO_CC,
+			0x3, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_venus_ahb_clk", 0x48, 4, VIDEO_CC,
+			0x9, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_venus_ctl_axi_clk", 0x48, 4, VIDEO_CC,
+			0x4, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "video_cc_venus_ctl_core_clk", 0x48, 4, VIDEO_CC,
+			0x1, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+	),
+	.hw.init = &(struct clk_init_data){
+		.name = "gcc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const struct of_device_id clk_debug_match_table[] = {
+	{ .compatible = "qcom,debugcc-sdm845" },
+	{}
+};
+
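+/*
+ * Probe: look up the XO reference used for measurement, map the regmap of
+ * each clock controller advertised through the qcom,gcc/dispcc/videocc/
+ * camcc/gpucc syscon phandles, then register the debug mux as a measure
+ * clock.
+ */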
+static int clk_debug_845_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	int ret = 0, count;
+
+	clk = devm_clk_get(&pdev->dev, "xo_clk_src");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock\n");
+		return PTR_ERR(clk);
+	}
+
+	debug_mux_priv.cxo = clk;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,cc-count",
+								&count);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Number of debug clock controllers not specified\n");
+		return ret;
+	}
+
+	if (!count) {
+		dev_err(&pdev->dev, "Count of CC cannot be zero\n");
+		return -EINVAL;
+	}
+
+	gcc_debug_mux.regmap = devm_kzalloc(&pdev->dev,
+				sizeof(struct regmap *) * count, GFP_KERNEL);
+	if (!gcc_debug_mux.regmap)
+		return -ENOMEM;
+
+	if (of_get_property(pdev->dev.of_node, "qcom,gcc", NULL)) {
+		gcc_debug_mux.regmap[GCC] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,gcc");
+		if (IS_ERR(gcc_debug_mux.regmap[GCC])) {
+			pr_err("Failed to map qcom,gcc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[GCC]);
+		}
+	}
+
+	if (of_get_property(pdev->dev.of_node, "qcom,dispcc", NULL)) {
+		gcc_debug_mux.regmap[DISP_CC] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,dispcc");
+		if (IS_ERR(gcc_debug_mux.regmap[DISP_CC])) {
+			pr_err("Failed to map qcom,dispcc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[DISP_CC]);
+		}
+	}
+
+	if (of_get_property(pdev->dev.of_node, "qcom,videocc", NULL)) {
+		gcc_debug_mux.regmap[VIDEO_CC] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,videocc");
+		if (IS_ERR(gcc_debug_mux.regmap[VIDEO_CC])) {
+			pr_err("Failed to map qcom,videocc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[VIDEO_CC]);
+		}
+	}
+
+	if (of_get_property(pdev->dev.of_node, "qcom,camcc", NULL)) {
+		gcc_debug_mux.regmap[CAM_CC] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,camcc");
+		if (IS_ERR(gcc_debug_mux.regmap[CAM_CC])) {
+			pr_err("Failed to map qcom,camcc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[CAM_CC]);
+		}
+	}
+
+	if (of_get_property(pdev->dev.of_node, "qcom,gpucc", NULL)) {
+		gcc_debug_mux.regmap[GPU_CC] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,gpucc");
+		if (IS_ERR(gcc_debug_mux.regmap[GPU_CC])) {
+			pr_err("Failed to map qcom,gpucc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[GPU_CC]);
+		}
+	}
+
+	clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Unable to register GCC debug mux\n");
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_debug_measure_register(&gcc_debug_mux.hw);
+	if (ret)
+		dev_err(&pdev->dev, "Could not register measure clock\n");
+	else
+		dev_info(&pdev->dev, "Registered debug mux successfully\n");
+
+	return ret;
+}
+
+static struct platform_driver clk_debug_driver = {
+	.probe = clk_debug_845_probe,
+	.driver = {
+		.name = "debugcc-sdm845",
+		.of_match_table = clk_debug_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init clk_debug_845_init(void)
+{
+	return platform_driver_register(&clk_debug_driver);
+}
+fs_initcall(clk_debug_845_init);
+
+MODULE_DESCRIPTION("QTI DEBUG CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:debugcc-sdm845");
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
new file mode 100644
index 0000000..6b1eca8
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -0,0 +1,1087 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+#include "clk-regmap-divider.h"
+
+#define DISP_CC_MISC_CMD	0x8000
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+#define F_SLEW(f, s, h, m, n, src_freq) { (f), (s), (2 * (h) - 1), (m), (n), \
+					(src_freq) }
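+/*
+ * The third F()/F_SLEW() field is the RCG source divider; it is programmed
+ * as (2 * div - 1), which is how fractional dividers such as 2.5 in the
+ * tables below end up as an integer register value.
+ */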
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_DISP_CC_PLL0_OUT_MAIN,
+	P_DP_PHY_PLL_LINK_CLK,
+	P_DP_PHY_PLL_VCO_DIV_CLK,
+	P_DSI0_PHY_PLL_OUT_BYTECLK,
+	P_DSI0_PHY_PLL_OUT_DSICLK,
+	P_DSI1_PHY_PLL_OUT_BYTECLK,
+	P_DSI1_PHY_PLL_OUT_DSICLK,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_MAIN_DIV,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+	{ P_DSI1_PHY_PLL_OUT_BYTECLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_byteclk",
+	"dsi1_phy_pll_out_byteclk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DP_PHY_PLL_LINK_CLK, 1 },
+	{ P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"dp_phy_pll_link_clk",
+	"dp_phy_pll_vco_div_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_GPLL0_OUT_MAIN_DIV, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+	"bi_tcxo",
+	"disp_cc_pll0",
+	"gpll0",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+	{ P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_dsiclk",
+	"dsi1_phy_pll_out_dsiclk",
+	"core_bi_pll_test_se",
+};
+
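+/* Fabia PLL VCO ranges, assumed to be { min_freq, max_freq, VCO select }. */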
+static struct pll_vco fabia_vco[] = {
+	{ 250000000, 2000000000, 0 },
+	{ 125000000, 1000000000, 1 },
+};
+
+static const struct pll_config disp_cc_pll0_config = {
+	.l = 0x2c,
+	.frac = 0xcaab,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+	.cmd_rcgr = 0x20d0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_byte0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_byte2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 150000000,
+			LOW, 240000000,
+			LOW_L1, 262500000,
+			NOMINAL, 358000000),
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+	.cmd_rcgr = 0x20ec,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_byte1_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_byte2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 150000000,
+			LOW, 240000000,
+			LOW_L1, 262500000,
+			NOMINAL, 358000000),
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+	.cmd_rcgr = 0x219c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_aux_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP1(
+			MIN, 19200000),
+	},
+};
+
+/* Need to get the exact frequencies that are supported */
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto_clk_src[] = {
+	F( 108000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 180000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 360000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 540000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+	.cmd_rcgr = 0x2154,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_crypto_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_crypto_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 12800000,
+			LOWER, 108000000,
+			LOW, 180000000,
+			LOW_L1, 360000000,
+			NOMINAL, 540000000),
+	},
+};
+
+/* Need to get the exact frequencies that are supported */
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
+	F_SLEW( 162000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  324000000),
+	F_SLEW( 270000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  540000000),
+	F_SLEW( 540000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1080000000),
+	F_SLEW( 810000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1620000000),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+	.cmd_rcgr = 0x2138,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_link_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 162000000,
+			LOW, 270000000,
+			LOW_L1, 540000000,
+			NOMINAL, 810000000),
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+	.cmd_rcgr = 0x2184,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_pixel1_clk_src",
+		.parent_names = (const char *[]){
+			"dp_phy_pll_vco_div_clk",
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_dp_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 202500000,
+			LOW, 296735905,
+			LOW_L1, 675000000),
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+	.cmd_rcgr = 0x216c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_pixel_clk_src",
+		.parent_names = (const char *[]){
+			"dp_phy_pll_vco_div_clk",
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_dp_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 202500000,
+			LOW, 296735905,
+			LOW_L1, 675000000),
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+	.cmd_rcgr = 0x2108,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_esc0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP1(
+			MIN, 19200000),
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+	.cmd_rcgr = 0x2120,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_esc1_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP1(
+			MIN, 19200000),
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(85714286, P_GPLL0_OUT_MAIN, 7, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	F(165000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(275000000, P_DISP_CC_PLL0_OUT_MAIN, 1.5, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	F(412500000, P_DISP_CC_PLL0_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+	.cmd_rcgr = 0x2088,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_mdp_clk_src",
+		.parent_names = disp_cc_parent_names_3,
+		.num_parents = 5,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 165000000,
+			LOW, 300000000,
+			NOMINAL, 412500000),
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+	.cmd_rcgr = 0x2058,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_pclk0_clk_src",
+		.parent_names = disp_cc_parent_names_4,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_pixel_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 184000000,
+			LOW, 295000000,
+			LOW_L1, 350000000,
+			NOMINAL, 571428571),
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+	.cmd_rcgr = 0x2070,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_pclk1_clk_src",
+		.parent_names = disp_cc_parent_names_4,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_pixel_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 184000000,
+			LOW, 295000000,
+			LOW_L1, 350000000,
+			NOMINAL, 571428571),
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(165000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	F(412500000, P_DISP_CC_PLL0_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+	.cmd_rcgr = 0x20a0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_rot_clk_src",
+		.parent_names = disp_cc_parent_names_3,
+		.num_parents = 5,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 165000000,
+			LOW, 300000000,
+			NOMINAL, 412500000),
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+	.cmd_rcgr = 0x20b8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_vsync_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP1(
+			MIN, 19200000),
+	},
+};
+
+static struct clk_branch disp_cc_debug_clk = {
+	.halt_reg = 0x600c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x600c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_debug_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+	.halt_reg = 0x4004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_axi_clk = {
+	.halt_reg = 0x4008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+	.halt_reg = 0x2028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+	.reg = 0x20e8,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_div_clk_src",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+	.halt_reg = 0x202c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x202c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+	.halt_reg = 0x2030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+	.reg = 0x2104,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte1_div_clk_src",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+	.halt_reg = 0x2034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte1_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte1_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+	.halt_reg = 0x2054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_aux_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+	.halt_reg = 0x2048,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_crypto_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_crypto_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+	.halt_reg = 0x2040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_link_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+	.reg = 0x2150,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_div_clk_src",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_link_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+	.halt_reg = 0x2044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_link_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+	.halt_reg = 0x2050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_pixel1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_pixel1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+	.halt_reg = 0x204c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x204c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_pixel_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_pixel_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+	.halt_reg = 0x2038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_esc0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_esc0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+	.halt_reg = 0x203c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x203c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_esc1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_esc1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+	.halt_reg = 0x200c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x200c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+	.halt_reg = 0x201c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x201c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_lut_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+	.halt_reg = 0x2004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_pclk0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_pclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+	.halt_reg = 0x2008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_pclk1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_pclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_qdss_at_clk = {
+	.halt_reg = 0x4010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_qdss_at_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_qdss_tsctr_div8_clk = {
+	.halt_reg = 0x4014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_qdss_tsctr_div8_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+	.halt_reg = 0x2014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rot_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_rot_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+	.halt_reg = 0x5004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rscc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+	.halt_reg = 0x5008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rscc_vsync_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_vsync_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+	.halt_reg = 0x2024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_vsync_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_vsync_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *disp_cc_sdm845_clocks[] = {
+	[DISP_CC_DEBUG_CLK] = &disp_cc_debug_clk.clkr,
+	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+	[DISP_CC_MDSS_AXI_CLK] = &disp_cc_mdss_axi_clk.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+	[DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] =
+					&disp_cc_mdss_byte0_div_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+	[DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+	[DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] =
+					&disp_cc_mdss_byte1_div_clk_src.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] =
+					&disp_cc_mdss_dp_crypto_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
+					&disp_cc_mdss_dp_link_div_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] =
+					&disp_cc_mdss_dp_pixel1_clk_src.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+	[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+	[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+	[DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+	[DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+	[DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+	[DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+	[DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+	[DISP_CC_MDSS_QDSS_AT_CLK] = &disp_cc_mdss_qdss_at_clk.clkr,
+	[DISP_CC_MDSS_QDSS_TSCTR_DIV8_CLK] =
+					&disp_cc_mdss_qdss_tsctr_div8_clk.clkr,
+	[DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+	[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+	[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sdm845_resets[] = {
+	[DISP_CC_MDSS_CORE_BCR] = { 0x2000 },
+	[DISP_CC_MDSS_GCC_CLOCKS_BCR] = { 0x4000 },
+	[DISP_CC_MDSS_RSCC_BCR] = { 0x5000 },
+};
+
+static const struct regmap_config disp_cc_sdm845_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x10000,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc disp_cc_sdm845_desc = {
+	.config = &disp_cc_sdm845_regmap_config,
+	.clks = disp_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(disp_cc_sdm845_clocks),
+	.resets = disp_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(disp_cc_sdm845_resets),
+};
+
+static const struct of_device_id disp_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,dispcc-sdm845" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sdm845_match_table);
+
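+/*
+ * Probe: map the Display CC register space, acquire the vdd_cx regulator
+ * used for voltage voting, configure disp_cc_pll0, enable hardware clock
+ * gating via DISP_CC_MISC_CMD and register the clocks and resets above.
+ */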
+static int disp_cc_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret = 0;
+
+	regmap = qcom_cc_map(pdev, &disp_cc_sdm845_desc);
+	if (IS_ERR(regmap)) {
+		pr_err("Failed to map the Display CC registers\n");
+		return PTR_ERR(regmap);
+	}
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+	/* Enable clock gating for DSI and MDP clocks */
+	regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x7f0, 0x7f0);
+
+	ret = qcom_cc_really_probe(pdev, &disp_cc_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Display CC clocks\n");
+	return ret;
+}
+
+static struct platform_driver disp_cc_sdm845_driver = {
+	.probe		= disp_cc_sdm845_probe,
+	.driver		= {
+		.name	= "disp_cc-sdm845",
+		.of_match_table = disp_cc_sdm845_match_table,
+	},
+};
+
+static int __init disp_cc_sdm845_init(void)
+{
+	return platform_driver_register(&disp_cc_sdm845_driver);
+}
+core_initcall(disp_cc_sdm845_init);
+
+static void __exit disp_cc_sdm845_exit(void)
+{
+	platform_driver_unregister(&disp_cc_sdm845_driver);
+}
+module_exit(disp_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:disp_cc-sdm845");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index f4cc1bd..08dce3f 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -168,6 +168,11 @@
 			.parent_names = (const char *[]){ "bi_tcxo" },
 			.num_parents = 1,
 			.ops = &clk_fabia_fixed_pll_ops,
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
 		},
 	},
 };
@@ -207,7 +212,11 @@
 			.parent_names = (const char *[]){ "bi_tcxo" },
 			.num_parents = 1,
 			.ops = &clk_fabia_fixed_pll_ops,
-			VDD_CX_FMAX_MAP1(MIN, 1066000000),
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
 		},
 	},
 };
@@ -328,32 +337,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_gcc_mmss_qm_core_clk_src[] = {
-	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
-	F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
-	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gcc_mmss_qm_core_clk_src = {
-	.cmd_rcgr = 0xb040,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gcc_parent_map_0,
-	.freq_tbl = ftbl_gcc_mmss_qm_core_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gcc_mmss_qm_core_clk_src",
-		.parent_names = gcc_parent_names_0,
-		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		VDD_CX_FMAX_MAP3(
-			MIN, 75000000,
-			LOWER, 150000000,
-			LOW, 300000000),
-	},
-};
-
 static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
 	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
@@ -1669,37 +1652,6 @@
 	},
 };
 
-static struct clk_branch gcc_mmss_qm_ahb_clk = {
-	.halt_reg = 0xb05c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0xb05c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mmss_qm_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_mmss_qm_core_clk = {
-	.halt_reg = 0xb038,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0xb038,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mmss_qm_core_clk",
-			.parent_names = (const char *[]){
-				"gcc_mmss_qm_core_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_mss_axis2_clk = {
 	.halt_reg = 0x8a008,
 	.halt_check = BRANCH_HALT,
@@ -3233,9 +3185,6 @@
 	[GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
 	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
 	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
-	[GCC_MMSS_QM_AHB_CLK] = &gcc_mmss_qm_ahb_clk.clkr,
-	[GCC_MMSS_QM_CORE_CLK] = &gcc_mmss_qm_core_clk.clkr,
-	[GCC_MMSS_QM_CORE_CLK_SRC] = &gcc_mmss_qm_core_clk_src.clkr,
 	[GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
 	[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
 	[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
new file mode 100644
index 0000000..a95deff
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -0,0 +1,739 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+#include <linux/clk/qcom.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+#define F_SLEW(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) }
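+/*
+ * F_SLEW adds a source-frequency field (sf): each GFX3D entry below records
+ * the gpu_cc_pll0 rate (2x the GFX rate through the /2 even post-divider)
+ * that the PLL is presumably slewed to for that operating point.
+ */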
+
+static int vdd_gx_corner[] = {
+	RPMH_REGULATOR_LEVEL_OFF,		/* VDD_GX_NONE */
+	RPMH_REGULATOR_LEVEL_MIN_SVS,		/* VDD_GX_MIN */
+	RPMH_REGULATOR_LEVEL_LOW_SVS,		/* VDD_GX_LOWER */
+	RPMH_REGULATOR_LEVEL_SVS,		/* VDD_GX_LOW */
+	RPMH_REGULATOR_LEVEL_SVS_L1,		/* VDD_GX_LOW_L1 */
+	RPMH_REGULATOR_LEVEL_NOM,		/* VDD_GX_NOMINAL */
+	RPMH_REGULATOR_LEVEL_NOM_L1,		/* VDD_GX_NOMINAL_L1 */
+	RPMH_REGULATOR_LEVEL_TURBO,		/* VDD_GX_HIGH */
+	RPMH_REGULATOR_LEVEL_TURBO_L1,		/* VDD_GX_HIGH_L1 */
+	RPMH_REGULATOR_LEVEL_MAX,		/* VDD_GX_MAX */
+};
+
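+/*
+ * vdd_cx and vdd_mx both index the common vdd_corner table, while the GFX
+ * rail uses the vdd_gx_corner table above.
+ */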
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_gfx, VDD_GX_NUM, 1, vdd_gx_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_MAIN_DIV,
+	P_GPU_CC_PLL0_OUT_EVEN,
+	P_GPU_CC_PLL0_OUT_MAIN,
+	P_GPU_CC_PLL0_OUT_ODD,
+	P_GPU_CC_PLL1_OUT_EVEN,
+	P_GPU_CC_PLL1_OUT_MAIN,
+	P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0",
+	"gpu_cc_pll1",
+	"gpll0",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_OUT_EVEN, 1 },
+	{ P_GPU_CC_PLL0_OUT_ODD, 2 },
+	{ P_GPU_CC_PLL1_OUT_EVEN, 3 },
+	{ P_GPU_CC_PLL1_OUT_ODD, 4 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0_out_even",
+	"gpu_cc_pll0_out_odd",
+	"gpu_cc_pll1_out_even",
+	"gpu_cc_pll1_out_odd",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco fabia_vco[] = {
+	{ 250000000, 2000000000, 0 },
+	{ 125000000, 1000000000, 1 },
+};
+
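+/*
+ * Default Fabia PLL0 setting; with the usual L + 16-bit fractional formula
+ * this works out to 19.2 MHz * (0x1d + 0x2aaa/0x10000) ~= 560 MHz.
+ */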
+static const struct pll_config gpu_cc_pll0_config = {
+	.l = 0x1d,
+	.frac = 0x2aaa,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_MX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+	{ 0x0, 1 },
+	{ 0x1, 2 },
+	{ 0x3, 4 },
+	{ 0x7, 8 },
+	{},
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "gpu_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
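+/*
+ * GMU rates assume the SDM845 GPLL0 runs at 600 MHz (300 MHz on the _DIV
+ * output), so the 1.5 dividers below give 200 MHz and 400 MHz.
+ */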
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+	F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+	.cmd_rcgr = 0x1120,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = gpu_cc_parent_map_0,
+	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gmu_clk_src",
+		.parent_names = gpu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 200000000,
+			LOW, 400000000),
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+	F_SLEW(147000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  294000000),
+	F_SLEW(210000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  420000000),
+	F_SLEW(338000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  676000000),
+	F_SLEW(425000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  850000000),
+	F_SLEW(600000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0, 1200000000),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+	.cmd_rcgr = 0x101c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_1,
+	.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+	.flags = FORCE_ENABLE_RCG,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gx_gfx3d_clk_src",
+		.parent_names = gpu_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops =  &clk_rcg2_ops,
+		VDD_GX_FMAX_MAP8(
+			MIN, 147000000,
+			LOWER, 210000000,
+			LOW, 280000000,
+			LOW_L1, 338000000,
+			NOMINAL, 425000000,
+			NOMINAL_L1, 487000000,
+			HIGH, 548000000,
+			HIGH_L1, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
+	.cmd_rcgr = 0x10b0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_2,
+	.freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_rbcpr_clk_src",
+		.parent_names = gpu_cc_parent_names_2,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			NOMINAL, 50000000),
+	},
+};
+
+static struct clk_branch gpu_cc_acd_ahb_clk = {
+	.halt_reg = 0x1168,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1168,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_acd_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_acd_cxo_clk = {
+	.halt_reg = 0x1164,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1164,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_acd_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+	.halt_reg = 0x1078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_crc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+	.halt_reg = 0x1088,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_apb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+	.halt_reg = 0x10a4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+	.halt_reg = 0x10a8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gfx3d_slv_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+	.halt_reg = 0x108c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x108c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_snoc_dvm_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+	.halt_reg = 0x1004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_aon_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_debug_clk = {
+	.halt_reg = 0x1100,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1100,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_debug_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+	.halt_reg = 0x1060,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1060,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+	.halt_reg = 0x1054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+	.halt_reg = 0x1064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+	.halt_reg = 0x1058,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_vsense_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_pll_test_clk = {
+	.halt_reg = 0x110c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x110c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll_test_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_rbcpr_ahb_clk = {
+	.halt_reg = 0x10f4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10f4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_rbcpr_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_rbcpr_clk = {
+	.halt_reg = 0x10f0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10f0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_rbcpr_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_rbcpr_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
+	[GPU_CC_ACD_AHB_CLK] = &gpu_cc_acd_ahb_clk.clkr,
+	[GPU_CC_ACD_CXO_CLK] = &gpu_cc_acd_cxo_clk.clkr,
+	[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+	[GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+	[GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+	[GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+	[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+	[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+	[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+	[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+	[GPU_CC_DEBUG_CLK] = &gpu_cc_debug_clk.clkr,
+	[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+	[GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+	[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+	[GPU_CC_PLL_TEST_CLK] = &gpu_cc_pll_test_clk.clkr,
+	[GPU_CC_RBCPR_AHB_CLK] = &gpu_cc_rbcpr_ahb_clk.clkr,
+	[GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
+	[GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
+};
+
+static struct clk_regmap *gpu_cc_gfx_sdm845_clocks[] = {
+	[GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+	[GPU_CC_PLL0_OUT_EVEN] = &gpu_cc_pll0_out_even.clkr,
+	[GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+	[GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sdm845_resets[] = {
+	[GPUCC_GPU_CC_ACD_BCR] = { 0x1160 },
+	[GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+	[GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+	[GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+	[GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+	[GPUCC_GPU_CC_RBCPR_BCR] = { 0x10ac },
+	[GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 },
+	[GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static const struct regmap_config gpu_cc_sdm845_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x8008,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sdm845_desc = {
+	.config = &gpu_cc_sdm845_regmap_config,
+	.clks = gpu_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_sdm845_clocks),
+	.resets = gpu_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(gpu_cc_sdm845_resets),
+};
+
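+/*
+ * The GFX descriptor reuses the GPU CC register layout but only exposes the
+ * graphics PLL and GFX3D clocks; the BCR resets stay with the GPU CC device.
+ */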
+static const struct qcom_cc_desc gpu_cc_gfx_sdm845_desc = {
+	.config = &gpu_cc_sdm845_regmap_config,
+	.clks = gpu_cc_gfx_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_gfx_sdm845_clocks),
+};
+
+static const struct of_device_id gpu_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,gpucc-sdm845" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sdm845_match_table);
+
+static const struct of_device_id gpu_cc_gfx_sdm845_match_table[] = {
+	{ .compatible = "qcom,gfxcc-sdm845" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_gfx_sdm845_match_table);
+
+static int gpu_cc_gfx_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct resource *res;
+	void __iomem *base;
+	int ret = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc.\n");
+		return -EINVAL;
+	}
+
+	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base) {
+		dev_err(&pdev->dev, "Failed to ioremap the GFX CC base.\n");
+		return -ENOMEM;
+	}
+
+	regmap = devm_regmap_init_mmio(&pdev->dev, base,
+				gpu_cc_gfx_sdm845_desc.config);
+	if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "Failed to init regmap\n");
+		return PTR_ERR(regmap);
+	}
+
+	/* Get MX voltage regulator for GPU PLL graphic clock. */
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	/* GFX voltage regulators for GFX3D  graphic clock. */
+	vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx");
+	if (IS_ERR(vdd_gfx.regulator[0])) {
+		if (PTR_ERR(vdd_gfx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_gfx regulator\n");
+		return PTR_ERR(vdd_gfx.regulator[0]);
+	}
+
+	clk_fabia_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+
+	ret = qcom_cc_really_probe(pdev, &gpu_cc_gfx_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GFX CC clocks\n");
+		return ret;
+	}
+
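+	/* Leave the GPU CXO branch clock enabled after registration */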
+	clk_prepare_enable(gpu_cc_cxo_clk.clkr.hw.clk);
+
+	dev_info(&pdev->dev, "Registered GFX CC clocks.\n");
+
+	return ret;
+}
+
+static struct platform_driver gpu_cc_gfx_sdm845_driver = {
+	.probe = gpu_cc_gfx_sdm845_probe,
+	.driver = {
+		.name = "gfxcc-sdm845",
+		.of_match_table = gpu_cc_gfx_sdm845_match_table,
+	},
+};
+
+static int __init gpu_cc_gfx_sdm845_init(void)
+{
+	return platform_driver_register(&gpu_cc_gfx_sdm845_driver);
+}
+arch_initcall(gpu_cc_gfx_sdm845_init);
+
+static void __exit gpu_cc_gfx_sdm845_exit(void)
+{
+	platform_driver_unregister(&gpu_cc_gfx_sdm845_driver);
+}
+module_exit(gpu_cc_gfx_sdm845_exit);
+
+static int gpu_cc_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret = 0;
+
+	regmap = qcom_cc_map(pdev, &gpu_cc_sdm845_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* Get CX voltage regulator for CX and GMU clocks. */
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	ret = qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GPU CC clocks.\n");
+
+	return ret;
+}
+
+static struct platform_driver gpu_cc_sdm845_driver = {
+	.probe = gpu_cc_sdm845_probe,
+	.driver = {
+		.name = "gpu_cc-sdm845",
+		.of_match_table = gpu_cc_sdm845_match_table,
+	},
+};
+
+static int __init gpu_cc_sdm845_init(void)
+{
+	return platform_driver_register(&gpu_cc_sdm845_driver);
+}
+core_initcall(gpu_cc_sdm845_init);
+
+static void __exit gpu_cc_sdm845_exit(void)
+{
+	platform_driver_unregister(&gpu_cc_sdm845_driver);
+}
+module_exit(gpu_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gpu_cc-sdm845");
diff --git a/drivers/clk/qcom/mdss/Kconfig b/drivers/clk/qcom/mdss/Kconfig
index 229780e..7213e37 100644
--- a/drivers/clk/qcom/mdss/Kconfig
+++ b/drivers/clk/qcom/mdss/Kconfig
@@ -1,5 +1,6 @@
-config MSM_MDSS_PLL
+config QCOM_MDSS_PLL
 	bool "MDSS pll programming"
+	depends on COMMON_CLK_QCOM
 	---help---
 	It provides support for DSI, eDP and HDMI interface pll programming on MDSS
 	hardware. It also handles the pll specific resources and turn them on/off when
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index 64c7609..d183393 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,9 +1,3 @@
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8998.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
similarity index 62%
rename from drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
rename to drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 8c6bc2c..6ce0d76 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -17,14 +17,9 @@
 #include <linux/err.h>
 #include <linux/iopoll.h>
 #include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
 #include "mdss-pll.h"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
 #define VCO_DELAY_USEC 1
 
@@ -128,14 +123,14 @@
 	u32 refclk_cycles;
 };
 
-struct dsi_pll_8998 {
+struct dsi_pll_10nm {
 	struct mdss_pll_resources *rsc;
 	struct dsi_pll_config pll_configuration;
 	struct dsi_pll_regs reg_setup;
 };
 
 static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
-static struct dsi_pll_8998 plls[DSI_PLL_MAX];
+static struct dsi_pll_10nm plls[DSI_PLL_MAX];
 
 static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
 {
@@ -166,7 +161,7 @@
 	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
 }
 
-static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll,
 				 struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
@@ -198,14 +193,14 @@
 	dsi_pll_config_slave(rsc);
 }
 
-static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll,
 				  struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
 	struct dsi_pll_regs *regs = &pll->reg_setup;
 	u64 target_freq;
 	u64 fref = rsc->vco_ref_clk_rate;
-	u32 computed_output_div, div_log;
+	u32 computed_output_div, div_log = 0;
 	u64 pll_freq;
 	u64 divider;
 	u64 dec, dec_multiple;
@@ -262,7 +257,7 @@
 	regs->frac_div_start_high = (frac & 0x30000) >> 16;
 }
 
-static void dsi_pll_calc_ssc(struct dsi_pll_8998 *pll,
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll,
 		  struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
@@ -307,7 +302,7 @@
 			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
 }
 
-static void dsi_pll_ssc_commit(struct dsi_pll_8998 *pll,
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll,
 		struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -333,7 +328,7 @@
 	}
 }
 
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_8998 *pll,
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
 				  struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -357,7 +352,7 @@
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
 }
 
-static void dsi_pll_commit(struct dsi_pll_8998 *pll,
+static void dsi_pll_commit(struct dsi_pll_10nm *pll,
 			   struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -378,12 +373,13 @@
 
 }
 
-static int vco_8998_set_rate(struct clk *c, unsigned long rate)
+static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
 {
 	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *rsc = vco->priv;
-	struct dsi_pll_8998 *pll;
+	struct dsi_pll_10nm *pll;
 
 	if (!rsc) {
 		pr_err("pll resource not found\n");
@@ -431,7 +427,7 @@
 	return 0;
 }
 
-static int dsi_pll_8998_lock_status(struct mdss_pll_resources *pll)
+static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
 {
 	int rc;
 	u32 status;
@@ -487,7 +483,7 @@
 	wmb();
 
 	/* Check for PLL lock */
-	rc = dsi_pll_8998_lock_status(rsc);
+	rc = dsi_pll_10nm_lock_status(rsc);
 	if (rc) {
 		pr_err("PLL(%d) lock failed\n", rsc->index);
 		goto error;
@@ -532,9 +528,25 @@
 	rsc->pll_on = false;
 }
 
-static void vco_8998_unprepare(struct clk *c)
+long vco_10nm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
 {
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	*parent_rate = rrate;
+
+	return rrate;
+}
+
+static void vco_10nm_unprepare(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 
 	if (!pll) {
@@ -542,15 +554,15 @@
 		return;
 	}
 
-	pll->vco_cached_rate = c->rate;
+	pll->vco_cached_rate = clk_hw_get_rate(hw);
 	dsi_pll_disable(vco);
 	mdss_pll_resource_enable(pll, false);
 }
 
-static int vco_8998_prepare(struct clk *c)
+static int vco_10nm_prepare(struct clk_hw *hw)
 {
 	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 
 	if (!pll) {
@@ -566,8 +578,9 @@
 	}
 
 	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == c->rate)) {
-		rc = c->ops->set_rate(c, pll->vco_cached_rate);
+	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+				pll->vco_cached_rate);
 		if (rc) {
 			pr_err("pll(%d) set_rate failed, rc=%d\n",
 			       pll->index, rc);
@@ -586,9 +599,10 @@
 	return rc;
 }
 
-static unsigned long dsi_pll_get_vco_rate(struct clk *c)
+static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
 {
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 	int rc;
 	u64 ref_clk = vco->ref_clk_rate;
@@ -642,46 +656,11 @@
 	return (unsigned long)vco_rate;
 }
 
-enum handoff vco_8998_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-	u32 status;
-
-	if (!pll) {
-		pr_err("Unable to find pll resource\n");
-		return HANDOFF_DISABLED_CLK;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("failed to enable pll(%d) resources, rc=%d\n",
-		       pll->index, rc);
-		return ret;
-	}
-
-	status = MDSS_PLL_REG_R(pll->pll_base, PLL_COMMON_STATUS_ONE);
-	if (status & BIT(0)) {
-		pll->handoff_resources = true;
-		pll->pll_on = true;
-		c->rate = dsi_pll_get_vco_rate(c);
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		(void)mdss_pll_resource_enable(pll, false);
-		ret = HANDOFF_DISABLED_CLK;
-	}
-
-	return ret;
-}
-
-static int pixel_clk_get_div(struct div_clk *clk)
+static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -690,11 +669,16 @@
 	}
 
 	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	div = (reg_val & 0xF0) >> 4;
+	*div = (reg_val & 0xF0) >> 4;
+
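+	/* The common clock framework expects a zero-based divider value */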
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -707,16 +691,18 @@
 	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
 }
 
-static int pixel_clk_set_div(struct div_clk *clk, int div)
+static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
 		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
 		return rc;
 	}
+	/* In common clock framework the divider value provided is one less */
+	div++;
 
 	pixel_clk_set_div_sub(pll, div);
 	if (pll->slave)
@@ -727,12 +713,11 @@
 	return 0;
 }
 
-static int bit_clk_get_div(struct div_clk *clk)
+static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -741,11 +726,17 @@
 	}
 
 	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	div = (reg_val & 0x0F);
+	*div = (reg_val & 0x0F);
+
+	/* Common clock framework will add one to divider value sent */
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
@@ -758,10 +749,10 @@
 	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
 }
 
-static int bit_clk_set_div(struct div_clk *clk, int div)
+static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
 {
 	int rc;
-	struct mdss_pll_resources *rsc = clk->priv;
+	struct mdss_pll_resources *rsc = context;
 	struct dsi_pll_8998 *pll;
 
 	if (!rsc) {
@@ -780,6 +771,7 @@
 		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
 		return rc;
 	}
+	div++;
 
 	bit_clk_set_div_sub(rsc, div);
 	/* For slave PLL, this divider always should be set to 1 */
@@ -791,12 +783,12 @@
 	return rc;
 }
 
-static int post_vco_clk_get_div(struct div_clk *clk)
+static int post_vco_clk_get_div(void *context, unsigned int reg,
+			unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -808,15 +800,20 @@
 	reg_val &= 0x3;
 
 	if (reg_val == 2)
-		div = 1;
+		*div = 1;
 	else if (reg_val == 3)
-		div = 4;
+		*div = 4;
 	else
-		div = 1;
+		*div = 1;
+
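+	/* Convert to the zero-based divider value the framework expects */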
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -842,10 +839,11 @@
 	return rc;
 }
 
-static int post_vco_clk_set_div(struct div_clk *clk, int div)
+static int post_vco_clk_set_div(void *context, unsigned int reg,
+		unsigned int div)
 {
 	int rc = 0;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -853,6 +851,8 @@
 		return rc;
 	}
 
+	div++;
+
 	rc = post_vco_clk_set_div_sub(pll, div);
 	if (!rc && pll->slave)
 		rc = post_vco_clk_set_div_sub(pll->slave, div);
@@ -862,12 +862,12 @@
 	return rc;
 }
 
-static int post_bit_clk_get_div(struct div_clk *clk)
+static int post_bit_clk_get_div(void *context, unsigned int reg,
+			unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -879,15 +879,20 @@
 	reg_val &= 0x3;
 
 	if (reg_val == 0)
-		div = 1;
+		*div = 1;
 	else if (reg_val == 1)
-		div = 2;
+		*div = 2;
 	else
-		div = 1;
+		*div = 1;
+
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -913,10 +918,11 @@
 	return rc;
 }
 
-static int post_bit_clk_set_div(struct div_clk *clk, int div)
+static int post_bit_clk_set_div(void *context, unsigned int reg,
+		unsigned int div)
 {
 	int rc = 0;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -924,6 +930,8 @@
 		return rc;
 	}
 
+	div++;
+
 	rc = post_bit_clk_set_div_sub(pll, div);
 	if (!rc && pll->slave)
 		rc = post_bit_clk_set_div_sub(pll->slave, div);
@@ -933,57 +941,44 @@
 	return rc;
 }
 
-long vco_8998_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	return rrate;
-}
-
-/* clk ops that require runtime fixup */
-static const struct clk_ops clk_ops_gen_mux_dsi;
-static const struct clk_ops clk_ops_bitclk_src_c;
-static const struct clk_ops clk_ops_post_vco_div_c;
-static const struct clk_ops clk_ops_post_bit_div_c;
-static const struct clk_ops clk_ops_pclk_src_c;
-
-static struct clk_div_ops clk_post_vco_div_ops = {
-	.set_div = post_vco_clk_set_div,
-	.get_div = post_vco_clk_get_div,
+static struct regmap_config dsi_pll_10nm_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7c0,
 };
 
-static struct clk_div_ops clk_post_bit_div_ops = {
-	.set_div = post_bit_clk_set_div,
-	.get_div = post_bit_clk_get_div,
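+/*
+ * These regmap_bus stubs route the generic regmap div/mux clock ops back to
+ * the DSI PLL divider/mux helpers, with mdss_pll_resources as the context.
+ */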
+static struct regmap_bus post_vco_regmap_bus = {
+	.reg_write = post_vco_clk_set_div,
+	.reg_read = post_vco_clk_get_div,
 };
 
-static struct clk_div_ops pixel_clk_div_ops = {
-	.set_div = pixel_clk_set_div,
-	.get_div = pixel_clk_get_div,
+static struct regmap_bus post_bit_regmap_bus = {
+	.reg_write = post_bit_clk_set_div,
+	.reg_read = post_bit_clk_get_div,
 };
 
-static struct clk_div_ops clk_bitclk_src_ops = {
-	.set_div = bit_clk_set_div,
-	.get_div = bit_clk_get_div,
+static struct regmap_bus pclk_src_regmap_bus = {
+	.reg_write = pixel_clk_set_div,
+	.reg_read = pixel_clk_get_div,
 };
 
-static const struct clk_ops clk_ops_vco_8998 = {
-	.set_rate = vco_8998_set_rate,
-	.round_rate = vco_8998_round_rate,
-	.handoff = vco_8998_handoff,
-	.prepare = vco_8998_prepare,
-	.unprepare = vco_8998_unprepare,
+static struct regmap_bus bitclk_src_regmap_bus = {
+	.reg_write = bit_clk_set_div,
+	.reg_read = bit_clk_get_div,
 };
 
-static struct clk_mux_ops mdss_mux_ops = {
-	.set_mux_sel = mdss_set_mux_sel,
-	.get_mux_sel = mdss_get_mux_sel,
+static const struct clk_ops clk_ops_vco_10nm = {
+	.recalc_rate = vco_10nm_recalc_rate,
+	.set_rate = vco_10nm_set_rate,
+	.round_rate = vco_10nm_round_rate,
+	.prepare = vco_10nm_prepare,
+	.unprepare = vco_10nm_unprepare,
+};
+
+static struct regmap_bus mdss_mux_regmap_bus = {
+	.reg_write = mdss_set_mux_sel,
+	.reg_read = mdss_get_mux_sel,
 };
 
 /*
@@ -1039,303 +1034,296 @@
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1500000000UL,
 	.max_rate = 3500000000UL,
-	.c = {
-		.dbg_name = "dsi0pll_vco_clk",
-		.ops = &clk_ops_vco_8998,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_vco_clk.c),
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_vco_clk",
+			.parent_names = (const char *[]){"xo_board"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
 	},
 };
 
-static struct div_clk dsi0pll_bitclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &clk_bitclk_src_ops,
-	.c = {
-		.parent = &dsi0pll_vco_clk.c,
-		.dbg_name = "dsi0pll_bitclk_src",
-		.ops = &clk_ops_bitclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_bitclk_src.c),
-	}
-};
-
-static struct div_clk dsi0pll_post_vco_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 4,
-	},
-	.ops = &clk_post_vco_div_ops,
-	.c = {
-		.parent = &dsi0pll_vco_clk.c,
-		.dbg_name = "dsi0pll_post_vco_div",
-		.ops = &clk_ops_post_vco_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_post_vco_div.c),
-	}
-};
-
-static struct div_clk dsi0pll_post_bit_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 2,
-	},
-	.ops = &clk_post_bit_div_ops,
-	.c = {
-		.parent = &dsi0pll_bitclk_src.c,
-		.dbg_name = "dsi0pll_post_bit_div",
-		.ops = &clk_ops_post_bit_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_post_bit_div.c),
-	}
-};
-
-static struct mux_clk dsi0pll_pclk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_post_bit_div.c, 0},
-		{&dsi0pll_post_vco_div.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_post_bit_div.c,
-		.dbg_name = "dsi0pll_pclk_src_mux",
-		.ops = &clk_ops_gen_mux,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_src_mux.c),
-	}
-};
-
-static struct div_clk dsi0pll_pclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &pixel_clk_div_ops,
-	.c = {
-		.parent = &dsi0pll_pclk_src_mux.c,
-		.dbg_name = "dsi0pll_pclk_src",
-		.ops = &clk_ops_pclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_pclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_pclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_pclk_src.c,
-		.dbg_name = "dsi0pll_pclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_mux.c),
-	}
-};
-
-static struct div_clk dsi0pll_byteclk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi0pll_bitclk_src.c,
-		.dbg_name = "dsi0pll_byteclk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_byteclk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_byteclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_byteclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_byteclk_src.c,
-		.dbg_name = "dsi0pll_byteclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_byteclk_mux.c),
-	}
-};
-
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1500000000UL,
 	.max_rate = 3500000000UL,
-	.c = {
-		.dbg_name = "dsi1pll_vco_clk",
-		.ops = &clk_ops_vco_8998,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_vco_clk.c),
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_vco_clk",
+			.parent_names = (const char *[]){"xo_board"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
 	},
 };
 
-static struct div_clk dsi1pll_bitclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &clk_bitclk_src_ops,
-	.c = {
-		.parent = &dsi1pll_vco_clk.c,
-		.dbg_name = "dsi1pll_bitclk_src",
-		.ops = &clk_ops_bitclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_bitclk_src.c),
-	}
-};
-
-static struct div_clk dsi1pll_post_vco_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 4,
-	},
-	.ops = &clk_post_vco_div_ops,
-	.c = {
-		.parent = &dsi1pll_vco_clk.c,
-		.dbg_name = "dsi1pll_post_vco_div",
-		.ops = &clk_ops_post_vco_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_post_vco_div.c),
-	}
-};
-
-static struct div_clk dsi1pll_post_bit_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 2,
-	},
-	.ops = &clk_post_bit_div_ops,
-	.c = {
-		.parent = &dsi1pll_bitclk_src.c,
-		.dbg_name = "dsi1pll_post_bit_div",
-		.ops = &clk_ops_post_bit_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_post_bit_div.c),
-	}
-};
-
-static struct mux_clk dsi1pll_pclk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_post_bit_div.c, 0},
-		{&dsi1pll_post_vco_div.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_post_bit_div.c,
-		.dbg_name = "dsi1pll_pclk_src_mux",
-		.ops = &clk_ops_gen_mux,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_src_mux.c),
-	}
-};
-
-static struct div_clk dsi1pll_pclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &pixel_clk_div_ops,
-	.c = {
-		.parent = &dsi1pll_pclk_src_mux.c,
-		.dbg_name = "dsi1pll_pclk_src",
-		.ops = &clk_ops_pclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_src.c),
+static struct clk_regmap_div dsi0pll_bitclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
 	},
 };
 
-static struct mux_clk dsi1pll_pclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_pclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_pclk_src.c,
-		.dbg_name = "dsi1pll_pclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_mux.c),
-	}
-};
-
-static struct div_clk dsi1pll_byteclk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi1pll_bitclk_src.c,
-		.dbg_name = "dsi1pll_byteclk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_byteclk_src.c),
+static struct clk_regmap_div dsi1pll_bitclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
 	},
 };
 
-static struct mux_clk dsi1pll_byteclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_byteclk_src.c, 0},
+static struct clk_regmap_div dsi0pll_post_vco_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_post_vco_div",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
 	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_byteclk_src.c,
-		.dbg_name = "dsi1pll_byteclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_byteclk_mux.c),
-	}
 };
 
-static struct clk_lookup mdss_dsi_pll0cc_8998[] = {
-	CLK_LIST(dsi0pll_byteclk_mux),
-	CLK_LIST(dsi0pll_byteclk_src),
-	CLK_LIST(dsi0pll_pclk_mux),
-	CLK_LIST(dsi0pll_pclk_src),
-	CLK_LIST(dsi0pll_pclk_src_mux),
-	CLK_LIST(dsi0pll_post_bit_div),
-	CLK_LIST(dsi0pll_post_vco_div),
-	CLK_LIST(dsi0pll_bitclk_src),
-	CLK_LIST(dsi0pll_vco_clk),
-};
-static struct clk_lookup mdss_dsi_pll1cc_8998[] = {
-	CLK_LIST(dsi1pll_byteclk_mux),
-	CLK_LIST(dsi1pll_byteclk_src),
-	CLK_LIST(dsi1pll_pclk_mux),
-	CLK_LIST(dsi1pll_pclk_src),
-	CLK_LIST(dsi1pll_pclk_src_mux),
-	CLK_LIST(dsi1pll_post_bit_div),
-	CLK_LIST(dsi1pll_post_vco_div),
-	CLK_LIST(dsi1pll_bitclk_src),
-	CLK_LIST(dsi1pll_vco_clk),
+static struct clk_regmap_div dsi1pll_post_vco_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_post_vco_div",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
 };
 
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
+static struct clk_fixed_factor dsi0pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_div dsi0pll_post_bit_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_post_bit_div",
+			.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_post_bit_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_post_bit_div",
+			.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_byteclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_byteclk_mux",
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_byteclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_byteclk_mux",
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi0pll_post_bit_div",
+						"dsi0pll_post_bit_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi1pll_post_bit_div",
+						"dsi1pll_post_bit_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_mux",
+			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_mux",
+			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
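+/*
+ * All DSI PLL clock hws, indexed by the IDs from mdss-10nm-pll-clk.h. The
+ * registration loops below expect the DSI0 and DSI1 entries to be contiguous
+ * (VCO_CLK_n .. PCLK_MUX_n_CLK).
+ */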
+static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
+	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
+	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.clkr.hw,
+	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.clkr.hw,
+	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
+	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
+	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.clkr.hw,
+	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.clkr.hw,
+	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
+	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+};
+
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
 				  struct mdss_pll_resources *pll_res)
 {
-	int rc = 0, ndx;
+	int rc = 0, ndx, i;
+	struct clk *clk;
+	struct clk_onecell_data *clk_data;
+	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_10nm);
+	struct regmap *rmap;
 
 	if (!pdev || !pdev->dev.of_node ||
 		!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
@@ -1353,62 +1341,120 @@
 	pll_rsc_db[ndx] = pll_res;
 	pll_res->priv = &plls[ndx];
 	plls[ndx].rsc = pll_res;
-
-	/* runtime fixup of all div and mux clock ops */
-	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
-	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
-	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
-
-	clk_ops_bitclk_src_c = clk_ops_div;
-	clk_ops_bitclk_src_c.prepare = mdss_pll_div_prepare;
-
-	/*
-	 * Set the ops for the two dividers in the pixel clock tree to the
-	 * slave_div to ensure that a set rate on this divider clock will not
-	 * be propagated to it's parent. This is needed ensure that when we set
-	 * the rate for pixel clock, the vco is not reconfigured
-	 */
-	clk_ops_post_vco_div_c = clk_ops_slave_div;
-	clk_ops_post_vco_div_c.prepare = mdss_pll_div_prepare;
-
-	clk_ops_post_bit_div_c = clk_ops_slave_div;
-	clk_ops_post_bit_div_c.prepare = mdss_pll_div_prepare;
-
-	clk_ops_pclk_src_c = clk_ops_div;
-	clk_ops_pclk_src_c.prepare = mdss_pll_div_prepare;
-
 	pll_res->vco_delay = VCO_DELAY_USEC;
-	if (ndx == 0) {
-		dsi0pll_byteclk_mux.priv = pll_res;
-		dsi0pll_byteclk_src.priv = pll_res;
-		dsi0pll_pclk_mux.priv = pll_res;
-		dsi0pll_pclk_src.priv = pll_res;
-		dsi0pll_pclk_src_mux.priv = pll_res;
-		dsi0pll_post_bit_div.priv = pll_res;
-		dsi0pll_post_vco_div.priv = pll_res;
-		dsi0pll_bitclk_src.priv = pll_res;
-		dsi0pll_vco_clk.priv = pll_res;
 
-		rc = of_msm_clock_register(pdev->dev.of_node,
-			mdss_dsi_pll0cc_8998,
-			ARRAY_SIZE(mdss_dsi_pll0cc_8998));
-	} else {
-		dsi1pll_byteclk_mux.priv = pll_res;
-		dsi1pll_byteclk_src.priv = pll_res;
-		dsi1pll_pclk_mux.priv = pll_res;
-		dsi1pll_pclk_src.priv = pll_res;
-		dsi1pll_pclk_src_mux.priv = pll_res;
-		dsi1pll_post_bit_div.priv = pll_res;
-		dsi1pll_post_vco_div.priv = pll_res;
-		dsi1pll_bitclk_src.priv = pll_res;
-		dsi1pll_vco_clk.priv = pll_res;
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+					GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
 
-		rc = of_msm_clock_register(pdev->dev.of_node,
-			mdss_dsi_pll1cc_8998,
-			ARRAY_SIZE(mdss_dsi_pll1cc_8998));
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+				sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		devm_kfree(&pdev->dev, clk_data);
+		return -ENOMEM;
 	}
-	if (rc)
-		pr_err("dsi%dpll clock register failed, rc=%d\n", ndx, rc);
+	clk_data->clk_num = num_clks;
 
+	/* Hook each clock's regmap to this PLL instance and register the tree */
+	if (ndx == 0) {
+		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_post_vco_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_post_bit_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_byteclk_mux.clkr.regmap = rmap;
+
+		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+							pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+
+	} else {
+		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_post_vco_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_post_bit_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_byteclk_mux.clkr.regmap = rmap;
+
+		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+						pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d clocks successfully\n", ndx);
+		return rc;
+	}
+clk_register_fail:
+	devm_kfree(&pdev->dev, clk_data->clks);
+	devm_kfree(&pdev->dev, clk_data);
 	return rc;
 }
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
index 286c99e..7fc38a2 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
 #ifndef __MDSS_DSI_PLL_H
 #define __MDSS_DSI_PLL_H
 
+#include <linux/clk-provider.h>
+#include "mdss-pll.h"
 #define MAX_DSI_PLL_EN_SEQS	10
 
 #define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG		(0x0020)
@@ -31,6 +33,7 @@
 };
 
 struct dsi_pll_vco_clk {
+	struct clk_hw	hw;
 	unsigned long	ref_clk_rate;
 	unsigned long	min_rate;
 	unsigned long	max_rate;
@@ -38,73 +41,16 @@
 	struct lpfr_cfg *lpfr_lut;
 	u32		lpfr_lut_size;
 	void		*priv;
-
-	struct clk	c;
-
 	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
 			(struct mdss_pll_resources *dsi_pll_Res);
 };
 
-static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw)
 {
-	return container_of(clk, struct dsi_pll_vco_clk, c);
+	return container_of(hw, struct dsi_pll_vco_clk, hw);
 }
 
-int dsi_pll_clock_register_hpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_20nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_lpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8996(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
-				  struct mdss_pll_resources *pll_res);
-
-int set_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_byte_mux_sel(struct mux_clk *clk);
-int dsi_pll_mux_prepare(struct clk *c);
-int fixed_4div_set_div(struct div_clk *clk, int div);
-int fixed_4div_get_div(struct div_clk *clk);
-int digital_set_div(struct div_clk *clk, int div);
-int digital_get_div(struct div_clk *clk);
-int analog_set_div(struct div_clk *clk, int div);
-int analog_get_div(struct div_clk *clk);
-int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
-int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-unsigned long vco_get_rate(struct clk *c);
-long vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff vco_handoff(struct clk *c);
-int vco_prepare(struct clk *c);
-void vco_unprepare(struct clk *c);
-
-/* APIs for 20nm PHY PLL */
-int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
-				unsigned long rate);
-long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff pll_20nm_vco_handoff(struct clk *c);
-int pll_20nm_vco_prepare(struct clk *c);
-void pll_20nm_vco_unprepare(struct clk *c);
-int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
-
-int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
-int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int fixed_hr_oclk2_get_div(struct div_clk *clk);
-int hr_oclk3_set_div(struct div_clk *clk, int div);
-int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
-int hr_oclk3_get_div(struct div_clk *clk);
-int ndiv_set_div(struct div_clk *clk, int div);
-int shadow_ndiv_set_div(struct div_clk *clk, int div);
-int ndiv_get_div(struct div_clk *clk);
-void __dsi_pll_disable(void __iomem *pll_base);
-
-int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_pixel_mux_sel(struct mux_clk *clk);
-int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_byte_mux_sel(struct mux_clk *clk);
-
 #endif
diff --git a/drivers/clk/qcom/mdss/mdss-pll-util.c b/drivers/clk/qcom/mdss/mdss-pll-util.c
index 690c53f..4d79772 100644
--- a/drivers/clk/qcom/mdss/mdss-pll-util.c
+++ b/drivers/clk/qcom/mdss/mdss-pll-util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/clk/msm-clock-generic.h>
 #include <linux/of_address.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index c22fa80..0a0d303 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -19,12 +19,8 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/iopoll.h>
-#include <linux/clk/msm-clock-generic.h>
-
 #include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
-#include "mdss-hdmi-pll.h"
-#include "mdss-dp-pll.h"
 
 int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
 {
@@ -128,32 +124,10 @@
 		goto err;
 	}
 
-	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
-		pll_res->target_id = MDSS_PLL_TARGET_8996;
-		pll_res->revision = 1;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996_v2")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
-		pll_res->target_id = MDSS_PLL_TARGET_8996;
-		pll_res->revision = 2;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8998;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_DP_PLL_8998;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v2")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V2;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v3")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3;
-	} else if (!strcmp(compatible_stream,
-				"qcom,mdss_hdmi_pll_8996_v3_1p8")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3_1_8;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8998;
-	} else {
+	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+	else
 		goto err;
-	}
 
 	return rc;
 
@@ -174,29 +148,8 @@
 	}
 
 	switch (pll_res->pll_interface_type) {
-	case MDSS_DSI_PLL_8996:
-		rc = dsi_pll_clock_register_8996(pdev, pll_res);
-		break;
-	case MDSS_DSI_PLL_8998:
-		rc = dsi_pll_clock_register_8998(pdev, pll_res);
-	case MDSS_DP_PLL_8998:
-		rc = dp_pll_clock_register_8998(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996:
-		rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V2:
-		rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V3:
-		rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V3_1_8:
-		rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8998:
-		rc = hdmi_8998_pll_clock_register(pdev, pll_res);
-		break;
+	case MDSS_DSI_PLL_10NM:
+		rc = dsi_pll_clock_register_10nm(pdev, pll_res);
+		break;
 	case MDSS_UNKNOWN_PLL:
 	default:
 		rc = -EINVAL;
@@ -392,15 +345,7 @@
 }
 
 static const struct of_device_id mdss_pll_dt_match[] = {
-	{.compatible = "qcom,mdss_dsi_pll_8996"},
-	{.compatible = "qcom,mdss_dsi_pll_8996_v2"},
-	{.compatible = "qcom,mdss_dsi_pll_8998"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v2"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v3"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v3_1p8"},
-	{.compatible = "qcom,mdss_dp_pll_8998"},
-	{.compatible = "qcom,mdss_hdmi_pll_8998"},
+	{.compatible = "qcom,mdss_dsi_pll_10nm"},
 	{}
 };
 
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 48dddf6..28b7ca6 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -12,10 +12,16 @@
 
 #ifndef __MDSS_PLL_H
 #define __MDSS_PLL_H
-
-#include <linux/mdss_io_util.h>
-#include <linux/clk/msm-clock-generic.h>
+#include <linux/sde_io_util.h>
+#include <linux/clk-provider.h>
 #include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include "../clk-regmap.h"
+#include "../clk-regmap-divider.h"
+#include "../clk-regmap-mux.h"
+
 
 #define MDSS_PLL_REG_W(base, offset, data)	\
 				writel_relaxed((data), (base) + (offset))
@@ -30,14 +36,7 @@
 			(base) + (offset))
 
 enum {
-	MDSS_DSI_PLL_8996,
-	MDSS_DSI_PLL_8998,
-	MDSS_DP_PLL_8998,
-	MDSS_HDMI_PLL_8996,
-	MDSS_HDMI_PLL_8996_V2,
-	MDSS_HDMI_PLL_8996_V3,
-	MDSS_HDMI_PLL_8996_V3_1_8,
-	MDSS_HDMI_PLL_8998,
+	MDSS_DSI_PLL_10NM,
 	MDSS_UNKNOWN_PLL,
 };
 
@@ -200,20 +199,24 @@
 		(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
 }
 
-static inline int mdss_pll_div_prepare(struct clk *c)
+static inline int mdss_pll_div_prepare(struct clk_hw *hw)
 {
-	struct div_clk *div = to_div_clk(c);
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
 	/* Restore the divider's value */
-	return div->ops->set_div(div, div->data.div);
+	return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
+				clk_hw_get_rate(parent_hw));
 }
 
-static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
+static inline int mdss_set_mux_sel(void *context, unsigned int reg,
+					unsigned int val)
 {
 	return 0;
 }
 
-static inline int mdss_get_mux_sel(struct mux_clk *clk)
+static inline int mdss_get_mux_sel(void *context, unsigned int reg,
+					unsigned int *val)
 {
+	*val = 0;
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/vdd-level-sdm845.h b/drivers/clk/qcom/vdd-level-sdm845.h
index 5be7a28..a8d08b3 100644
--- a/drivers/clk/qcom/vdd-level-sdm845.h
+++ b/drivers/clk/qcom/vdd-level-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -90,6 +90,30 @@
 	},					\
 	.num_rate_max = VDD_CX_NUM
 
+#define VDD_MX_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
+	.vdd_class = &vdd_mx,			\
+	.rate_max = (unsigned long[VDD_CX_NUM]) {	\
+		[VDD_CX_##l1] = (f1),		\
+		[VDD_CX_##l2] = (f2),		\
+		[VDD_CX_##l3] = (f3),		\
+		[VDD_CX_##l4] = (f4),		\
+	},					\
+	.num_rate_max = VDD_CX_NUM
+
+#define VDD_GX_FMAX_MAP8(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6, \
+				l7, f7, l8, f8) \
+	.vdd_class = &vdd_gfx,			\
+	.rate_max = (unsigned long[VDD_GX_NUM]) {	\
+		[VDD_GX_##l1] = (f1),		\
+		[VDD_GX_##l2] = (f2),		\
+		[VDD_GX_##l3] = (f3),		\
+		[VDD_GX_##l4] = (f4),		\
+		[VDD_GX_##l5] = (f5),		\
+		[VDD_GX_##l6] = (f6),		\
+		[VDD_GX_##l7] = (f7),		\
+		[VDD_GX_##l8] = (f8),		\
+	},					\
+	.num_rate_max = VDD_GX_NUM
 
 enum vdd_cx_levels {
 	VDD_CX_NONE,
@@ -102,6 +126,19 @@
 	VDD_CX_NUM,
 };
 
+enum vdd_gx_levels {
+	VDD_GX_NONE,
+	VDD_GX_MIN,		/* MIN SVS */
+	VDD_GX_LOWER,		/* SVS2 */
+	VDD_GX_LOW,		/* SVS */
+	VDD_GX_LOW_L1,		/* SVSL1 */
+	VDD_GX_NOMINAL,		/* NOM */
+	VDD_GX_NOMINAL_L1,		/* NOM1 */
+	VDD_GX_HIGH,		/* TURBO */
+	VDD_GX_HIGH_L1,		/* TURBO1 */
+	VDD_GX_NUM,
+};
+
 /* Need to use the correct VI/VL mappings */
 static int vdd_corner[] = {
 	RPMH_REGULATOR_LEVEL_OFF,		/* VDD_CX_NONE */
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 0e9cf88..8b63979 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -83,13 +83,11 @@
 			.parent_names = (const char *[]){ "bi_tcxo" },
 			.num_parents = 1,
 			.ops = &clk_fabia_pll_ops,
-			VDD_CX_FMAX_MAP5(
-				MIN, 200000000,
-				LOW, 640000000,
-				LOW_L1, 760000000,
-				NOMINAL, 1332000000,
-				HIGH, 1599000000),
-
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
 		},
 	},
 };
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index fc75a33..8ca07fe 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -608,7 +608,7 @@
 				 0x150, 0, 4, 24, 2, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
 
 static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
 
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index ebb1b31..ee78104 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -85,6 +85,10 @@
 	unsigned int m, p;
 	u32 reg;
 
+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	reg = readl(cmp->common.base + cmp->common.reg);
 
 	m = reg >> cmp->m.shift;
@@ -114,6 +118,10 @@
 	unsigned int m, p;
 	u32 reg;
 
+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	max_m = cmp->m.max ?: 1 << cmp->m.width;
 	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
 
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 884e557..f18dccf 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -113,7 +113,6 @@
 config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
 	bool "interactive"
 	select CPU_FREQ_GOV_INTERACTIVE
-	select CPU_FREQ_GOV_PERFORMANCE
 	help
 	  Use the CPUFreq governor 'interactive' as default. This allows
 	  you to get a full dynamic cpu frequency capable system by simply
@@ -187,6 +186,23 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	tristate "'interactive' cpufreq policy governor"
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
+	  This governor attempts to reduce the latency of clock
+	  increases so that the system is more responsive to
+	  interactive workloads.
+
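+	  For example, to select this governor at runtime on CPU0 (assuming
+	  the standard sysfs cpufreq layout):
+
+	    # echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
+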
+	  To compile this driver as a module, choose M here: the
+	  module will be called cpufreq_interactive.
+
+	  For details, take a look at linux/Documentation/cpu-freq.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
@@ -211,6 +227,17 @@
 
 	  If in doubt, say N.
 
+config CPU_BOOST
+	tristate "Event-based short-term CPU freq boost"
+	depends on CPU_FREQ
+	help
+	  This driver boosts the frequency of one or more CPUs based on
+	  various events that might occur in the system. As of now, the
+	  events it reacts to are:
+	  - Input events (e.g. touchscreen, touchpad or keypad activity).
+
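+	  For example, to boost CPUs 0 and 4 on input events for 40 ms
+	  (illustrative values; paths assume the driver is built in, with
+	  parameters under /sys/module/cpu_boost/parameters/):
+	    echo "0:1400000 4:1800000" > /sys/module/cpu_boost/parameters/input_boost_freq
+	    echo 40 > /sys/module/cpu_boost/parameters/input_boost_ms
+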
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_SCHED
 	bool "'sched' cpufreq governor"
 	depends on CPU_FREQ
@@ -224,26 +251,6 @@
 
 	  If in doubt, say N.
 
-config CPU_FREQ_GOV_INTERACTIVE
-	tristate "'interactive' cpufreq policy governor"
-	depends on CPU_FREQ
-	select CPU_FREQ_GOV_ATTR_SET
-	select IRQ_WORK
-	help
-	  'interactive' - This driver adds a dynamic cpufreq policy governor
-	  designed for latency-sensitive workloads.
-
-	  This governor attempts to reduce the latency of clock
-	  increases so that the system is more responsive to
-	  interactive workloads.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called cpufreq_interactive.
-
-	  For details, take a look at linux/Documentation/cpu-freq.
-
-	  If in doubt, say N.
-
 config CPU_FREQ_GOV_SCHEDUTIL
 	bool "'schedutil' cpufreq policy governor"
 	depends on CPU_FREQ && SMP
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index d89b8af..e2023bd 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -258,3 +258,9 @@
 	  support for its operation.
 
 	  If in doubt, say N.
+
+config CPU_FREQ_MSM
+	bool "MSM CPUFreq support"
+	depends on CPU_FREQ
+	help
+	  This enables the CPUFreq driver for Qualcomm Technologies, Inc. CPUs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f0c9905..bf98b28 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
 obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET)	+= cpufreq_governor_attr_set.o
+obj-$(CONFIG_CPU_BOOST)			+= cpu-boost.o
 
 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
 obj-$(CONFIG_CPUFREQ_DT_PLATDEV)	+= cpufreq-dt-platdev.o
@@ -60,6 +61,7 @@
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
 obj-$(CONFIG_ARM_MT8173_CPUFREQ)	+= mt8173-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_MSM)		+= qcom-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
@@ -82,7 +84,6 @@
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
 obj-$(CONFIG_MACH_MVEBU_V7)		+= mvebu-cpufreq.o
 
-
 ##################################################################################
 # PowerPC platform drivers
 obj-$(CONFIG_CPU_FREQ_CBE)		+= ppc-cbe-cpufreq.o
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
new file mode 100644
index 0000000..07603fe
--- /dev/null
+++ b/drivers/cpufreq/cpu-boost.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cpu-boost: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/time.h>
+
+struct cpu_sync {
+	int cpu;
+	unsigned int input_boost_min;
+	unsigned int input_boost_freq;
+};
+
+static DEFINE_PER_CPU(struct cpu_sync, sync_info);
+static struct workqueue_struct *cpu_boost_wq;
+
+static struct work_struct input_boost_work;
+
+static bool input_boost_enabled;
+
+static unsigned int input_boost_ms = 40;
+module_param(input_boost_ms, uint, 0644);
+
+static bool sched_boost_on_input;
+module_param(sched_boost_on_input, bool, 0644);
+
+static bool sched_boost_active;
+
+static struct delayed_work input_boost_rem;
+static u64 last_input_time;
+#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
+
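+/*
+ * input_boost_freq accepts either a single frequency in kHz, applied to all
+ * CPUs, or space-separated "cpu:freq" pairs (e.g. "0:1400000 4:1800000").
+ * Boosting stays enabled as long as at least one CPU has a non-zero value.
+ */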
+static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
+{
+	int i, ntokens = 0;
+	unsigned int val, cpu;
+	const char *cp = buf;
+	bool enabled = false;
+
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	/* single number: apply to all CPUs */
+	if (!ntokens) {
+		if (sscanf(buf, "%u\n", &val) != 1)
+			return -EINVAL;
+		for_each_possible_cpu(i)
+			per_cpu(sync_info, i).input_boost_freq = val;
+		goto check_enable;
+	}
+
+	/* CPU:value pair */
+	if (!(ntokens % 2))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < ntokens; i += 2) {
+		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+			return -EINVAL;
+		if (cpu >= num_possible_cpus())
+			return -EINVAL;
+
+		per_cpu(sync_info, cpu).input_boost_freq = val;
+		cp = strchr(cp, ' ');
+		cp++;
+	}
+
+check_enable:
+	for_each_possible_cpu(i) {
+		if (per_cpu(sync_info, i).input_boost_freq) {
+			enabled = true;
+			break;
+		}
+	}
+	input_boost_enabled = enabled;
+
+	return 0;
+}
+
+static int get_input_boost_freq(char *buf, const struct kernel_param *kp)
+{
+	int cnt = 0, cpu;
+	struct cpu_sync *s;
+
+	for_each_possible_cpu(cpu) {
+		s = &per_cpu(sync_info, cpu);
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%d:%u ", cpu, s->input_boost_freq);
+	}
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_input_boost_freq = {
+	.set = set_input_boost_freq,
+	.get = get_input_boost_freq,
+};
+module_param_cb(input_boost_freq, &param_ops_input_boost_freq, NULL, 0644);
+
+/*
+ * The CPUFREQ_ADJUST notifier is used to override the current policy min to
+ * make sure policy min >= boost_min. The cpufreq framework then does the job
+ * of enforcing the new policy.
+ */
+static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct cpufreq_policy *policy = data;
+	unsigned int cpu = policy->cpu;
+	struct cpu_sync *s = &per_cpu(sync_info, cpu);
+	unsigned int ib_min = s->input_boost_min;
+
+	switch (val) {
+	case CPUFREQ_ADJUST:
+		if (!ib_min)
+			break;
+
+		pr_debug("CPU%u policy min before boost: %u kHz\n",
+			 cpu, policy->min);
+		pr_debug("CPU%u boost min: %u kHz\n", cpu, ib_min);
+
+		cpufreq_verify_within_limits(policy, ib_min, UINT_MAX);
+
+		pr_debug("CPU%u policy min after boost: %u kHz\n",
+			 cpu, policy->min);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block boost_adjust_nb = {
+	.notifier_call = boost_adjust_notify,
+};
+
+static void update_policy_online(void)
+{
+	unsigned int i;
+
+	/* Re-evaluate policy to trigger adjust notifier for online CPUs */
+	get_online_cpus();
+	for_each_online_cpu(i) {
+		pr_debug("Updating policy for CPU%d\n", i);
+		cpufreq_update_policy(i);
+	}
+	put_online_cpus();
+}
+
+static void do_input_boost_rem(struct work_struct *work)
+{
+	unsigned int i, ret;
+	struct cpu_sync *i_sync_info;
+
+	/* Reset the input_boost_min for all CPUs in the system */
+	pr_debug("Resetting input boost min for all CPUs\n");
+	for_each_possible_cpu(i) {
+		i_sync_info = &per_cpu(sync_info, i);
+		i_sync_info->input_boost_min = 0;
+	}
+
+	/* Update policies for all online CPUs */
+	update_policy_online();
+
+	if (sched_boost_active) {
+		ret = sched_set_boost(0);
+		if (ret)
+			pr_err("cpu-boost: HMP boost disable failed\n");
+		sched_boost_active = false;
+	}
+}
+
+static void do_input_boost(struct work_struct *work)
+{
+	unsigned int i, ret;
+	struct cpu_sync *i_sync_info;
+
+	cancel_delayed_work_sync(&input_boost_rem);
+	if (sched_boost_active) {
+		sched_set_boost(0);
+		sched_boost_active = false;
+	}
+
+	/* Set the input_boost_min for all CPUs in the system */
+	pr_debug("Setting input boost min for all CPUs\n");
+	for_each_possible_cpu(i) {
+		i_sync_info = &per_cpu(sync_info, i);
+		i_sync_info->input_boost_min = i_sync_info->input_boost_freq;
+	}
+
+	/* Update policies for all online CPUs */
+	update_policy_online();
+
+	/* Enable scheduler boost to migrate tasks to big cluster */
+	if (sched_boost_on_input) {
+		ret = sched_set_boost(1);
+		if (ret)
+			pr_err("cpu-boost: HMP boost enable failed\n");
+		else
+			sched_boost_active = true;
+	}
+
+	queue_delayed_work(cpu_boost_wq, &input_boost_rem,
+					msecs_to_jiffies(input_boost_ms));
+}
+
+static void cpuboost_input_event(struct input_handle *handle,
+		unsigned int type, unsigned int code, int value)
+{
+	u64 now;
+
+	if (!input_boost_enabled)
+		return;
+
+	now = ktime_to_us(ktime_get());
+	if (now - last_input_time < MIN_INPUT_INTERVAL)
+		return;
+
+	if (work_pending(&input_boost_work))
+		return;
+
+	queue_work(cpu_boost_wq, &input_boost_work);
+	last_input_time = ktime_to_us(ktime_get());
+}
+
+static int cpuboost_input_connect(struct input_handler *handler,
+		struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int error;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "cpufreq";
+
+	error = input_register_handle(handle);
+	if (error)
+		goto err2;
+
+	error = input_open_device(handle);
+	if (error)
+		goto err1;
+
+	return 0;
+err1:
+	input_unregister_handle(handle);
+err2:
+	kfree(handle);
+	return error;
+}
+
+static void cpuboost_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id cpuboost_ids[] = {
+	/* multi-touch touchscreen */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	/* touchpad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		.absbit = { [BIT_WORD(ABS_X)] =
+			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+	},
+	/* Keypad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ },
+};
+
+static struct input_handler cpuboost_input_handler = {
+	.event          = cpuboost_input_event,
+	.connect        = cpuboost_input_connect,
+	.disconnect     = cpuboost_input_disconnect,
+	.name           = "cpu-boost",
+	.id_table       = cpuboost_ids,
+};
+
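+/*
+ * Register the policy-adjust notifier and the input handler. Boosting stays
+ * inactive until a non-zero input_boost_freq is configured for at least
+ * one CPU.
+ */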
+static int cpu_boost_init(void)
+{
+	int cpu, ret;
+	struct cpu_sync *s;
+
+	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
+	if (!cpu_boost_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&input_boost_work, do_input_boost);
+	INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
+
+	for_each_possible_cpu(cpu) {
+		s = &per_cpu(sync_info, cpu);
+		s->cpu = cpu;
+	}
+	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
+
+	ret = input_register_handler(&cpuboost_input_handler);
+	return ret;
+}
+late_initcall(cpu_boost_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 019e817..66e604e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -93,6 +93,7 @@
  */
 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
 static struct srcu_notifier_head cpufreq_transition_notifier_list;
+struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 static bool init_cpufreq_transition_notifier_list_called;
 static int __init init_cpufreq_transition_notifier_list(void)
@@ -103,6 +104,15 @@
 }
 pure_initcall(init_cpufreq_transition_notifier_list);
 
+static bool init_cpufreq_govinfo_notifier_list_called;
+static int __init init_cpufreq_govinfo_notifier_list(void)
+{
+	ATOMIC_INIT_NOTIFIER_HEAD(&cpufreq_govinfo_notifier_list);
+	init_cpufreq_govinfo_notifier_list_called = true;
+	return 0;
+}
+pure_initcall(init_cpufreq_govinfo_notifier_list);
+
 static int off __read_mostly;
 static int cpufreq_disabled(void)
 {
@@ -742,9 +752,11 @@
 					char *buf)
 {
 	unsigned int cur_freq = __cpufreq_get(policy);
-	if (!cur_freq)
-		return sprintf(buf, "<unknown>");
-	return sprintf(buf, "%u\n", cur_freq);
+
+	if (cur_freq)
+		return sprintf(buf, "%u\n", cur_freq);
+
+	return sprintf(buf, "<unknown>\n");
 }
 
 /**
@@ -1078,7 +1090,8 @@
 	if (has_target()) {
 		ret = cpufreq_start_governor(policy);
 		if (ret)
-			pr_err("%s: Failed to start governor\n", __func__);
+			pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
+			       __func__, cpu, policy->cpu);
 	}
 	up_write(&policy->rwsem);
 	return ret;
@@ -1250,6 +1263,9 @@
 		for_each_cpu(j, policy->related_cpus)
 			per_cpu(cpufreq_cpu_data, j) = policy;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	} else {
+		policy->min = policy->user_policy.min;
+		policy->max = policy->user_policy.max;
 	}
 
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -1776,7 +1792,8 @@
 	if (cpufreq_disabled())
 		return -EINVAL;
 
-	WARN_ON(!init_cpufreq_transition_notifier_list_called);
+	WARN_ON(!init_cpufreq_transition_notifier_list_called ||
+		!init_cpufreq_govinfo_notifier_list_called);
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
@@ -1797,6 +1814,10 @@
 		ret = blocking_notifier_chain_register(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_register(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -1837,6 +1858,10 @@
 		ret = blocking_notifier_chain_unregister(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_unregister(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -1980,15 +2005,6 @@
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
 
-	/*
-	 * This might look like a redundant call as we are checking it again
-	 * after finding index. But it is left intentionally for cases where
-	 * exactly same freq is called again and so we can save on few function
-	 * calls.
-	 */
-	if (target_freq == policy->cur)
-		return 0;
-
 	/* Save last value to restore later on errors */
 	policy->restore_freq = policy->cur;
 
@@ -2533,7 +2549,7 @@
 	hp_online = ret;
 	ret = 0;
 
-	pr_debug("driver %s up and running\n", driver_data->name);
+	pr_info("driver %s up and running\n", driver_data->name);
 	goto out;
 
 err_if_unreg:
@@ -2565,7 +2581,7 @@
 	if (!cpufreq_driver || (driver != cpufreq_driver))
 		return -EINVAL;
 
-	pr_debug("unregistering driver %s\n", driver->name);
+	pr_info("unregistering driver %s\n", driver->name);
 
 	/* Protect against concurrent cpu hotplug */
 	get_online_cpus();
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index d6cac0e..12eb6d8 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -1,7 +1,7 @@
 /*
  * drivers/cpufreq/cpufreq_interactive.c
  *
- * Copyright (C) 2010-2016 Google, Inc.
+ * Copyright (C) 2010 Google, Inc.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -13,14 +13,12 @@
  * GNU General Public License for more details.
  *
  * Author: Mike Chan (mike@android.com)
+ *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/cpufreq.h>
-#include <linux/irq_work.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/rwsem.h>
@@ -29,50 +27,96 @@
 #include <linux/tick.h>
 #include <linux/time.h>
 #include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/cpufreq_interactive.h>
 
-#define gov_attr_ro(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0444, show_##_name, NULL)
+static DEFINE_PER_CPU(struct update_util_data, update_util);
 
-#define gov_attr_wo(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0200, NULL, store_##_name)
+struct cpufreq_interactive_policyinfo {
+	bool work_in_progress;
+	struct irq_work irq_work;
+	spinlock_t irq_work_lock; /* protects work_in_progress */
+	struct timer_list policy_slack_timer;
+	struct hrtimer notif_timer;
+	spinlock_t load_lock; /* protects load tracking stat */
+	u64 last_evaluated_jiffy;
+	struct cpufreq_policy *policy;
+	struct cpufreq_policy p_nolim; /* policy copy with no limits */
+	struct cpufreq_frequency_table *freq_table;
+	spinlock_t target_freq_lock; /* protects target freq */
+	unsigned int target_freq;
+	unsigned int floor_freq;
+	unsigned int min_freq;
+	u64 floor_validate_time;
+	u64 hispeed_validate_time;
+	u64 max_freq_hyst_start_time;
+	struct rw_semaphore enable_sem;
+	bool reject_notification;
+	bool notif_pending;
+	unsigned long notif_cpu;
+	int governor_enabled;
+	struct cpufreq_interactive_tunables *cached_tunables;
+	struct sched_load *sl;
+};
 
-#define gov_attr_rw(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0644, show_##_name, store_##_name)
+/* Protected by per-policy load_lock */
+struct cpufreq_interactive_cpuinfo {
+	u64 time_in_idle;
+	u64 time_in_idle_timestamp;
+	u64 cputime_speedadj;
+	u64 cputime_speedadj_timestamp;
+	unsigned int loadadjfreq;
+};
 
-/* Separate instance required for each 'interactive' directory in sysfs */
-struct interactive_tunables {
-	struct gov_attr_set attr_set;
+static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
 
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+static int set_window_count;
+static int migration_register_count;
+static struct mutex sched_lock;
+static cpumask_t controlled_cpus;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+	DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+	int usage_count;
 	/* Hi speed to bump to from lo speed when load burst (default max) */
 	unsigned int hispeed_freq;
-
 	/* Go to hi speed when CPU load at or above this value. */
 #define DEFAULT_GO_HISPEED_LOAD 99
 	unsigned long go_hispeed_load;
-
 	/* Target load. Lower values result in higher CPU speeds. */
 	spinlock_t target_loads_lock;
 	unsigned int *target_loads;
 	int ntarget_loads;
-
 	/*
 	 * The minimum amount of time to spend at a frequency before we can ramp
 	 * down.
 	 */
 #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
 	unsigned long min_sample_time;
-
-	/* The sample rate of the timer used to increase frequency */
-	unsigned long sampling_rate;
-
+	/*
+	 * The sample rate of the timer used to increase frequency
+	 */
+	unsigned long timer_rate;
 	/*
 	 * Wait this long before raising speed above hispeed, by default a
 	 * single timer interval.
@@ -80,175 +124,220 @@
 	spinlock_t above_hispeed_delay_lock;
 	unsigned int *above_hispeed_delay;
 	int nabove_hispeed_delay;
-
 	/* Non-zero means indefinite speed boost active */
-	int boost;
+	int boost_val;
 	/* Duration of a boot pulse in usecs */
-	int boostpulse_duration;
+	int boostpulse_duration_val;
 	/* End time of boost pulse in ktime converted to usecs */
 	u64 boostpulse_endtime;
 	bool boosted;
-
 	/*
-	 * Max additional time to wait in idle, beyond sampling_rate, at speeds
+	 * Max additional time to wait in idle, beyond timer_rate, at speeds
 	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
 	 */
-#define DEFAULT_TIMER_SLACK (4 * DEFAULT_SAMPLING_RATE)
-	unsigned long timer_slack_delay;
-	unsigned long timer_slack;
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+	int timer_slack_val;
 	bool io_is_busy;
+
+	/* scheduler input related flags */
+	bool use_sched_load;
+	bool use_migration_notif;
+
+	/*
+	 * Whether to align timer windows across all CPUs. When
+	 * use_sched_load is true, this flag is ignored and windows
+	 * will always be aligned.
+	 */
+	bool align_windows;
+
+	/*
+	 * Stay at max freq for at least max_freq_hysteresis before dropping
+	 * frequency.
+	 */
+	unsigned int max_freq_hysteresis;
+
+	/* Ignore hispeed_freq and above_hispeed_delay for notification */
+	bool ignore_hispeed_on_notif;
+
+	/* Ignore min_sample_time for notification */
+	bool fast_ramp_down;
+
+	/* Whether to enable prediction or not */
+	bool enable_prediction;
 };
 
-/* Separate instance required for each 'struct cpufreq_policy' */
-struct interactive_policy {
-	struct cpufreq_policy *policy;
-	struct interactive_tunables *tunables;
-	struct list_head tunables_hook;
-};
+/* For cases where we have single governor instance for system */
+static struct cpufreq_interactive_tunables *common_tunables;
+static struct cpufreq_interactive_tunables *cached_common_tunables;
 
-/* Separate instance required for each CPU */
-struct interactive_cpu {
-	struct update_util_data update_util;
-	struct interactive_policy *ipolicy;
+static struct attribute_group *get_sysfs_attr(void);
 
-	struct irq_work irq_work;
-	u64 last_sample_time;
-	unsigned long next_sample_jiffies;
-	bool work_in_progress;
-
-	struct rw_semaphore enable_sem;
-	struct timer_list slack_timer;
-
-	spinlock_t load_lock; /* protects the next 4 fields */
-	u64 time_in_idle;
-	u64 time_in_idle_timestamp;
-	u64 cputime_speedadj;
-	u64 cputime_speedadj_timestamp;
-
-	spinlock_t target_freq_lock; /*protects target freq */
-	unsigned int target_freq;
-
-	unsigned int floor_freq;
-	u64 pol_floor_val_time; /* policy floor_validate_time */
-	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
-	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
-	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
-};
-
-static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);
-
-/* Realtime thread handles frequency scaling */
-static struct task_struct *speedchange_task;
-static cpumask_t speedchange_cpumask;
-static spinlock_t speedchange_cpumask_lock;
-
-/* Target load. Lower values result in higher CPU speeds. */
-#define DEFAULT_TARGET_LOAD 90
-static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
-
-#define DEFAULT_SAMPLING_RATE (20 * USEC_PER_MSEC)
-#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_SAMPLING_RATE
-static unsigned int default_above_hispeed_delay[] = {
-	DEFAULT_ABOVE_HISPEED_DELAY
-};
-
-/* Iterate over interactive policies for tunables */
-#define for_each_ipolicy(__ip)	\
-	list_for_each_entry(__ip, &tunables->attr_set.policy_list, tunables_hook)
-
-static struct interactive_tunables *global_tunables;
-static DEFINE_MUTEX(global_tunables_lock);
-
-static inline void update_slack_delay(struct interactive_tunables *tunables)
+/* Round to starting jiffy of next evaluation window */
+static u64 round_to_nw_start(u64 jif,
+			     struct cpufreq_interactive_tunables *tunables)
 {
-	tunables->timer_slack_delay = usecs_to_jiffies(tunables->timer_slack +
-						       tunables->sampling_rate);
-}
+	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
+	u64 ret;
 
-static bool timer_slack_required(struct interactive_cpu *icpu)
-{
-	struct interactive_policy *ipolicy = icpu->ipolicy;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-
-	if (tunables->timer_slack < 0)
-		return false;
-
-	if (icpu->target_freq > ipolicy->policy->min)
-		return true;
-
-	return false;
-}
-
-static void gov_slack_timer_start(struct interactive_cpu *icpu, int cpu)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-
-	icpu->slack_timer.expires = jiffies + tunables->timer_slack_delay;
-	add_timer_on(&icpu->slack_timer, cpu);
-}
-
-static void gov_slack_timer_modify(struct interactive_cpu *icpu)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-
-	mod_timer(&icpu->slack_timer, jiffies + tunables->timer_slack_delay);
-}
-
-static void slack_timer_resched(struct interactive_cpu *icpu, int cpu,
-				bool modify)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	unsigned long flags;
-
-	spin_lock_irqsave(&icpu->load_lock, flags);
-
-	icpu->time_in_idle = get_cpu_idle_time(cpu,
-					       &icpu->time_in_idle_timestamp,
-					       tunables->io_is_busy);
-	icpu->cputime_speedadj = 0;
-	icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp;
-
-	if (timer_slack_required(icpu)) {
-		if (modify)
-			gov_slack_timer_modify(icpu);
-		else
-			gov_slack_timer_start(icpu, cpu);
+	if (tunables->use_sched_load || tunables->align_windows) {
+		do_div(jif, step);
+		ret = (jif + 1) * step;
+	} else {
+		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
 	}
 
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-}
-
-static unsigned int
-freq_to_above_hispeed_delay(struct interactive_tunables *tunables,
-			    unsigned int freq)
-{
-	unsigned long flags;
-	unsigned int ret;
-	int i;
-
-	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
-
-	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
-	     freq >= tunables->above_hispeed_delay[i + 1]; i += 2)
-		;
-
-	ret = tunables->above_hispeed_delay[i];
-	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return ret;
 }
 
-static unsigned int freq_to_targetload(struct interactive_tunables *tunables,
-				       unsigned int freq)
+static inline int set_window_helper(
+			struct cpufreq_interactive_tunables *tunables)
 {
+	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
+			 usecs_to_jiffies(tunables->timer_rate));
+}
+
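+/*
+ * Re-arm the per-policy slack timer for the next evaluation window and,
+ * unless slack_only, reset the per-CPU idle-time baselines used for load
+ * tracking.
+ */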
+static void cpufreq_interactive_timer_resched(unsigned long cpu,
+					      bool slack_only)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 expires;
 	unsigned long flags;
-	unsigned int ret;
 	int i;
 
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	if (!slack_only) {
+		for_each_cpu(i, ppol->policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, i);
+			pcpu->time_in_idle = get_cpu_idle_time(i,
+						&pcpu->time_in_idle_timestamp,
+						tunables->io_is_busy);
+			pcpu->cputime_speedadj = 0;
+			pcpu->cputime_speedadj_timestamp =
+						pcpu->time_in_idle_timestamp;
+		}
+	}
+
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		del_timer(&ppol->policy_slack_timer);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+static void update_util_handler(struct update_util_data *data, u64 time,
+				unsigned int sched_flags)
+{
+	struct cpufreq_interactive_policyinfo *ppol;
+	unsigned long flags;
+
+	ppol = *this_cpu_ptr(&polinfo);
+	spin_lock_irqsave(&ppol->irq_work_lock, flags);
+	/*
+	 * The irq-work may not be allowed to be queued up right now
+	 * because work has already been queued up or is in progress.
+	 */
+	if (ppol->work_in_progress ||
+	    sched_flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)
+		goto out;
+
+	ppol->work_in_progress = true;
+	irq_work_queue(&ppol->irq_work);
+out:
+	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+	int i;
+
+	for_each_cpu(i, policy->cpus)
+		cpufreq_remove_update_util_hook(i);
+
+	synchronize_sched();
+}
+
+static void gov_set_update_util(struct cpufreq_policy *policy)
+{
+	struct update_util_data *util;
+	int cpu;
+
+	for_each_cpu(cpu, policy->cpus) {
+		util = &per_cpu(update_util, cpu);
+		cpufreq_add_update_util_hook(cpu, util, update_util_handler);
+	}
+}
+
+/*
+ * The caller shall take enable_sem write semaphore to avoid any timer race.
+ * The policy_slack_timer must be deactivated when calling this function.
+ */
+static void cpufreq_interactive_timer_start(
+	struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	gov_set_update_util(ppol->policy);
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		pcpu->time_in_idle =
+			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
+					  tunables->io_is_busy);
+		pcpu->cputime_speedadj = 0;
+		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	}
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+
+static unsigned int freq_to_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+		;
+
+	ret = tunables->above_hispeed_delay[i];
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static unsigned int freq_to_targetload(
+	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 
 	for (i = 0; i < tunables->ntarget_loads - 1 &&
-	     freq >= tunables->target_loads[i + 1]; i += 2)
+		    freq >= tunables->target_loads[i+1]; i += 2)
 		;
 
 	ret = tunables->target_loads[i];
@@ -256,76 +345,102 @@
 	return ret;
 }
 
+#define DEFAULT_MAX_LOAD 100
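+/*
+ * Return the target load configured for @freq on @cpu, or DEFAULT_MAX_LOAD
+ * when the CPU is not governed by interactive or no tunables are cached.
+ */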
+u32 get_freq_max_load(int cpu, unsigned int freq)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+
+	if (!cpumask_test_cpu(cpu, &controlled_cpus))
+		return DEFAULT_MAX_LOAD;
+
+	if (have_governor_per_policy()) {
+		if (!ppol || !ppol->cached_tunables)
+			return DEFAULT_MAX_LOAD;
+		return freq_to_targetload(ppol->cached_tunables, freq);
+	}
+
+	if (!cached_common_tunables)
+		return DEFAULT_MAX_LOAD;
+	return freq_to_targetload(cached_common_tunables, freq);
+}
+
 /*
  * If increasing frequencies never map to a lower target load then
  * choose_freq() will find the minimum frequency that does not exceed its
  * target load given the current load.
  */
-static unsigned int choose_freq(struct interactive_cpu *icpu,
-				unsigned int loadadjfreq)
+static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
+		unsigned int loadadjfreq)
 {
-	struct cpufreq_policy *policy = icpu->ipolicy->policy;
-	struct cpufreq_frequency_table *freq_table = policy->freq_table;
-	unsigned int prevfreq, freqmin = 0, freqmax = UINT_MAX, tl;
-	unsigned int freq = policy->cur;
+	unsigned int freq = pcpu->policy->cur;
+	unsigned int prevfreq, freqmin, freqmax;
+	unsigned int tl;
 	int index;
 
+	freqmin = 0;
+	freqmax = UINT_MAX;
+
 	do {
 		prevfreq = freq;
-		tl = freq_to_targetload(icpu->ipolicy->tunables, freq);
+		tl = freq_to_targetload(pcpu->policy->governor_data, freq);
 
 		/*
 		 * Find the lowest frequency where the computed load is less
 		 * than or equal to the target load.
 		 */
 
-		index = cpufreq_frequency_table_target(policy, loadadjfreq / tl,
+		index = cpufreq_frequency_table_target(&pcpu->p_nolim,
+						       loadadjfreq / tl,
 						       CPUFREQ_RELATION_L);
-
-		freq = freq_table[index].frequency;
+		freq = pcpu->freq_table[index].frequency;
 
 		if (freq > prevfreq) {
-			/* The previous frequency is too low */
+			/* The previous frequency is too low. */
 			freqmin = prevfreq;
 
-			if (freq < freqmax)
-				continue;
-
-			/* Find highest frequency that is less than freqmax */
-			index = cpufreq_frequency_table_target(policy,
-					freqmax - 1, CPUFREQ_RELATION_H);
-
-			freq = freq_table[index].frequency;
-
-			if (freq == freqmin) {
+			if (freq >= freqmax) {
 				/*
-				 * The first frequency below freqmax has already
-				 * been found to be too low. freqmax is the
-				 * lowest speed we found that is fast enough.
+				 * Find the highest frequency that is less
+				 * than freqmax.
 				 */
-				freq = freqmax;
-				break;
+				index = cpufreq_frequency_table_target(
+					    &pcpu->p_nolim,
+					    freqmax - 1, CPUFREQ_RELATION_H);
+				freq = pcpu->freq_table[index].frequency;
+
+				if (freq == freqmin) {
+					/*
+					 * The first frequency below freqmax
+					 * has already been found to be too
+					 * low.  freqmax is the lowest speed
+					 * we found that is fast enough.
+					 */
+					freq = freqmax;
+					break;
+				}
 			}
 		} else if (freq < prevfreq) {
 			/* The previous frequency is high enough. */
 			freqmax = prevfreq;
 
-			if (freq > freqmin)
-				continue;
+			if (freq <= freqmin) {
+				/*
+				 * Find the lowest frequency that is higher
+				 * than freqmin.
+				 */
+				index = cpufreq_frequency_table_target(
+					    &pcpu->p_nolim,
+					    freqmin + 1, CPUFREQ_RELATION_L);
+				freq = pcpu->freq_table[index].frequency;
 
-			/* Find lowest frequency that is higher than freqmin */
-			index = cpufreq_frequency_table_target(policy,
-					freqmin + 1, CPUFREQ_RELATION_L);
-
-			freq = freq_table[index].frequency;
-
-			/*
-			 * If freqmax is the first frequency above
-			 * freqmin then we have already found that
-			 * this speed is fast enough.
-			 */
-			if (freq == freqmax)
-				break;
+				/*
+				 * If freqmax is the first frequency above
+				 * freqmin then we have already found that
+				 * this speed is fast enough.
+				 */
+				if (freq == freqmax)
+					break;
+			}
 		}
 
 		/* If same frequency chosen as previous then done. */
@@ -334,97 +449,216 @@
 	return freq;
 }
 
-static u64 update_load(struct interactive_cpu *icpu, int cpu)
+static u64 update_load(int cpu)
 {
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	unsigned int delta_idle, delta_time;
-	u64 now_idle, now, active_time;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 now;
+	u64 now_idle;
+	unsigned int delta_idle;
+	unsigned int delta_time;
+	u64 active_time;
 
 	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
-	delta_idle = (unsigned int)(now_idle - icpu->time_in_idle);
-	delta_time = (unsigned int)(now - icpu->time_in_idle_timestamp);
+	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
 
 	if (delta_time <= delta_idle)
 		active_time = 0;
 	else
 		active_time = delta_time - delta_idle;
 
-	icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur;
+	pcpu->cputime_speedadj += active_time * ppol->policy->cur;
 
-	icpu->time_in_idle = now_idle;
-	icpu->time_in_idle_timestamp = now;
-
+	pcpu->time_in_idle = now_idle;
+	pcpu->time_in_idle_timestamp = now;
 	return now;
 }
 
-/* Re-evaluate load to see if a frequency change is required or not */
-static void eval_target_freq(struct interactive_cpu *icpu)
+static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
+				   unsigned long busy)
 {
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	struct cpufreq_policy *policy = icpu->ipolicy->policy;
-	struct cpufreq_frequency_table *freq_table = policy->freq_table;
-	u64 cputime_speedadj, now, max_fvtime;
-	unsigned int new_freq, loadadjfreq, index, delta_time;
-	unsigned long flags;
+	int prev_load;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+
+	prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
+				busy, tunables->timer_rate);
+	return prev_load;
+}
+
+#define NEW_TASK_RATIO 75
+#define PRED_TOLERANCE_PCT 10
+static void cpufreq_interactive_timer(int data)
+{
+	s64 now;
+	unsigned int delta_time;
+	u64 cputime_speedadj;
 	int cpu_load;
-	int cpu = smp_processor_id();
+	int pol_load = 0;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	struct sched_load *sl = ppol->sl;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	unsigned int new_freq;
+	unsigned int prev_laf = 0, t_prevlaf;
+	unsigned int pred_laf = 0, t_predlaf = 0;
+	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
+	unsigned int index;
+	unsigned long flags;
+	unsigned long max_cpu;
+	int i, cpu;
+	int new_load_pct = 0;
+	int prev_l, pred_l = 0;
+	struct cpufreq_govinfo govinfo;
+	bool skip_hispeed_logic, skip_min_sample_time;
+	bool jump_to_max_no_ts = false;
+	bool jump_to_max = false;
 
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	now = update_load(icpu, smp_processor_id());
-	delta_time = (unsigned int)(now - icpu->cputime_speedadj_timestamp);
-	cputime_speedadj = icpu->cputime_speedadj;
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-
-	if (WARN_ON_ONCE(!delta_time))
+	if (!down_read_trylock(&ppol->enable_sem))
 		return;
-
-	spin_lock_irqsave(&icpu->target_freq_lock, flags);
-	do_div(cputime_speedadj, delta_time);
-	loadadjfreq = (unsigned int)cputime_speedadj * 100;
-	cpu_load = loadadjfreq / policy->cur;
-	tunables->boosted = tunables->boost ||
-			    now < tunables->boostpulse_endtime;
-
-	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
-		if (policy->cur < tunables->hispeed_freq) {
-			new_freq = tunables->hispeed_freq;
-		} else {
-			new_freq = choose_freq(icpu, loadadjfreq);
-
-			if (new_freq < tunables->hispeed_freq)
-				new_freq = tunables->hispeed_freq;
-		}
-	} else {
-		new_freq = choose_freq(icpu, loadadjfreq);
-		if (new_freq > tunables->hispeed_freq &&
-		    policy->cur < tunables->hispeed_freq)
-			new_freq = tunables->hispeed_freq;
-	}
-
-	if (policy->cur >= tunables->hispeed_freq &&
-	    new_freq > policy->cur &&
-	    now - icpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, policy->cur)) {
-		trace_cpufreq_interactive_notyet(cpu, cpu_load,
-				icpu->target_freq, policy->cur, new_freq);
+	if (!ppol->governor_enabled)
 		goto exit;
+
+	now = ktime_to_us(ktime_get());
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	spin_lock(&ppol->load_lock);
+
+	skip_hispeed_logic =
+		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
+	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
+	ppol->notif_pending = false;
+	now = ktime_to_us(ktime_get());
+	ppol->last_evaluated_jiffy = get_jiffies_64();
+
+	if (tunables->use_sched_load)
+		sched_get_cpus_busy(sl, ppol->policy->cpus);
+	max_cpu = cpumask_first(ppol->policy->cpus);
+	i = 0;
+	for_each_cpu(cpu, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, cpu);
+		if (tunables->use_sched_load) {
+			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
+			prev_l = t_prevlaf / ppol->target_freq;
+			if (tunables->enable_prediction) {
+				t_predlaf = sl_busy_to_laf(ppol,
+						sl[i].predicted_load);
+				pred_l = t_predlaf / ppol->target_freq;
+			}
+			if (sl[i].prev_load)
+				new_load_pct = sl[i].new_task_load * 100 /
+							sl[i].prev_load;
+			else
+				new_load_pct = 0;
+		} else {
+			now = update_load(cpu);
+			delta_time = (unsigned int)
+				(now - pcpu->cputime_speedadj_timestamp);
+			if (WARN_ON_ONCE(!delta_time))
+				continue;
+			cputime_speedadj = pcpu->cputime_speedadj;
+			do_div(cputime_speedadj, delta_time);
+			t_prevlaf = (unsigned int)cputime_speedadj * 100;
+			prev_l = t_prevlaf / ppol->target_freq;
+		}
+
+		/* find max of loadadjfreq inside policy */
+		if (t_prevlaf > prev_laf) {
+			prev_laf = t_prevlaf;
+			max_cpu = cpu;
+		}
+		pred_laf = max(t_predlaf, pred_laf);
+
+		cpu_load = max(prev_l, pred_l);
+		pol_load = max(pol_load, cpu_load);
+		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
+						  prev_l, pred_l);
+
+		/* save loadadjfreq for notification */
+		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);
+
+		/* detect heavy new task and jump to policy->max */
+		if (prev_l >= tunables->go_hispeed_load &&
+		    new_load_pct >= NEW_TASK_RATIO) {
+			skip_hispeed_logic = true;
+			jump_to_max = true;
+		}
+		i++;
+	}
+	spin_unlock(&ppol->load_lock);
+
+	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+	prev_chfreq = choose_freq(ppol, prev_laf);
+	pred_chfreq = choose_freq(ppol, pred_laf);
+	chosen_freq = max(prev_chfreq, pred_chfreq);
+
+	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
+
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis &&
+	    pol_load >= tunables->go_hispeed_load &&
+	    ppol->target_freq < ppol->policy->max) {
+		skip_hispeed_logic = true;
+		skip_min_sample_time = true;
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
 	}
 
-	icpu->loc_hispeed_val_time = now;
+	new_freq = chosen_freq;
+	if (jump_to_max_no_ts || jump_to_max) {
+		new_freq = ppol->policy->cpuinfo.max_freq;
+	} else if (!skip_hispeed_logic) {
+		if (pol_load >= tunables->go_hispeed_load ||
+		    tunables->boosted) {
+			if (ppol->target_freq < tunables->hispeed_freq)
+				new_freq = tunables->hispeed_freq;
+			else
+				new_freq = max(new_freq,
+					       tunables->hispeed_freq);
+		}
+	}
 
-	index = cpufreq_frequency_table_target(policy, new_freq,
-					       CPUFREQ_RELATION_L);
-	new_freq = freq_table[index].frequency;
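+	/* Stay at or above hispeed_freq while max_freq_hysteresis is active. */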
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis)
+		new_freq = max(tunables->hispeed_freq, new_freq);
+
+	if (!skip_hispeed_logic &&
+	    ppol->target_freq >= tunables->hispeed_freq &&
+	    new_freq > ppol->target_freq &&
+	    now - ppol->hispeed_validate_time <
+	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
+		trace_cpufreq_interactive_notyet(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	ppol->hispeed_validate_time = now;
+
+	index = cpufreq_frequency_table_target(&ppol->p_nolim, new_freq,
+					   CPUFREQ_RELATION_L);
+	new_freq = ppol->freq_table[index].frequency;
 
 	/*
 	 * Do not scale below floor_freq unless we have been at or above the
 	 * floor frequency for the minimum sample time since last validated.
 	 */
-	max_fvtime = max(icpu->pol_floor_val_time, icpu->loc_floor_val_time);
-	if (new_freq < icpu->floor_freq && icpu->target_freq >= policy->cur) {
-		if (now - max_fvtime < tunables->min_sample_time) {
-			trace_cpufreq_interactive_notyet(cpu, cpu_load,
-				icpu->target_freq, policy->cur, new_freq);
-			goto exit;
+	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
+		if (now - ppol->floor_validate_time <
+				tunables->min_sample_time) {
+			trace_cpufreq_interactive_notyet(
+				max_cpu, pol_load, ppol->target_freq,
+				ppol->policy->cur, new_freq);
+			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+			goto rearm;
 		}
 	}
 
@@ -433,114 +667,62 @@
 	 * or above the selected frequency for a minimum of min_sample_time,
 	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
 	 * allow the speed to drop as soon as the boostpulse duration expires
-	 * (or the indefinite boost is turned off).
+	 * (or the indefinite boost is turned off). If policy->max is restored
+	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
+	 * could incorrectly extend the duration of max_freq_hysteresis by
+	 * min_sample_time.
 	 */
 
-	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
-		icpu->floor_freq = new_freq;
-		if (icpu->target_freq >= policy->cur || new_freq >= policy->cur)
-			icpu->loc_floor_val_time = now;
+	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
+	    && !jump_to_max_no_ts) {
+		ppol->floor_freq = new_freq;
+		ppol->floor_validate_time = now;
 	}
 
-	if (icpu->target_freq == new_freq &&
-	    icpu->target_freq <= policy->cur) {
-		trace_cpufreq_interactive_already(cpu, cpu_load,
-			icpu->target_freq, policy->cur, new_freq);
-		goto exit;
+	if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
+		ppol->max_freq_hyst_start_time = now;
+
+	if (ppol->target_freq == new_freq &&
+			ppol->target_freq <= ppol->policy->cur) {
+		trace_cpufreq_interactive_already(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
 	}
 
-	trace_cpufreq_interactive_target(cpu, cpu_load, icpu->target_freq,
-					 policy->cur, new_freq);
+	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
+					 ppol->policy->cur, new_freq);
 
-	icpu->target_freq = new_freq;
-	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
-
+	ppol->target_freq = new_freq;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
 	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
-	cpumask_set_cpu(cpu, &speedchange_cpumask);
+	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+	wake_up_process_no_notif(speedchange_task);
 
-	wake_up_process(speedchange_task);
-	return;
+rearm:
+	cpufreq_interactive_timer_resched(data, false);
+
+	/*
+	 * Send govinfo notification.
+	 * Govinfo notification could potentially wake up another thread
+	 * managed by its clients. Thread wakeups might trigger a load
+	 * change callback that executes this function again. Therefore,
+	 * no spinlock may be held while sending the notification.
+	 */
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		govinfo.cpu = i;
+		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
+		govinfo.sampling_rate_us = tunables->timer_rate;
+		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
+					   CPUFREQ_LOAD_CHANGE, &govinfo);
+	}
 
 exit:
-	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
-}
-
-static void cpufreq_interactive_update(struct interactive_cpu *icpu)
-{
-	eval_target_freq(icpu);
-	slack_timer_resched(icpu, smp_processor_id(), true);
-}
-
-static void cpufreq_interactive_idle_end(void)
-{
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu,
-						smp_processor_id());
-
-	if (!down_read_trylock(&icpu->enable_sem))
-		return;
-
-	if (icpu->ipolicy) {
-		/*
-		 * We haven't sampled load for more than sampling_rate time, do
-		 * it right now.
-		 */
-		if (time_after_eq(jiffies, icpu->next_sample_jiffies))
-			cpufreq_interactive_update(icpu);
-	}
-
-	up_read(&icpu->enable_sem);
-}
-
-static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
-						unsigned int *pmax_freq,
-						u64 *phvt, u64 *pfvt)
-{
-	struct interactive_cpu *icpu;
-	u64 hvt = ~0ULL, fvt = 0;
-	unsigned int max_freq = 0, i;
-
-	for_each_cpu(i, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, i);
-
-		fvt = max(fvt, icpu->loc_floor_val_time);
-		if (icpu->target_freq > max_freq) {
-			max_freq = icpu->target_freq;
-			hvt = icpu->loc_hispeed_val_time;
-		} else if (icpu->target_freq == max_freq) {
-			hvt = min(hvt, icpu->loc_hispeed_val_time);
-		}
-	}
-
-	*pmax_freq = max_freq;
-	*phvt = hvt;
-	*pfvt = fvt;
-}
-
-static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
-					   struct cpufreq_policy *policy)
-{
-	struct interactive_cpu *icpu;
-	u64 hvt, fvt;
-	unsigned int max_freq;
-	int i;
-
-	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);
-
-	for_each_cpu(i, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, i);
-		icpu->pol_floor_val_time = fvt;
-	}
-
-	if (max_freq != policy->cur) {
-		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
-		for_each_cpu(i, policy->cpus) {
-			icpu = &per_cpu(interactive_cpu, i);
-			icpu->pol_hispeed_val_time = hvt;
-		}
-	}
-
-	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
+	up_read(&ppol->enable_sem);
+	return;
 }
 
 static int cpufreq_interactive_speedchange_task(void *data)
@@ -548,112 +730,181 @@
 	unsigned int cpu;
 	cpumask_t tmp_mask;
 	unsigned long flags;
+	struct cpufreq_interactive_policyinfo *ppol;
 
-again:
-	set_current_state(TASK_INTERRUPTIBLE);
-	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
-
-	if (cpumask_empty(&speedchange_cpumask)) {
-		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
-		schedule();
-
-		if (kthread_should_stop())
-			return 0;
-
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+		if (cpumask_empty(&speedchange_cpumask)) {
+			spin_unlock_irqrestore(&speedchange_cpumask_lock,
+					       flags);
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+
+			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		tmp_mask = speedchange_cpumask;
+		cpumask_clear(&speedchange_cpumask);
+		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+		for_each_cpu(cpu, &tmp_mask) {
+			ppol = per_cpu(polinfo, cpu);
+			if (!down_read_trylock(&ppol->enable_sem))
+				continue;
+			if (!ppol->governor_enabled) {
+				up_read(&ppol->enable_sem);
+				continue;
+			}
+
+			if (ppol->target_freq != ppol->policy->cur)
+				__cpufreq_driver_target(ppol->policy,
+							ppol->target_freq,
+							CPUFREQ_RELATION_H);
+			trace_cpufreq_interactive_setspeed(cpu,
+						     ppol->target_freq,
+						     ppol->policy->cur);
+			up_read(&ppol->enable_sem);
+		}
 	}
 
-	set_current_state(TASK_RUNNING);
-	tmp_mask = speedchange_cpumask;
-	cpumask_clear(&speedchange_cpumask);
-	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
-
-	for_each_cpu(cpu, &tmp_mask) {
-		struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu);
-		struct cpufreq_policy *policy = icpu->ipolicy->policy;
-
-		if (unlikely(!down_read_trylock(&icpu->enable_sem)))
-			continue;
-
-		if (likely(icpu->ipolicy))
-			cpufreq_interactive_adjust_cpu(cpu, policy);
-
-		up_read(&icpu->enable_sem);
-	}
-
-	goto again;
+	return 0;
 }
 
-static void cpufreq_interactive_boost(struct interactive_tunables *tunables)
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
 {
-	struct interactive_policy *ipolicy;
-	struct cpufreq_policy *policy;
-	struct interactive_cpu *icpu;
-	unsigned long flags[2];
-	bool wakeup = false;
 	int i;
+	int anyboost = 0;
+	unsigned long flags[2];
+	struct cpufreq_interactive_policyinfo *ppol;
 
 	tunables->boosted = true;
 
 	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
 
-	for_each_ipolicy(ipolicy) {
-		policy = ipolicy->policy;
+	for_each_online_cpu(i) {
+		ppol = per_cpu(polinfo, i);
+		if (!ppol || tunables != ppol->policy->governor_data)
+			continue;
 
-		for_each_cpu(i, policy->cpus) {
-			icpu = &per_cpu(interactive_cpu, i);
-
-			if (!down_read_trylock(&icpu->enable_sem))
-				continue;
-
-			if (!icpu->ipolicy) {
-				up_read(&icpu->enable_sem);
-				continue;
-			}
-
-			spin_lock_irqsave(&icpu->target_freq_lock, flags[1]);
-			if (icpu->target_freq < tunables->hispeed_freq) {
-				icpu->target_freq = tunables->hispeed_freq;
-				cpumask_set_cpu(i, &speedchange_cpumask);
-				icpu->pol_hispeed_val_time = ktime_to_us(ktime_get());
-				wakeup = true;
-			}
-			spin_unlock_irqrestore(&icpu->target_freq_lock, flags[1]);
-
-			up_read(&icpu->enable_sem);
+		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
+		if (ppol->target_freq < tunables->hispeed_freq) {
+			ppol->target_freq = tunables->hispeed_freq;
+			cpumask_set_cpu(i, &speedchange_cpumask);
+			ppol->hispeed_validate_time =
+				ktime_to_us(ktime_get());
+			anyboost = 1;
 		}
+
+		/*
+		 * Set the floor frequency and record when it was last
+		 * validated.
+		 */
+
+		ppol->floor_freq = tunables->hispeed_freq;
+		ppol->floor_validate_time = ktime_to_us(ktime_get());
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
+		break;
 	}
 
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
 
-	if (wakeup)
-		wake_up_process(speedchange_task);
+	if (anyboost)
+		wake_up_process_no_notif(speedchange_task);
 }
 
-static int cpufreq_interactive_notifier(struct notifier_block *nb,
-					unsigned long val, void *data)
+static int load_change_callback(struct notifier_block *nb, unsigned long val,
+				void *data)
 {
-	struct cpufreq_freqs *freq = data;
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu, freq->cpu);
+	unsigned long cpu = (unsigned long) data;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables;
 	unsigned long flags;
 
-	if (val != CPUFREQ_POSTCHANGE)
+	if (!ppol || ppol->reject_notification)
 		return 0;
 
-	if (!down_read_trylock(&icpu->enable_sem))
+	if (!down_read_trylock(&ppol->enable_sem))
 		return 0;
+	if (!ppol->governor_enabled)
+		goto exit;
 
-	if (!icpu->ipolicy) {
-		up_read(&icpu->enable_sem);
+	tunables = ppol->policy->governor_data;
+	if (!tunables->use_sched_load || !tunables->use_migration_notif)
+		goto exit;
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	ppol->notif_pending = true;
+	ppol->notif_cpu = cpu;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+
+	if (!hrtimer_is_queued(&ppol->notif_timer))
+		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
+			      HRTIMER_MODE_REL);
+exit:
+	up_read(&ppol->enable_sem);
+	return 0;
+}
+
+static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
+{
+	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
+			struct cpufreq_interactive_policyinfo, notif_timer);
+	int cpu;
+
+	if (!down_read_trylock(&ppol->enable_sem))
+		return 0;
+	if (!ppol->governor_enabled) {
+		up_read(&ppol->enable_sem);
 		return 0;
 	}
+	cpu = ppol->notif_cpu;
+	trace_cpufreq_interactive_load_change(cpu);
+	del_timer(&ppol->policy_slack_timer);
+	cpufreq_interactive_timer(cpu);
 
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	update_load(icpu, freq->cpu);
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
+	up_read(&ppol->enable_sem);
+	return HRTIMER_NORESTART;
+}
 
-	up_read(&icpu->enable_sem);
+static struct notifier_block load_notifier_block = {
+	.notifier_call = load_change_callback,
+};
 
+static int cpufreq_interactive_notifier(
+	struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_interactive_policyinfo *ppol;
+	int cpu;
+	unsigned long flags;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		ppol = per_cpu(polinfo, freq->cpu);
+		if (!ppol)
+			return 0;
+		if (!down_read_trylock(&ppol->enable_sem))
+			return 0;
+		if (!ppol->governor_enabled) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+
+		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+		spin_lock_irqsave(&ppol->load_lock, flags);
+		for_each_cpu(cpu, ppol->policy->cpus)
+			update_load(cpu);
+		spin_unlock_irqrestore(&ppol->load_lock, flags);
+
+		up_read(&ppol->enable_sem);
+	}
 	return 0;
 }
 
@@ -663,26 +914,29 @@
 
 static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
 {
-	const char *cp = buf;
-	int ntokens = 1, i = 0;
+	const char *cp;
+	int i;
+	int ntokens = 1;
 	unsigned int *tokenized_data;
 	int err = -EINVAL;
 
+	cp = buf;
 	while ((cp = strpbrk(cp + 1, " :")))
 		ntokens++;
 
 	if (!(ntokens & 0x1))
 		goto err;
 
-	tokenized_data = kcalloc(ntokens, sizeof(*tokenized_data), GFP_KERNEL);
+	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
 	if (!tokenized_data) {
 		err = -ENOMEM;
 		goto err;
 	}
 
 	cp = buf;
+	i = 0;
 	while (i < ntokens) {
-		if (kstrtouint(cp, 0, &tokenized_data[i++]) < 0)
+		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
 			goto err_kfree;
 
 		cp = strpbrk(cp, " :");
@@ -703,25 +957,13 @@
 	return ERR_PTR(err);
 }
 
-/* Interactive governor sysfs interface */
-static struct interactive_tunables *to_tunables(struct gov_attr_set *attr_set)
+static ssize_t show_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	char *buf)
 {
-	return container_of(attr_set, struct interactive_tunables, attr_set);
-}
-
-#define show_one(file_name, type)					\
-static ssize_t show_##file_name(struct gov_attr_set *attr_set, char *buf) \
-{									\
-	struct interactive_tunables *tunables = to_tunables(attr_set);	\
-	return sprintf(buf, type "\n", tunables->file_name);		\
-}
-
-static ssize_t show_target_loads(struct gov_attr_set *attr_set, char *buf)
-{
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long flags;
-	ssize_t ret = 0;
 	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
 
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 
@@ -731,21 +973,20 @@
 
 	sprintf(buf + ret - 1, "\n");
 	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
-
 	return ret;
 }
 
-static ssize_t store_target_loads(struct gov_attr_set *attr_set,
-				  const char *buf, size_t count)
+static ssize_t store_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned int *new_target_loads;
-	unsigned long flags;
 	int ntokens;
+	unsigned int *new_target_loads = NULL;
+	unsigned long flags;
 
 	new_target_loads = get_tokenized_data(buf, &ntokens);
 	if (IS_ERR(new_target_loads))
-		return PTR_ERR(new_target_loads);
+		return PTR_RET(new_target_loads);
 
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 	if (tunables->target_loads != default_target_loads)
@@ -754,16 +995,17 @@
 	tunables->ntarget_loads = ntokens;
 	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
 
+	sched_update_freq_max_load(&controlled_cpus);
+
 	return count;
 }
 
-static ssize_t show_above_hispeed_delay(struct gov_attr_set *attr_set,
-					char *buf)
+static ssize_t show_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long flags;
-	ssize_t ret = 0;
 	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
 
 	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
 
@@ -774,21 +1016,20 @@
 
 	sprintf(buf + ret - 1, "\n");
 	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return ret;
 }
 
-static ssize_t store_above_hispeed_delay(struct gov_attr_set *attr_set,
-					 const char *buf, size_t count)
+static ssize_t store_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
+	int ntokens;
 	unsigned int *new_above_hispeed_delay = NULL;
 	unsigned long flags;
-	int ntokens;
 
 	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
 	if (IS_ERR(new_above_hispeed_delay))
-		return PTR_ERR(new_above_hispeed_delay);
+		return PTR_RET(new_above_hispeed_delay);
 
 	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
 	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
@@ -796,71 +1037,105 @@
 	tunables->above_hispeed_delay = new_above_hispeed_delay;
 	tunables->nabove_hispeed_delay = ntokens;
 	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return count;
+
 }
 
-static ssize_t store_hispeed_freq(struct gov_attr_set *attr_set,
-				  const char *buf, size_t count)
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long int val;
+	return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	long unsigned int val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->hispeed_freq = val;
-
 	return count;
 }
 
-static ssize_t store_go_hispeed_load(struct gov_attr_set *attr_set,
-				     const char *buf, size_t count)
+#define show_store_one(file_name)					\
+static ssize_t show_##file_name(					\
+	struct cpufreq_interactive_tunables *tunables, char *buf)	\
+{									\
+	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
+}									\
+static ssize_t store_##file_name(					\
+		struct cpufreq_interactive_tunables *tunables,		\
+		const char *buf, size_t count)				\
+{									\
+	int ret;							\
+	unsigned long int val;						\
+									\
+	ret = kstrtoul(buf, 0, &val);				\
+	if (ret < 0)							\
+		return ret;						\
+	tunables->file_name = val;					\
+	return count;							\
+}
+show_store_one(max_freq_hysteresis);
+show_store_one(align_windows);
+show_store_one(ignore_hispeed_on_notif);
+show_store_one(fast_ramp_down);
+show_store_one(enable_prediction);
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->go_hispeed_load = val;
-
 	return count;
 }
 
-static ssize_t store_min_sample_time(struct gov_attr_set *attr_set,
-				     const char *buf, size_t count)
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->min_sample_time = val;
-
 	return count;
 }
 
-static ssize_t show_timer_rate(struct gov_attr_set *attr_set, char *buf)
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-
-	return sprintf(buf, "%lu\n", tunables->sampling_rate);
+	return sprintf(buf, "%lu\n", tunables->timer_rate);
 }
 
-static ssize_t store_timer_rate(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val, val_round;
 	int ret;
+	unsigned long val, val_round;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
@@ -870,43 +1145,62 @@
 	if (val != val_round)
 		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
 			val_round);
+	tunables->timer_rate = val_round;
 
-	tunables->sampling_rate = val_round;
+	if (!tunables->use_sched_load)
+		return count;
+
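+	/*
+	 * The scheduler load window is global, so keep timer_rate in sync
+	 * across every policy that samples load from the scheduler.
+	 */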
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->timer_rate = val_round;
+	}
+	set_window_helper(tunables);
 
 	return count;
 }
 
-static ssize_t store_timer_slack(struct gov_attr_set *attr_set, const char *buf,
-				 size_t count)
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtol(buf, 10, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->timer_slack = val;
-	update_slack_delay(tunables);
-
+	tunables->timer_slack_val = val;
 	return count;
 }
 
-static ssize_t store_boost(struct gov_attr_set *attr_set, const char *buf,
-			   size_t count)
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+			  char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+			   const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->boost = val;
+	tunables->boost_val = val;
 
-	if (tunables->boost) {
+	if (tunables->boost_val) {
 		trace_cpufreq_interactive_boost("on");
 		if (!tunables->boosted)
 			cpufreq_interactive_boost(tunables);
@@ -918,111 +1212,483 @@
 	return count;
 }
 
-static ssize_t store_boostpulse(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+				const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
 	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
-					tunables->boostpulse_duration;
+		tunables->boostpulse_duration_val;
 	trace_cpufreq_interactive_boost("pulse");
 	if (!tunables->boosted)
 		cpufreq_interactive_boost(tunables);
-
 	return count;
 }
 
-static ssize_t store_boostpulse_duration(struct gov_attr_set *attr_set,
-					 const char *buf, size_t count)
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->boostpulse_duration = val;
-
+	tunables->boostpulse_duration_val = val;
 	return count;
 }
 
-static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->io_is_busy = val;
 
+	if (!tunables->use_sched_load)
+		return count;
+
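+	/*
+	 * io_is_busy also feeds the global scheduler load window, so
+	 * propagate it to every sched-load policy and to the scheduler.
+	 */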
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->io_is_busy = val;
+	}
+	sched_set_io_is_busy(val);
+
 	return count;
 }
 
-show_one(hispeed_freq, "%u");
-show_one(go_hispeed_load, "%lu");
-show_one(min_sample_time, "%lu");
-show_one(timer_slack, "%lu");
-show_one(boost, "%u");
-show_one(boostpulse_duration, "%u");
-show_one(io_is_busy, "%u");
-
-gov_attr_rw(target_loads);
-gov_attr_rw(above_hispeed_delay);
-gov_attr_rw(hispeed_freq);
-gov_attr_rw(go_hispeed_load);
-gov_attr_rw(min_sample_time);
-gov_attr_rw(timer_rate);
-gov_attr_rw(timer_slack);
-gov_attr_rw(boost);
-gov_attr_wo(boostpulse);
-gov_attr_rw(boostpulse_duration);
-gov_attr_rw(io_is_busy);
-
-static struct attribute *interactive_attributes[] = {
-	&target_loads.attr,
-	&above_hispeed_delay.attr,
-	&hispeed_freq.attr,
-	&go_hispeed_load.attr,
-	&min_sample_time.attr,
-	&timer_rate.attr,
-	&timer_slack.attr,
-	&boost.attr,
-	&boostpulse.attr,
-	&boostpulse_duration.attr,
-	&io_is_busy.attr,
-	NULL
-};
-
-static struct kobj_type interactive_tunables_ktype = {
-	.default_attrs = interactive_attributes,
-	.sysfs_ops = &governor_sysfs_ops,
-};
-
-static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
-					     unsigned long val, void *data)
+static int cpufreq_interactive_enable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
 {
-	if (val == IDLE_END)
-		cpufreq_interactive_idle_end();
+	int rc = 0, j;
+	struct cpufreq_interactive_tunables *t;
 
+	mutex_lock(&sched_lock);
+
+	set_window_count++;
+	if (set_window_count > 1) {
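+		/*
+		 * Another policy already configured the sched window;
+		 * inherit its timer_rate and io_is_busy settings.
+		 */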
+		for_each_possible_cpu(j) {
+			if (!per_cpu(polinfo, j))
+				continue;
+			t = per_cpu(polinfo, j)->cached_tunables;
+			if (t && t->use_sched_load) {
+				tunables->timer_rate = t->timer_rate;
+				tunables->io_is_busy = t->io_is_busy;
+				break;
+			}
+		}
+	} else {
+		rc = set_window_helper(tunables);
+		if (rc) {
+			pr_err("%s: Failed to set sched window\n", __func__);
+			set_window_count--;
+			goto out;
+		}
+		sched_set_io_is_busy(tunables->io_is_busy);
+	}
+
+	if (!tunables->use_migration_notif)
+		goto out;
+
+	migration_register_count++;
+	if (migration_register_count > 1)
+		goto out;
+	else
+		atomic_notifier_chain_register(&load_alert_notifier_head,
+						&load_notifier_block);
+out:
+	mutex_unlock(&sched_lock);
+	return rc;
+}
+
+static int cpufreq_interactive_disable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
+{
+	mutex_lock(&sched_lock);
+
+	if (tunables->use_migration_notif) {
+		migration_register_count--;
+		if (migration_register_count < 1)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	set_window_count--;
+
+	mutex_unlock(&sched_lock);
 	return 0;
 }
 
-static struct notifier_block cpufreq_interactive_idle_nb = {
-	.notifier_call = cpufreq_interactive_idle_notifier,
+static ssize_t show_use_sched_load(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
+}
+
+static ssize_t store_use_sched_load(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_sched_load == (bool) val)
+		return count;
+
+	tunables->use_sched_load = val;
+
+	if (val)
+		ret = cpufreq_interactive_enable_sched_input(tunables);
+	else
+		ret = cpufreq_interactive_disable_sched_input(tunables);
+
+	if (ret) {
+		tunables->use_sched_load = !val;
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t show_use_migration_notif(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			tunables->use_migration_notif);
+}
+
+static ssize_t store_use_migration_notif(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_migration_notif == (bool) val)
+		return count;
+	tunables->use_migration_notif = val;
+
+	if (!tunables->use_sched_load)
+		return count;
+
+	mutex_lock(&sched_lock);
+	if (val) {
+		migration_register_count++;
+		if (migration_register_count == 1)
+			atomic_notifier_chain_register(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	} else {
+		migration_register_count--;
+		if (!migration_register_count)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	mutex_unlock(&sched_lock);
+
+	return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)					\
+static ssize_t show_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
+{									\
+	return show_##file_name(common_tunables, buf);			\
+}									\
+									\
+static ssize_t show_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	return show_##file_name(policy->governor_data, buf);		\
+}
+
+#define store_gov_pol_sys(file_name)					\
+static ssize_t store_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, const char *buf,		\
+	size_t count)							\
+{									\
+	return store_##file_name(common_tunables, buf, count);		\
+}									\
+									\
+static ssize_t store_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+{									\
+	return store_##file_name(policy->governor_data, buf, count);	\
+}
+
+#define show_store_gov_pol_sys(file_name)				\
+show_gov_pol_sys(file_name);						\
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+show_store_gov_pol_sys(use_sched_load);
+show_store_gov_pol_sys(use_migration_notif);
+show_store_gov_pol_sys(max_freq_hysteresis);
+show_store_gov_pol_sys(align_windows);
+show_store_gov_pol_sys(ignore_hispeed_on_notif);
+show_store_gov_pol_sys(fast_ramp_down);
+show_store_gov_pol_sys(enable_prediction);
+
+#define gov_sys_attr_rw(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)					\
+	gov_sys_attr_rw(_name);						\
+	gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(use_sched_load);
+gov_sys_pol_attr_rw(use_migration_notif);
+gov_sys_pol_attr_rw(max_freq_hysteresis);
+gov_sys_pol_attr_rw(align_windows);
+gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
+gov_sys_pol_attr_rw(fast_ramp_down);
+gov_sys_pol_attr_rw(enable_prediction);
+
+static struct global_attr boostpulse_gov_sys =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+	&target_loads_gov_sys.attr,
+	&above_hispeed_delay_gov_sys.attr,
+	&hispeed_freq_gov_sys.attr,
+	&go_hispeed_load_gov_sys.attr,
+	&min_sample_time_gov_sys.attr,
+	&timer_rate_gov_sys.attr,
+	&timer_slack_gov_sys.attr,
+	&boost_gov_sys.attr,
+	&boostpulse_gov_sys.attr,
+	&boostpulse_duration_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
+	&use_sched_load_gov_sys.attr,
+	&use_migration_notif_gov_sys.attr,
+	&max_freq_hysteresis_gov_sys.attr,
+	&align_windows_gov_sys.attr,
+	&ignore_hispeed_on_notif_gov_sys.attr,
+	&fast_ramp_down_gov_sys.attr,
+	&enable_prediction_gov_sys.attr,
+	NULL,
 };
 
+static struct attribute_group interactive_attr_group_gov_sys = {
+	.attrs = interactive_attributes_gov_sys,
+	.name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+	&target_loads_gov_pol.attr,
+	&above_hispeed_delay_gov_pol.attr,
+	&hispeed_freq_gov_pol.attr,
+	&go_hispeed_load_gov_pol.attr,
+	&min_sample_time_gov_pol.attr,
+	&timer_rate_gov_pol.attr,
+	&timer_slack_gov_pol.attr,
+	&boost_gov_pol.attr,
+	&boostpulse_gov_pol.attr,
+	&boostpulse_duration_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	&use_sched_load_gov_pol.attr,
+	&use_migration_notif_gov_pol.attr,
+	&max_freq_hysteresis_gov_pol.attr,
+	&align_windows_gov_pol.attr,
+	&ignore_hispeed_on_notif_gov_pol.attr,
+	&fast_ramp_down_gov_pol.attr,
+	&enable_prediction_gov_pol.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+	.attrs = interactive_attributes_gov_pol,
+	.name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+	if (have_governor_per_policy())
+		return &interactive_attr_group_gov_pol;
+	else
+		return &interactive_attr_group_gov_sys;
+}
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static struct cpufreq_interactive_tunables *alloc_tunable(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_tunables *tunables;
+
+	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+	if (!tunables)
+		return ERR_PTR(-ENOMEM);
+
+	tunables->above_hispeed_delay = default_above_hispeed_delay;
+	tunables->nabove_hispeed_delay =
+		ARRAY_SIZE(default_above_hispeed_delay);
+	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+	tunables->target_loads = default_target_loads;
+	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_rate = DEFAULT_TIMER_RATE;
+	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+	spin_lock_init(&tunables->target_loads_lock);
+	spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+	return tunables;
+}
+
+static void irq_work(struct irq_work *irq_work)
+{
+	struct cpufreq_interactive_policyinfo *ppol;
+	unsigned long flags;
+
+	ppol = container_of(irq_work, struct cpufreq_interactive_policyinfo,
+			    irq_work);
+
+	cpufreq_interactive_timer(smp_processor_id());
+	spin_lock_irqsave(&ppol->irq_work_lock, flags);
+	ppol->work_in_progress = false;
+	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
+}
+
+static struct cpufreq_interactive_policyinfo *get_policyinfo(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_policyinfo *ppol =
+				per_cpu(polinfo, policy->cpu);
+	int i;
+	struct sched_load *sl;
+
+	/* polinfo already allocated for policy, return */
+	if (ppol)
+		return ppol;
+
+	ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
+	if (!ppol)
+		return ERR_PTR(-ENOMEM);
+
+	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
+		     GFP_KERNEL);
+	if (!sl) {
+		kfree(ppol);
+		return ERR_PTR(-ENOMEM);
+	}
+	ppol->sl = sl;
+
+	init_timer(&ppol->policy_slack_timer);
+	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
+	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
+	init_irq_work(&ppol->irq_work, irq_work);
+	spin_lock_init(&ppol->irq_work_lock);
+	spin_lock_init(&ppol->load_lock);
+	spin_lock_init(&ppol->target_freq_lock);
+	init_rwsem(&ppol->enable_sem);
+
+	for_each_cpu(i, policy->related_cpus)
+		per_cpu(polinfo, i) = ppol;
+	return ppol;
+}
+
+/* This function is not multithread-safe. */
+static void free_policyinfo(int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	int j;
+
+	if (!ppol)
+		return;
+
+	for_each_possible_cpu(j)
+		if (per_cpu(polinfo, j) == ppol)
+			per_cpu(polinfo, j) = NULL;
+	kfree(ppol->cached_tunables);
+	kfree(ppol->sl);
+	kfree(ppol);
+}
+
+static struct cpufreq_interactive_tunables *get_tunables(
+				struct cpufreq_interactive_policyinfo *ppol)
+{
+	if (have_governor_per_policy())
+		return ppol->cached_tunables;
+	else
+		return cached_common_tunables;
+}
+
 /* Interactive Governor callbacks */
 struct interactive_governor {
 	struct cpufreq_governor gov;
@@ -1033,305 +1699,207 @@
 
 #define CPU_FREQ_GOV_INTERACTIVE	(&interactive_gov.gov)
 
-static void irq_work(struct irq_work *irq_work)
-{
-	struct interactive_cpu *icpu = container_of(irq_work, struct
-						    interactive_cpu, irq_work);
-
-	cpufreq_interactive_update(icpu);
-	icpu->work_in_progress = false;
-}
-
-static void update_util_handler(struct update_util_data *data, u64 time,
-				unsigned int flags)
-{
-	struct interactive_cpu *icpu = container_of(data,
-					struct interactive_cpu, update_util);
-	struct interactive_policy *ipolicy = icpu->ipolicy;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-	u64 delta_ns;
-
-	/*
-	 * The irq-work may not be allowed to be queued up right now.
-	 * Possible reasons:
-	 * - Work has already been queued up or is in progress.
-	 * - It is too early (too little time from the previous sample).
-	 */
-	if (icpu->work_in_progress)
-		return;
-
-	delta_ns = time - icpu->last_sample_time;
-	if ((s64)delta_ns < tunables->sampling_rate * NSEC_PER_USEC)
-		return;
-
-	icpu->last_sample_time = time;
-	icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) +
-				    jiffies;
-
-	icpu->work_in_progress = true;
-	irq_work_queue(&icpu->irq_work);
-}
-
-static void gov_set_update_util(struct interactive_policy *ipolicy)
-{
-	struct cpufreq_policy *policy = ipolicy->policy;
-	struct interactive_cpu *icpu;
-	int cpu;
-
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
-
-		icpu->last_sample_time = 0;
-		icpu->next_sample_jiffies = 0;
-		cpufreq_add_update_util_hook(cpu, &icpu->update_util,
-					     update_util_handler);
-	}
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
-	int i;
-
-	for_each_cpu(i, policy->cpus)
-		cpufreq_remove_update_util_hook(i);
-
-	synchronize_sched();
-}
-
-static void icpu_cancel_work(struct interactive_cpu *icpu)
-{
-	irq_work_sync(&icpu->irq_work);
-	icpu->work_in_progress = false;
-	del_timer_sync(&icpu->slack_timer);
-}
-
-static struct interactive_policy *
-interactive_policy_alloc(struct cpufreq_policy *policy)
-{
-	struct interactive_policy *ipolicy;
-
-	ipolicy = kzalloc(sizeof(*ipolicy), GFP_KERNEL);
-	if (!ipolicy)
-		return NULL;
-
-	ipolicy->policy = policy;
-
-	return ipolicy;
-}
-
-static void interactive_policy_free(struct interactive_policy *ipolicy)
-{
-	kfree(ipolicy);
-}
-
-static struct interactive_tunables *
-interactive_tunables_alloc(struct interactive_policy *ipolicy)
-{
-	struct interactive_tunables *tunables;
-
-	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
-	if (!tunables)
-		return NULL;
-
-	gov_attr_set_init(&tunables->attr_set, &ipolicy->tunables_hook);
-	if (!have_governor_per_policy())
-		global_tunables = tunables;
-
-	ipolicy->tunables = tunables;
-
-	return tunables;
-}
-
-static void interactive_tunables_free(struct interactive_tunables *tunables)
-{
-	if (!have_governor_per_policy())
-		global_tunables = NULL;
-
-	kfree(tunables);
-}
-
 int cpufreq_interactive_init(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy;
-	struct interactive_tunables *tunables;
-	int ret;
+	int rc;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	/* State should be equivalent to EXIT */
-	if (policy->governor_data)
-		return -EBUSY;
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	ipolicy = interactive_policy_alloc(policy);
-	if (!ipolicy)
-		return -ENOMEM;
+	ppol = get_policyinfo(policy);
+	if (IS_ERR(ppol))
+		return PTR_ERR(ppol);
 
-	mutex_lock(&global_tunables_lock);
-
-	if (global_tunables) {
-		if (WARN_ON(have_governor_per_policy())) {
-			ret = -EINVAL;
-			goto free_int_policy;
-		}
-
-		policy->governor_data = ipolicy;
-		ipolicy->tunables = global_tunables;
-
-		gov_attr_set_get(&global_tunables->attr_set,
-				 &ipolicy->tunables_hook);
-		goto out;
+	if (have_governor_per_policy()) {
+		WARN_ON(tunables);
+	} else if (tunables) {
+		tunables->usage_count++;
+		cpumask_or(&controlled_cpus, &controlled_cpus,
+			   policy->related_cpus);
+		sched_update_freq_max_load(policy->related_cpus);
+		policy->governor_data = tunables;
+		return 0;
 	}
 
-	tunables = interactive_tunables_alloc(ipolicy);
+	tunables = get_tunables(ppol);
 	if (!tunables) {
-		ret = -ENOMEM;
-		goto free_int_policy;
+		tunables = alloc_tunable(policy);
+		if (IS_ERR(tunables))
+			return PTR_ERR(tunables);
 	}
 
-	tunables->hispeed_freq = policy->max;
-	tunables->above_hispeed_delay = default_above_hispeed_delay;
-	tunables->nabove_hispeed_delay =
-		ARRAY_SIZE(default_above_hispeed_delay);
-	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
-	tunables->target_loads = default_target_loads;
-	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
-	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
-	tunables->boostpulse_duration = DEFAULT_MIN_SAMPLE_TIME;
-	tunables->sampling_rate = DEFAULT_SAMPLING_RATE;
-	tunables->timer_slack = DEFAULT_TIMER_SLACK;
-	update_slack_delay(tunables);
+	tunables->usage_count = 1;
+	policy->governor_data = tunables;
+	if (!have_governor_per_policy())
+		common_tunables = tunables;
 
-	spin_lock_init(&tunables->target_loads_lock);
-	spin_lock_init(&tunables->above_hispeed_delay_lock);
+	rc = sysfs_create_group(get_governor_parent_kobj(policy),
+			get_sysfs_attr());
+	if (rc) {
+		kfree(tunables);
+		policy->governor_data = NULL;
+		if (!have_governor_per_policy())
+			common_tunables = NULL;
+		return rc;
+	}
 
-	policy->governor_data = ipolicy;
-
-	ret = kobject_init_and_add(&tunables->attr_set.kobj,
-				   &interactive_tunables_ktype,
-				   get_governor_parent_kobj(policy), "%s",
-				   interactive_gov.gov.name);
-	if (ret)
-		goto fail;
-
-	/* One time initialization for governor */
-	if (!interactive_gov.usage_count++) {
-		idle_notifier_register(&cpufreq_interactive_idle_nb);
+	if (!interactive_gov.usage_count++)
 		cpufreq_register_notifier(&cpufreq_notifier_block,
-					  CPUFREQ_TRANSITION_NOTIFIER);
-	}
+				CPUFREQ_TRANSITION_NOTIFIER);
 
- out:
-	mutex_unlock(&global_tunables_lock);
+	if (tunables->use_sched_load)
+		cpufreq_interactive_enable_sched_input(tunables);
+
+	cpumask_or(&controlled_cpus, &controlled_cpus,
+		   policy->related_cpus);
+	sched_update_freq_max_load(policy->related_cpus);
+
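+	/* Cache the tunables so their settings persist across governor exit/init. */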
+	if (have_governor_per_policy())
+		ppol->cached_tunables = tunables;
+	else
+		cached_common_tunables = tunables;
+
 	return 0;
-
- fail:
-	policy->governor_data = NULL;
-	interactive_tunables_free(tunables);
-
- free_int_policy:
-	mutex_unlock(&global_tunables_lock);
-
-	interactive_policy_free(ipolicy);
-	pr_err("governor initialization failed (%d)\n", ret);
-
-	return ret;
 }
 
 void cpufreq_interactive_exit(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-	unsigned int count;
+	struct cpufreq_interactive_tunables *tunables;
 
-	mutex_lock(&global_tunables_lock);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	/* Last policy using the governor ? */
-	if (!--interactive_gov.usage_count) {
-		cpufreq_unregister_notifier(&cpufreq_notifier_block,
-					    CPUFREQ_TRANSITION_NOTIFIER);
-		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+	BUG_ON(!tunables);
+
+	cpumask_andnot(&controlled_cpus, &controlled_cpus,
+		       policy->related_cpus);
+	sched_update_freq_max_load(cpu_possible_mask);
+	if (!--tunables->usage_count) {
+		/* Last policy using the governor ? */
+		if (!--interactive_gov.usage_count)
+			cpufreq_unregister_notifier(&cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+		sysfs_remove_group(get_governor_parent_kobj(policy),
+				get_sysfs_attr());
+
+		common_tunables = NULL;
 	}
 
-	count = gov_attr_set_put(&tunables->attr_set, &ipolicy->tunables_hook);
 	policy->governor_data = NULL;
-	if (!count)
-		interactive_tunables_free(tunables);
 
-	mutex_unlock(&global_tunables_lock);
-
-	interactive_policy_free(ipolicy);
+	if (tunables->use_sched_load)
+		cpufreq_interactive_disable_sched_input(tunables);
 }
 
 int cpufreq_interactive_start(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_interactive_tunables *tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-		icpu->target_freq = policy->cur;
-		icpu->floor_freq = icpu->target_freq;
-		icpu->pol_floor_val_time = ktime_to_us(ktime_get());
-		icpu->loc_floor_val_time = icpu->pol_floor_val_time;
-		icpu->pol_hispeed_val_time = icpu->pol_floor_val_time;
-		icpu->loc_hispeed_val_time = icpu->pol_floor_val_time;
+	BUG_ON(!tunables);
+	mutex_lock(&gov_lock);
 
-		down_write(&icpu->enable_sem);
-		icpu->ipolicy = ipolicy;
-		up_write(&icpu->enable_sem);
+	freq_table = policy->freq_table;
+	if (!tunables->hispeed_freq)
+		tunables->hispeed_freq = policy->max;
 
-		slack_timer_resched(icpu, cpu, false);
-	}
+	ppol = per_cpu(polinfo, policy->cpu);
+	ppol->policy = policy;
+	ppol->target_freq = policy->cur;
+	ppol->freq_table = freq_table;
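+	/*
+	 * p_nolim mirrors the policy but with cpuinfo-wide limits, so
+	 * frequency table lookups are not clipped by the current
+	 * policy->min/max.
+	 */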
+	ppol->p_nolim = *policy;
+	ppol->p_nolim.min = policy->cpuinfo.min_freq;
+	ppol->p_nolim.max = policy->cpuinfo.max_freq;
+	ppol->floor_freq = ppol->target_freq;
+	ppol->floor_validate_time = ktime_to_us(ktime_get());
+	ppol->hispeed_validate_time = ppol->floor_validate_time;
+	ppol->min_freq = policy->min;
+	ppol->reject_notification = true;
+	ppol->notif_pending = false;
+	down_write(&ppol->enable_sem);
+	del_timer_sync(&ppol->policy_slack_timer);
+	ppol->last_evaluated_jiffy = get_jiffies_64();
+	cpufreq_interactive_timer_start(tunables, policy->cpu);
+	ppol->governor_enabled = 1;
+	up_write(&ppol->enable_sem);
+	ppol->reject_notification = false;
 
-	gov_set_update_util(ipolicy);
+	mutex_unlock(&gov_lock);
 	return 0;
 }
 
 void cpufreq_interactive_stop(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	gov_clear_update_util(ipolicy->policy);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	BUG_ON(!tunables);
 
-		icpu_cancel_work(icpu);
+	mutex_lock(&gov_lock);
 
-		down_write(&icpu->enable_sem);
-		icpu->ipolicy = NULL;
-		up_write(&icpu->enable_sem);
-	}
+	ppol = per_cpu(polinfo, policy->cpu);
+	ppol->reject_notification = true;
+	down_write(&ppol->enable_sem);
+	ppol->governor_enabled = 0;
+	ppol->target_freq = 0;
+	gov_clear_update_util(ppol->policy);
+	irq_work_sync(&ppol->irq_work);
+	ppol->work_in_progress = false;
+	del_timer_sync(&ppol->policy_slack_timer);
+	up_write(&ppol->enable_sem);
+	ppol->reject_notification = false;
+
+	mutex_unlock(&gov_lock);
 }
 
 void cpufreq_interactive_limits(struct cpufreq_policy *policy)
 {
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
-	unsigned long flags;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	cpufreq_policy_apply_limits(policy);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	BUG_ON(!tunables);
+	ppol = per_cpu(polinfo, policy->cpu);
 
-		spin_lock_irqsave(&icpu->target_freq_lock, flags);
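+	/* Reapply the governor's chosen frequency under the new limits. */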
+	__cpufreq_driver_target(policy,
+			ppol->target_freq, CPUFREQ_RELATION_L);
 
-		if (policy->max < icpu->target_freq)
-			icpu->target_freq = policy->max;
-		else if (policy->min > icpu->target_freq)
-			icpu->target_freq = policy->min;
-
-		spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
+	down_read(&ppol->enable_sem);
+	if (ppol->governor_enabled) {
+		if (policy->min < ppol->min_freq)
+			cpufreq_interactive_timer_resched(policy->cpu,
+							  true);
+		ppol->min_freq = policy->min;
 	}
+	up_read(&ppol->enable_sem);
 }
 
 static struct interactive_governor interactive_gov = {
 	.gov = {
 		.name			= "interactive",
-		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+		.max_transition_latency	= 10000000,
 		.owner			= THIS_MODULE,
 		.init			= cpufreq_interactive_init,
 		.exit			= cpufreq_interactive_exit,
@@ -1341,47 +1909,24 @@
 	}
 };
 
-static void cpufreq_interactive_nop_timer(unsigned long data)
-{
-	/*
-	 * The purpose of slack-timer is to wake up the CPU from IDLE, in order
-	 * to decrease its frequency if it is not set to minimum already.
-	 *
-	 * This is important for platforms where CPU with higher frequencies
-	 * consume higher power even at IDLE.
-	 */
-}
-
 static int __init cpufreq_interactive_gov_init(void)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu) {
-		icpu = &per_cpu(interactive_cpu, cpu);
-
-		init_irq_work(&icpu->irq_work, irq_work);
-		spin_lock_init(&icpu->load_lock);
-		spin_lock_init(&icpu->target_freq_lock);
-		init_rwsem(&icpu->enable_sem);
-
-		/* Initialize per-cpu slack-timer */
-		init_timer_pinned(&icpu->slack_timer);
-		icpu->slack_timer.function = cpufreq_interactive_nop_timer;
-	}
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 	spin_lock_init(&speedchange_cpumask_lock);
-	speedchange_task = kthread_create(cpufreq_interactive_speedchange_task,
-					  NULL, "cfinteractive");
+	mutex_init(&gov_lock);
+	mutex_init(&sched_lock);
+	speedchange_task =
+		kthread_create(cpufreq_interactive_speedchange_task, NULL,
+			       "cfinteractive");
 	if (IS_ERR(speedchange_task))
 		return PTR_ERR(speedchange_task);
 
 	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
 	get_task_struct(speedchange_task);
 
-	/* wake up so the thread does not look hung to the freezer */
-	wake_up_process(speedchange_task);
+	/* NB: wake up so the thread does not look hung to the freezer */
+	wake_up_process_no_notif(speedchange_task);
 
 	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
 }
@@ -1399,12 +1944,19 @@
 
 static void __exit cpufreq_interactive_gov_exit(void)
 {
+	int cpu;
+
 	cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
 	kthread_stop(speedchange_task);
 	put_task_struct(speedchange_task);
+
+	for_each_possible_cpu(cpu)
+		free_policyinfo(cpu);
 }
+
 module_exit(cpufreq_interactive_gov_exit);
 
 MODULE_AUTHOR("Mike Chan <mike@android.com>");
-MODULE_DESCRIPTION("'cpufreq_interactive' - A dynamic cpufreq governor for Latency sensitive workloads");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for Latency sensitive workloads");
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
new file mode 100644
index 0000000..f968ffd9
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -0,0 +1,503 @@
+/* drivers/cpufreq/qcom-cpufreq.c
+ *
+ * MSM architecture cpufreq driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Author: Mike A. Chan <mikechan@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/suspend.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+
+static DEFINE_MUTEX(l2bw_lock);
+
+static struct clk *cpu_clk[NR_CPUS];
+static struct clk *l2_clk;
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
+static bool hotplug_ready;
+
+struct cpufreq_suspend_t {
+	struct mutex suspend_mutex;
+	int device_suspended;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
+
+static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
+			unsigned int index)
+{
+	int ret = 0;
+	struct cpufreq_freqs freqs;
+	unsigned long rate;
+
+	freqs.old = policy->cur;
+	freqs.new = new_freq;
+	freqs.cpu = policy->cpu;
+
+	trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
+	cpufreq_freq_transition_begin(policy, &freqs);
+
+	rate = new_freq * 1000;
+	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
+	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
+	cpufreq_freq_transition_end(policy, &freqs, ret);
+	if (!ret)
+		trace_cpu_frequency_switch_end(policy->cpu);
+
+	return ret;
+}
+
+static int msm_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	int ret = 0;
+	int index;
+	struct cpufreq_frequency_table *table;
+
+	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+
+	if (target_freq == policy->cur)
+		goto done;
+
+	if (per_cpu(suspend_data, policy->cpu).device_suspended) {
+		pr_debug("cpufreq: cpu%d scheduling frequency change in suspend.\n",
+			 policy->cpu);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	table = policy->freq_table;
+	if (!table) {
+		pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
+		       policy->cpu);
+		ret = -ENODEV;
+		goto done;
+	}
+	index = cpufreq_frequency_table_target(policy, target_freq, relation);
+
+	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
+		policy->cpu, target_freq, relation,
+		policy->min, policy->max, table[index].frequency);
+
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+done:
+	mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+	return ret;
+}
+
+static int msm_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+			policy->cpuinfo.max_freq);
+	return 0;
+}
+
+static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk[cpu]) / 1000;
+}
+
+static int msm_cpufreq_init(struct cpufreq_policy *policy)
+{
+	int cur_freq;
+	int index;
+	int ret = 0;
+	struct cpufreq_frequency_table *table =
+			per_cpu(freq_table, policy->cpu);
+	int cpu;
+
+	/*
+	 * On some SoCs, several cores are clocked by the same source and
+	 * their frequencies cannot be changed independently. Find all other
+	 * CPUs that share this CPU's clock and mark them as controlled by
+	 * the same policy.
+	 */
+	for_each_possible_cpu(cpu)
+		if (cpu_clk[cpu] == cpu_clk[policy->cpu])
+			cpumask_set_cpu(cpu, policy->cpus);
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("cpufreq: failed to get policy min/max\n");
+		return ret;
+	}
+
+	cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
+
+	index = cpufreq_frequency_table_target(policy, cur_freq,
+						CPUFREQ_RELATION_H);
+	/*
+	 * Call set_cpu_freq unconditionally so that the frequency limit is
+	 * always updated when a CPU is brought online.
+	 */
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+	if (ret)
+		return ret;
+	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
+			policy->cpu, cur_freq, table[index].frequency);
+	policy->cur = table[index].frequency;
+
+	return 0;
+}
+
+static int qcom_cpufreq_dead_cpu(unsigned int cpu)
+{
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	clk_unprepare(cpu_clk[cpu]);
+	clk_unprepare(l2_clk);
+	return 0;
+}
+
+static int qcom_cpufreq_up_cpu(unsigned int cpu)
+{
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	rc = clk_prepare(l2_clk);
+	if (rc < 0)
+		return rc;
+	rc = clk_prepare(cpu_clk[cpu]);
+	if (rc < 0)
+		clk_unprepare(l2_clk);
+	return rc;
+}
+
+static int qcom_cpufreq_dying_cpu(unsigned int cpu)
+{
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	clk_disable(cpu_clk[cpu]);
+	clk_disable(l2_clk);
+	return 0;
+}
+
+static int qcom_cpufreq_starting_cpu(unsigned int cpu)
+{
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	rc = clk_enable(l2_clk);
+	if (rc < 0)
+		return rc;
+	rc = clk_enable(cpu_clk[cpu]);
+	if (rc < 0)
+		clk_disable(l2_clk);
+	return rc;
+}
+
+static int msm_cpufreq_suspend(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
+		per_cpu(suspend_data, cpu).device_suspended = 1;
+		mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_resume(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy policy;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	/*
+	 * Freq request might be rejected during suspend, resulting
+	 * in policy->cur violating min/max constraint.
+	 * Correct the frequency as soon as possible.
+	 */
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		ret = cpufreq_get_policy(&policy, cpu);
+		if (ret)
+			continue;
+		if (policy.cur <= policy.max && policy.cur >= policy.min)
+			continue;
+		ret = cpufreq_update_policy(cpu);
+		if (ret)
+			pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
+			       cpu);
+		else
+			pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
+				cpu);
+	}
+	put_online_cpus();
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_pm_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return msm_cpufreq_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return msm_cpufreq_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block msm_cpufreq_pm_notifier = {
+	.notifier_call = msm_cpufreq_pm_event,
+};
+
+static struct freq_attr *msm_freq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver msm_cpufreq_driver = {
+	/* lps calculations are handled here. */
+	.flags		= CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.init		= msm_cpufreq_init,
+	.verify		= msm_cpufreq_verify,
+	.target		= msm_cpufreq_target,
+	.get		= msm_cpufreq_get_freq,
+	.name		= "msm",
+	.attr		= msm_freq_attr,
+};
+
+static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
+						char *tbl_name, int cpu)
+{
+	int ret, nf, i, j;
+	u32 *data;
+	struct cpufreq_frequency_table *ftbl;
+
+	/* Parse list of usable CPU frequencies. */
+	if (!of_find_property(dev->of_node, tbl_name, &nf))
+		return ERR_PTR(-EINVAL);
+	nf /= sizeof(*data);
+
+	if (nf == 0)
+		return ERR_PTR(-EINVAL);
+
+	data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
+	if (!ftbl)
+		return ERR_PTR(-ENOMEM);
+
+	j = 0;
+	for (i = 0; i < nf; i++) {
+		unsigned long f;
+
+		f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
+		if (IS_ERR_VALUE(f))
+			break;
+		f /= 1000;
+
+		/*
+		 * Don't repeat frequencies if they round up to the same clock
+		 * frequency.
+		 */
+		if (j > 0 && f <= ftbl[j - 1].frequency)
+			continue;
+
+		ftbl[j].driver_data = j;
+		ftbl[j].frequency = f;
+		j++;
+	}
+
+	ftbl[j].driver_data = j;
+	ftbl[j].frequency = CPUFREQ_TABLE_END;
+
+	devm_kfree(dev, data);
+
+	return ftbl;
+}
+
+static int msm_cpufreq_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	char clk_name[] = "cpu??_clk";
+	char tbl_name[] = "qcom,cpufreq-table-??";
+	struct clk *c;
+	int cpu, ret;
+	struct cpufreq_frequency_table *ftbl;
+
+	l2_clk = devm_clk_get(dev, "l2_clk");
+	if (IS_ERR(l2_clk))
+		l2_clk = NULL;
+
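+	/*
+	 * Look up each CPU's clock. CPU0 must have its own clock; any CPU
+	 * without one falls back to the previous CPU's clock, i.e. it is
+	 * treated as sharing that clock domain.
+	 */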
+	for_each_possible_cpu(cpu) {
+		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+		c = devm_clk_get(dev, clk_name);
+		if (cpu == 0 && IS_ERR(c))
+			return PTR_ERR(c);
+		else if (IS_ERR(c))
+			c = cpu_clk[cpu-1];
+		cpu_clk[cpu] = c;
+	}
+	hotplug_ready = true;
+
+	/* Use per-policy governor tunable for some targets */
+	if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
+		msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
+
+	/* Parse common cpufreq table for all CPUs */
+	ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
+	if (!IS_ERR(ftbl)) {
+		for_each_possible_cpu(cpu)
+			per_cpu(freq_table, cpu) = ftbl;
+		return 0;
+	}
+
+	/*
+	 * No common table. Parse individual tables for each unique
+	 * CPU clock.
+	 */
+	for_each_possible_cpu(cpu) {
+		snprintf(tbl_name, sizeof(tbl_name),
+			 "qcom,cpufreq-table-%d", cpu);
+		ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);
+
+		/* CPU0 must contain freq table */
+		if (cpu == 0 && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU0's freq table\n");
+			return PTR_ERR(ftbl);
+		}
+		if (cpu == 0) {
+			per_cpu(freq_table, cpu) = ftbl;
+			continue;
+		}
+
+		if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU%d's freq table\n",
+				cpu);
+			return PTR_ERR(ftbl);
+		}
+
+		/* Use previous CPU's table if it shares same clock */
+		if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
+			if (!IS_ERR(ftbl)) {
+				dev_warn(dev, "Conflicting tables for CPU%d\n",
+					 cpu);
+				devm_kfree(dev, ftbl);
+			}
+			ftbl = per_cpu(freq_table, cpu - 1);
+		}
+		per_cpu(freq_table, cpu) = ftbl;
+	}
+
+	ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
+	if (ret)
+		return ret;
+
+	ret = cpufreq_register_driver(&msm_cpufreq_driver);
+	if (ret)
+		unregister_pm_notifier(&msm_cpufreq_pm_notifier);
+
+	return ret;
+}
+
+static const struct of_device_id msm_cpufreq_match_table[] = {
+	{ .compatible = "qcom,msm-cpufreq" },
+	{}
+};
+
+static struct platform_driver msm_cpufreq_plat_driver = {
+	.probe = msm_cpufreq_probe,
+	.driver = {
+		.name = "msm-cpufreq",
+		.of_match_table = msm_cpufreq_match_table,
+	},
+};
+
+static int __init msm_cpufreq_register(void)
+{
+	int cpu, rc;
+
+	for_each_possible_cpu(cpu) {
+		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	rc = platform_driver_register(&msm_cpufreq_plat_driver);
+	if (rc < 0) {
+		/* Unblock hotplug if msm-cpufreq probe fails */
+		cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE);
+		cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
+		for_each_possible_cpu(cpu)
+			mutex_destroy(&(per_cpu(suspend_data, cpu).
+					suspend_mutex));
+		return rc;
+	}
+
+	return 0;
+}
+
+subsys_initcall(msm_cpufreq_register);
+
+static int __init msm_cpufreq_early_register(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING,
+					"AP_QCOM_CPUFREQ_STARTING",
+					qcom_cpufreq_starting_cpu,
+					qcom_cpufreq_dying_cpu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE,
+					"QCOM_CPUFREQ_PREPARE",
+					qcom_cpufreq_up_cpu,
+					qcom_cpufreq_dead_cpu);
+	if (!ret)
+		return ret;
+	cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
+	return ret;
+}
+core_initcall(msm_cpufreq_early_register);
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633..f796e36 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -283,11 +283,14 @@
  */
 int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
-	struct ccp_device *ccp = ccp_get_device();
+	struct ccp_device *ccp;
 	unsigned long flags;
 	unsigned int i;
 	int ret;
 
+	/* Some commands might need to be sent to a specific device */
+	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
 	if (!ccp)
 		return -ENODEV;
 
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e5d9278..8d0eeb4 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -390,6 +390,7 @@
 			goto err;
 
 		ccp_cmd = &cmd->ccp_cmd;
+		ccp_cmd->ccp = chan->ccp;
 		ccp_pt = &ccp_cmd->u.passthru_nomap;
 		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
 		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 286447a..152552d 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -334,6 +334,7 @@
 	int rc = VM_FAULT_SIGBUS;
 	phys_addr_t phys;
 	pfn_t pfn;
+	unsigned int fault_size = PAGE_SIZE;
 
 	if (check_vma(dax_dev, vma, __func__))
 		return VM_FAULT_SIGBUS;
@@ -344,6 +345,9 @@
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (fault_size != dax_region->align)
+		return VM_FAULT_SIGBUS;
+
 	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
 	if (phys == -1) {
 		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
@@ -389,6 +393,7 @@
 	phys_addr_t phys;
 	pgoff_t pgoff;
 	pfn_t pfn;
+	unsigned int fault_size = PMD_SIZE;
 
 	if (check_vma(dax_dev, vma, __func__))
 		return VM_FAULT_SIGBUS;
@@ -405,6 +410,16 @@
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (fault_size < dax_region->align)
+		return VM_FAULT_SIGBUS;
+	else if (fault_size > dax_region->align)
+		return VM_FAULT_FALLBACK;
+
+	/* if we are outside of the VMA */
+	if (pmd_addr < vma->vm_start ||
+			(pmd_addr + PMD_SIZE) > vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
 	pgoff = linear_page_index(vma, pmd_addr);
 	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
 	if (phys == -1) {
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3c24e57..6476c5e 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -72,6 +72,79 @@
 	  through sysfs entries. The passive governor recommends that
 	  devfreq device uses the OPP table to get the frequency/voltage.
 
+config DEVFREQ_GOV_CPUFREQ
+	tristate "CPUfreq"
+	depends on CPU_FREQ
+	help
+	  Chooses the device frequency based on the online CPUs' current
+	  frequency and one or more CPU-frequency-to-device-frequency mapping
+	  tables. This governor can be useful for controlling devices such as
+	  DDR, cache, CCI, etc.
+
+config QCOM_BIMC_BWMON
+	tristate "QCOM BIMC Bandwidth monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  The BIMC Bandwidth monitor hardware allows for monitoring the
+	  traffic coming from each master port connected to the BIMC. It also
+	  has the capability to raise an IRQ when the count exceeds a
+	  programmable limit.
+
+config ARM_MEMLAT_MON
+	tristate "ARM CPU Memory Latency monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  The PMU present on these ARM cores allow for the use of counters to
+	  monitor the memory latency characteristics of an ARM CPU workload.
+	  This driver uses these counters to implement the APIs needed by
+	  the mem_latency devfreq governor.
+
+config QCOMCCI_HWMON
+	tristate "QTI CCI Cache monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  QTI CCI has additional PMU counters that can be used to monitor
+	  cache requests. The QTI CCI hardware monitor device configures these
+	  registers to monitor the cache and inform the governor. It can also
+	  raise an IRQ when the count exceeds a programmable limit.
+
+config QCOM_M4M_HWMON
+	tristate "QCOM M4M cache monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  QCOM M4M has counters that can be used to monitor requests coming to
+	  M4M. The QCOM M4M hardware monitor device programs the corresponding
+	  registers to monitor the cache and inform the governor. It can also
+	  raise an IRQ when the count exceeds a programmable limit.
+
+config DEVFREQ_GOV_QCOM_BW_HWMON
+	tristate "HW monitor based governor for device BW"
+	depends on QCOM_BIMC_BWMON
+	help
+	  HW monitor based governor for device to DDR bandwidth voting.
+	  This governor sets the CPU BW vote by using BIMC counters to monitor
+	  the CPU's use of DDR. Since this uses target-specific counters it
+	  can conflict with existing profiling tools. This governor is unlikely
+	  to be useful for non-QCOM devices.
+
+config DEVFREQ_GOV_QCOM_CACHE_HWMON
+	tristate "HW monitor based governor for cache frequency"
+	help
+	  HW monitor based governor for cache frequency scaling. This
+	  governor sets the cache frequency by using PM counters to monitor the
+	  CPU's use of cache. Since this governor uses some of the PM counters
+	  it can conflict with existing profiling tools. This governor is
+	  unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_MEMLAT
+	tristate "HW monitor based governor for device BW"
+	depends on ARM_MEMLAT_MON
+	help
+	  HW monitor based governor for device to DDR bandwidth voting.
+	  This governor sets the CPU BW vote based on stats obtained from the
+	  memlat monitor if it determines that a workload is memory latency
+	  bound. Since this uses target-specific counters it can conflict with
+	  existing profiling tools.
+
 comment "DEVFREQ Drivers"
 
 config DEVFREQ_GOV_QCOM_ADRENO_TZ
@@ -121,6 +194,30 @@
           It sets the frequency for the memory controller and reads the usage counts
           from hardware.
 
+config DEVFREQ_SIMPLE_DEV
+	tristate "Device driver for simple clock device with no status info"
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	help
+	  Device driver for simple devices that control their frequency using
+	  clock APIs and don't have any form of status reporting.
+
+config QCOM_DEVFREQ_DEVBW
+	bool "Qualcomm Technologies Inc. DEVFREQ device for device master <-> slave IB/AB BW voting"
+	depends on ARCH_QCOM
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	default n
+	help
+	  Different devfreq governors use this devfreq device to make CPU to
+	  DDR IB/AB bandwidth votes. This driver provides an SoC-topology
+	  agnostic interface so that some of the devfreq governors can be
+	  shared across SoCs.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 0fedc4c..f488f12 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -5,12 +5,23 @@
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
 obj-$(CONFIG_DEVFREQ_GOV_PASSIVE)	+= governor_passive.o
+obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ)	+= governor_cpufreq.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_QCOM_BIMC_BWMON)		+= bimc-bwmon.o
+obj-$(CONFIG_ARM_MEMLAT_MON)		+= arm-memlat-mon.o
+obj-$(CONFIG_QCOMCCI_HWMON)		+= msmcci-hwmon.o
+obj-$(CONFIG_QCOM_M4M_HWMON)		+= m4m-hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON)	+= governor_bw_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON)	+= governor_cache_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_MEMLAT)       += governor_memlat.o
+
 # DEVFREQ Drivers
 obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)	+= exynos-bus.o
 obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ)	+= rk3399_dmc.o
 obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
+obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
+obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
new file mode 100644
index 0000000..ed83185
--- /dev/null
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "arm-memlat-mon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include "governor.h"
+#include "governor_memlat.h"
+#include <linux/perf_event.h>
+
+enum ev_index {
+	INST_IDX,
+	CM_IDX,
+	CYC_IDX,
+	NUM_EVENTS
+};
+#define INST_EV		0x08
+#define L2DM_EV		0x17
+#define CYC_EV		0x11
+
+struct event_data {
+	struct perf_event *pevent;
+	unsigned long prev_count;
+};
+
+struct memlat_hwmon_data {
+	struct event_data events[NUM_EVENTS];
+	ktime_t prev_ts;
+	bool init_pending;
+	unsigned long cache_miss_event;
+	unsigned long inst_event;
+};
+static DEFINE_PER_CPU(struct memlat_hwmon_data, pm_data);
+
+struct cpu_grp_info {
+	cpumask_t cpus;
+	struct memlat_hwmon hw;
+	struct notifier_block arm_memlat_cpu_notif;
+};
+
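+/*
+ * Estimate the CPU frequency (in MHz) from the number of cycles counted
+ * since the previous reading divided by the elapsed time in microseconds.
+ */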
+static unsigned long compute_freq(struct memlat_hwmon_data *hw_data,
+						unsigned long cyc_cnt)
+{
+	ktime_t ts;
+	unsigned int diff;
+	unsigned long freq = 0;
+
+	ts = ktime_get();
+	diff = ktime_to_us(ktime_sub(ts, hw_data->prev_ts));
+	if (!diff)
+		diff = 1;
+	hw_data->prev_ts = ts;
+	freq = cyc_cnt;
+	do_div(freq, diff);
+
+	return freq;
+}
+
+#define MAX_COUNT_LIM 0xFFFFFFFFFFFFFFFF
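+/*
+ * Return the event count delta since the last read, handling a single
+ * counter wrap-around.
+ */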
+static inline unsigned long read_event(struct event_data *event)
+{
+	unsigned long ev_count;
+	u64 total, enabled, running;
+
+	total = perf_event_read_value(event->pevent, &enabled, &running);
+	if (total >= event->prev_count)
+		ev_count = total - event->prev_count;
+	else
+		ev_count = (MAX_COUNT_LIM - event->prev_count) + total;
+
+	event->prev_count = total;
+
+	return ev_count;
+}
+
+static void read_perf_counters(int cpu, struct cpu_grp_info *cpu_grp)
+{
+	int cpu_idx;
+	struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
+	struct memlat_hwmon *hw = &cpu_grp->hw;
+	unsigned long cyc_cnt;
+
+	if (hw_data->init_pending)
+		return;
+
+	cpu_idx = cpu - cpumask_first(&cpu_grp->cpus);
+
+	hw->core_stats[cpu_idx].inst_count =
+			read_event(&hw_data->events[INST_IDX]);
+
+	hw->core_stats[cpu_idx].mem_count =
+			read_event(&hw_data->events[CM_IDX]);
+
+	cyc_cnt = read_event(&hw_data->events[CYC_IDX]);
+	hw->core_stats[cpu_idx].freq = compute_freq(hw_data, cyc_cnt);
+}
+
+static unsigned long get_cnt(struct memlat_hwmon *hw)
+{
+	int cpu;
+	struct cpu_grp_info *cpu_grp = container_of(hw,
+					struct cpu_grp_info, hw);
+
+	for_each_cpu(cpu, &cpu_grp->cpus)
+		read_perf_counters(cpu, cpu_grp);
+
+	return 0;
+}
+
+static void delete_events(struct memlat_hwmon_data *hw_data)
+{
+	int i;
+
+	for (i = 0; i < NUM_EVENTS; i++) {
+		hw_data->events[i].prev_count = 0;
+		perf_event_release_kernel(hw_data->events[i].pevent);
+	}
+}
+
+static void stop_hwmon(struct memlat_hwmon *hw)
+{
+	int cpu, idx;
+	struct memlat_hwmon_data *hw_data;
+	struct cpu_grp_info *cpu_grp = container_of(hw,
+					struct cpu_grp_info, hw);
+
+	get_online_cpus();
+	for_each_cpu(cpu, &cpu_grp->cpus) {
+		hw_data = &per_cpu(pm_data, cpu);
+		if (hw_data->init_pending)
+			hw_data->init_pending = false;
+		else
+			delete_events(hw_data);
+
+		/* Clear governor data */
+		idx = cpu - cpumask_first(&cpu_grp->cpus);
+		hw->core_stats[idx].inst_count = 0;
+		hw->core_stats[idx].mem_count = 0;
+		hw->core_stats[idx].freq = 0;
+	}
+	put_online_cpus();
+
+	unregister_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
+}
+
+static struct perf_event_attr *alloc_attr(void)
+{
+	struct perf_event_attr *attr;
+
+	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
+	if (!attr)
+		return ERR_PTR(-ENOMEM);
+
+	attr->type = PERF_TYPE_RAW;
+	attr->size = sizeof(struct perf_event_attr);
+	attr->pinned = 1;
+	attr->exclude_idle = 1;
+
+	return attr;
+}
+
+static int set_events(struct memlat_hwmon_data *hw_data, int cpu)
+{
+	struct perf_event *pevent;
+	struct perf_event_attr *attr;
+	int err;
+
+	/* Allocate an attribute for event initialization */
+	attr = alloc_attr();
+	if (IS_ERR(attr))
+		return PTR_ERR(attr);
+
+	attr->config = hw_data->inst_event;
+	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+	if (IS_ERR(pevent))
+		goto err_out;
+	hw_data->events[INST_IDX].pevent = pevent;
+	perf_event_enable(hw_data->events[INST_IDX].pevent);
+
+	attr->config = hw_data->cache_miss_event;
+	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+	if (IS_ERR(pevent))
+		goto err_out;
+	hw_data->events[CM_IDX].pevent = pevent;
+	perf_event_enable(hw_data->events[CM_IDX].pevent);
+
+	attr->config = CYC_EV;
+	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+	if (IS_ERR(pevent))
+		goto err_out;
+	hw_data->events[CYC_IDX].pevent = pevent;
+	perf_event_enable(hw_data->events[CYC_IDX].pevent);
+
+	kfree(attr);
+	return 0;
+
+err_out:
+	err = PTR_ERR(pevent);
+	kfree(attr);
+	return err;
+}
+
+static int arm_memlat_cpu_callback(struct notifier_block *nb,
+		unsigned long action, void *hcpu)
+{
+	unsigned long cpu = (unsigned long)hcpu;
+	struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
+
+	if ((action != CPU_ONLINE) || !hw_data->init_pending)
+		return NOTIFY_OK;
+
+	if (set_events(hw_data, cpu))
+		pr_warn("Failed to create perf event for CPU%lu\n", cpu);
+
+	hw_data->init_pending = false;
+
+	return NOTIFY_OK;
+}
+
+static int start_hwmon(struct memlat_hwmon *hw)
+{
+	int cpu, ret = 0;
+	struct memlat_hwmon_data *hw_data;
+	struct cpu_grp_info *cpu_grp = container_of(hw,
+					struct cpu_grp_info, hw);
+
+	register_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
+
+	get_online_cpus();
+	for_each_cpu(cpu, &cpu_grp->cpus) {
+		hw_data = &per_cpu(pm_data, cpu);
+		ret = set_events(hw_data, cpu);
+		if (ret) {
+			if (!cpu_online(cpu)) {
+				hw_data->init_pending = true;
+				ret = 0;
+			} else {
+				pr_warn("Perf event init failed on CPU%d\n",
+					cpu);
+				break;
+			}
+		}
+	}
+
+	put_online_cpus();
+	return ret;
+}
+
+static int get_mask_from_dev_handle(struct platform_device *pdev,
+					cpumask_t *mask)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *dev_phandle;
+	struct device *cpu_dev;
+	int cpu, i = 0;
+	int ret = -ENOENT;
+
+	dev_phandle = of_parse_phandle(dev->of_node, "qcom,cpulist", i++);
+	while (dev_phandle) {
+		for_each_possible_cpu(cpu) {
+			cpu_dev = get_cpu_device(cpu);
+			if (cpu_dev && cpu_dev->of_node == dev_phandle) {
+				cpumask_set_cpu(cpu, mask);
+				ret = 0;
+				break;
+			}
+		}
+		dev_phandle = of_parse_phandle(dev->of_node,
+						"qcom,cpulist", i++);
+	}
+
+	return ret;
+}
+
+static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct memlat_hwmon *hw;
+	struct cpu_grp_info *cpu_grp;
+	int cpu, ret;
+	u32 cachemiss_ev, inst_ev;
+
+	cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
+	if (!cpu_grp)
+		return -ENOMEM;
+	cpu_grp->arm_memlat_cpu_notif.notifier_call = arm_memlat_cpu_callback;
+	hw = &cpu_grp->hw;
+
+	hw->dev = dev;
+	hw->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!hw->of_node) {
+		dev_err(dev, "Couldn't find a target device\n");
+		return -ENODEV;
+	}
+
+	if (get_mask_from_dev_handle(pdev, &cpu_grp->cpus)) {
+		dev_err(dev, "CPU list is empty\n");
+		return -ENODEV;
+	}
+
+	hw->num_cores = cpumask_weight(&cpu_grp->cpus);
+	hw->core_stats = devm_kzalloc(dev, hw->num_cores *
+				sizeof(*(hw->core_stats)), GFP_KERNEL);
+	if (!hw->core_stats)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
+			&cachemiss_ev);
+	if (ret) {
+		dev_dbg(dev, "Cache Miss event not specified. Using def:0x%x\n",
+				L2DM_EV);
+		cachemiss_ev = L2DM_EV;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &inst_ev);
+	if (ret) {
+		dev_dbg(dev, "Inst event not specified. Using def:0x%x\n",
+				INST_EV);
+		inst_ev = INST_EV;
+	}
+
+	for_each_cpu(cpu, &cpu_grp->cpus) {
+		hw->core_stats[cpu - cpumask_first(&cpu_grp->cpus)].id = cpu;
+		(&per_cpu(pm_data, cpu))->cache_miss_event = cachemiss_ev;
+		(&per_cpu(pm_data, cpu))->inst_event = inst_ev;
+	}
+
+	hw->start_hwmon = &start_hwmon;
+	hw->stop_hwmon = &stop_hwmon;
+	hw->get_cnt = &get_cnt;
+
+	ret = register_memlat(dev, hw);
+	if (ret) {
+		pr_err("Mem Latency Gov registration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id memlat_match_table[] = {
+	{ .compatible = "qcom,arm-memlat-mon" },
+	{}
+};
+
+static struct platform_driver arm_memlat_mon_driver = {
+	.probe = arm_memlat_mon_driver_probe,
+	.driver = {
+		.name = "arm-memlat-mon",
+		.of_match_table = memlat_match_table,
+	},
+};
+
+module_platform_driver(arm_memlat_mon_driver);
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
new file mode 100644
index 0000000..a4a1cfb
--- /dev/null
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bimc-bwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include "governor_bw_hwmon.h"
+
+#define GLB_INT_STATUS(m)	((m)->global_base + 0x100)
+#define GLB_INT_CLR(m)		((m)->global_base + 0x108)
+#define	GLB_INT_EN(m)		((m)->global_base + 0x10C)
+#define MON_INT_STATUS(m)	((m)->base + 0x100)
+#define MON_INT_CLR(m)		((m)->base + 0x108)
+#define	MON_INT_EN(m)		((m)->base + 0x10C)
+#define	MON_EN(m)		((m)->base + 0x280)
+#define MON_CLEAR(m)		((m)->base + 0x284)
+#define MON_CNT(m)		((m)->base + 0x288)
+#define MON_THRES(m)		((m)->base + 0x290)
+#define MON_MASK(m)		((m)->base + 0x298)
+#define MON_MATCH(m)		((m)->base + 0x29C)
+
+#define MON2_EN(m)		((m)->base + 0x2A0)
+#define MON2_CLEAR(m)		((m)->base + 0x2A4)
+#define MON2_SW(m)		((m)->base + 0x2A8)
+#define MON2_THRES_HI(m)	((m)->base + 0x2AC)
+#define MON2_THRES_MED(m)	((m)->base + 0x2B0)
+#define MON2_THRES_LO(m)	((m)->base + 0x2B4)
+#define MON2_ZONE_ACTIONS(m)	((m)->base + 0x2B8)
+#define MON2_ZONE_CNT_THRES(m)	((m)->base + 0x2BC)
+#define MON2_BYTE_CNT(m)	((m)->base + 0x2D0)
+#define MON2_WIN_TIMER(m)	((m)->base + 0x2D4)
+#define MON2_ZONE_CNT(m)	((m)->base + 0x2D8)
+#define MON2_ZONE_MAX(m, zone)	((m)->base + 0x2E0 + 0x4 * zone)
+
+struct bwmon_spec {
+	bool wrap_on_thres;
+	bool overflow;
+	bool throt_adj;
+	bool hw_sampling;
+};
+
+struct bwmon {
+	void __iomem *base;
+	void __iomem *global_base;
+	unsigned int mport;
+	unsigned int irq;
+	const struct bwmon_spec *spec;
+	struct device *dev;
+	struct bw_hwmon hw;
+	u32 hw_timer_hz;
+	u32 throttle_adj;
+	u32 sample_size_ms;
+	u32 intr_status;
+};
+
+#define to_bwmon(ptr)		container_of(ptr, struct bwmon, hw)
+#define has_hw_sampling(m)		(m->spec->hw_sampling)
+
+#define ENABLE_MASK BIT(0)
+#define THROTTLE_MASK 0x1F
+#define THROTTLE_SHIFT 16
+#define INT_ENABLE_V1	0x1
+#define INT_STATUS_MASK	0x03
+#define INT_STATUS_MASK_HWS	0xF0
+
+static DEFINE_SPINLOCK(glb_lock);
+static void mon_enable(struct bwmon *m)
+{
+	if (has_hw_sampling(m))
+		writel_relaxed((ENABLE_MASK | m->throttle_adj), MON2_EN(m));
+	else
+		writel_relaxed((ENABLE_MASK | m->throttle_adj), MON_EN(m));
+}
+
+static void mon_disable(struct bwmon *m)
+{
+	if (has_hw_sampling(m))
+		writel_relaxed(m->throttle_adj, MON2_EN(m));
+	else
+		writel_relaxed(m->throttle_adj, MON_EN(m));
+	/*
+	 * Ensure mon_disable() completes before mon_irq_clear(). If the IRQ
+	 * were cleared first and the count happened to trigger an IRQ, the
+	 * IRQ line would be left high with no one handling it.
+	 */
+	mb();
+}
+
+#define MON_CLEAR_BIT	0x1
+#define MON_CLEAR_ALL_BIT	0x2
+static void mon_clear(struct bwmon *m, bool clear_all)
+{
+	if (!has_hw_sampling(m)) {
+		writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
+		goto out;
+	}
+
+	if (clear_all)
+		writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
+	else
+		writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
+
+	/*
+	 * The counter clear and IRQ clear bits are not in the same 4KB
+	 * region. So, we need to make sure the counter clear is completed
+	 * before we try to clear the IRQ or do any other counter operations.
+	 */
+out:
+	mb();
+}
+
+#define	SAMPLE_WIN_LIM	0xFFFFF
+static void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms)
+{
+	u32 rate;
+
+	if (unlikely(sample_ms != m->sample_size_ms)) {
+		rate = mult_frac(sample_ms, m->hw_timer_hz, MSEC_PER_SEC);
+		m->sample_size_ms = sample_ms;
+		if (unlikely(rate > SAMPLE_WIN_LIM)) {
+			pr_warn("Sample window %u larger than hw limit: %u\n",
+					rate, SAMPLE_WIN_LIM);
+			rate = SAMPLE_WIN_LIM;
+		}
+		writel_relaxed(rate, MON2_SW(m));
+	}
+}
+
+static void mon_irq_enable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val |= 1 << m->mport;
+	writel_relaxed(val, GLB_INT_EN(m));
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val |= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_ENABLE_V1;
+	writel_relaxed(val, MON_INT_EN(m));
+	spin_unlock(&glb_lock);
+	/*
+	 * Make sure the IRQ enable completes for both the local and global
+	 * registers to avoid racing with other monitor calls.
+	 */
+	mb();
+}
+
+static void mon_irq_disable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val &= ~(1 << m->mport);
+	writel_relaxed(val, GLB_INT_EN(m));
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val &= has_hw_sampling(m) ? ~INT_STATUS_MASK_HWS : ~INT_ENABLE_V1;
+	writel_relaxed(val, MON_INT_EN(m));
+	spin_unlock(&glb_lock);
+	/*
+	 * Make sure the IRQ disable completes for both the local and global
+	 * registers to avoid racing with other monitor calls.
+	 */
+	mb();
+}
+
+static unsigned int mon_irq_status(struct bwmon *m)
+{
+	u32 mval;
+
+	mval = readl_relaxed(MON_INT_STATUS(m));
+
+	dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+			readl_relaxed(GLB_INT_STATUS(m)));
+
+	mval &= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+
+	return mval;
+}
+
+static void mon_irq_clear(struct bwmon *m)
+{
+	u32 intclr;
+
+	intclr = has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+
+	writel_relaxed(intclr, MON_INT_CLR(m));
+	mb();
+	writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
+	mb();
+}
+
+static int mon_set_throttle_adj(struct bw_hwmon *hw, uint adj)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	if (adj > THROTTLE_MASK)
+		return -EINVAL;
+
+	adj = (adj & THROTTLE_MASK) << THROTTLE_SHIFT;
+	m->throttle_adj = adj;
+
+	return 0;
+}
+
+static u32 mon_get_throttle_adj(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	return m->throttle_adj >> THROTTLE_SHIFT;
+}
+
+#define ZONE1_SHIFT	8
+#define ZONE2_SHIFT	16
+#define ZONE3_SHIFT	24
+#define ZONE0_ACTION	0x01	/* Increment zone 0 count */
+#define ZONE1_ACTION	0x09	/* Increment zone 1 & clear lower zones */
+#define ZONE2_ACTION	0x25	/* Increment zone 2 & clear lower zones */
+#define ZONE3_ACTION	0x95	/* Increment zone 3 & clear lower zones */
+static u32 calc_zone_actions(void)
+{
+	u32 zone_actions;
+
+	zone_actions = ZONE0_ACTION;
+	zone_actions |= ZONE1_ACTION << ZONE1_SHIFT;
+	zone_actions |= ZONE2_ACTION << ZONE2_SHIFT;
+	zone_actions |= ZONE3_ACTION << ZONE3_SHIFT;
+
+	return zone_actions;
+}
+
+#define ZONE_CNT_LIM	0xFFU
+#define UP_CNT_1	1
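+/*
+ * Build the per-zone sample-count thresholds: zone 3 fires after a single
+ * sample, zone 1 after hw->down_cnt samples, and zones 0 and 2 are
+ * effectively disabled by using the maximum count.
+ */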
+static u32 calc_zone_counts(struct bw_hwmon *hw)
+{
+	u32 zone_counts;
+
+	zone_counts = ZONE_CNT_LIM;
+	zone_counts |= min(hw->down_cnt, ZONE_CNT_LIM) << ZONE1_SHIFT;
+	zone_counts |= ZONE_CNT_LIM << ZONE2_SHIFT;
+	zone_counts |= UP_CNT_1 << ZONE3_SHIFT;
+
+	return zone_counts;
+}
+
+static unsigned int mbps_to_mb(unsigned long mbps, unsigned int ms)
+{
+	mbps *= ms;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	return mbps;
+}
+
+/*
+ * Define the 4 zones using HI, MED & LO thresholds:
+ * Zone 0: byte count < THRES_LO
+ * Zone 1: THRES_LO < byte count < THRES_MED
+ * Zone 2: THRES_MED < byte count < THRES_HI
+ * Zone 3: byte count > THRES_HI
+ */
+#define	THRES_LIM	0x7FFU
+static void set_zone_thres(struct bwmon *m, unsigned int sample_ms)
+{
+	struct bw_hwmon *hw = &(m->hw);
+	u32 hi, med, lo;
+
+	hi = mbps_to_mb(hw->up_wake_mbps, sample_ms);
+	med = mbps_to_mb(hw->down_wake_mbps, sample_ms);
+	lo = 0;
+
+	if (unlikely((hi > THRES_LIM) || (med > hi) || (lo > med))) {
+		pr_warn("Zone thres larger than hw limit: hi:%u med:%u lo:%u\n",
+				hi, med, lo);
+		hi = min(hi, THRES_LIM);
+		med = min(med, hi - 1);
+		lo = min(lo, med - 1);
+	}
+
+	writel_relaxed(hi, MON2_THRES_HI(m));
+	writel_relaxed(med, MON2_THRES_MED(m));
+	writel_relaxed(lo, MON2_THRES_LO(m));
+	dev_dbg(m->dev, "Thres: hi:%u med:%u lo:%u\n", hi, med, lo);
+}
+
+static void mon_set_zones(struct bwmon *m, unsigned int sample_ms)
+{
+	struct bw_hwmon *hw = &(m->hw);
+	u32 zone_cnt_thres = calc_zone_counts(hw);
+
+	mon_set_hw_sampling_window(m, sample_ms);
+	set_zone_thres(m, sample_ms);
+	/* Set the zone count thresholds for interrupts */
+	writel_relaxed(zone_cnt_thres, MON2_ZONE_CNT_THRES(m));
+
+	dev_dbg(m->dev, "Zone Count Thres: %0x\n", zone_cnt_thres);
+}
+
+static void mon_set_limit(struct bwmon *m, u32 count)
+{
+	writel_relaxed(count, MON_THRES(m));
+	dev_dbg(m->dev, "Thres: %08x\n", count);
+}
+
+static u32 mon_get_limit(struct bwmon *m)
+{
+	return readl_relaxed(MON_THRES(m));
+}
+
+#define THRES_HIT(status)	(status & BIT(0))
+#define OVERFLOW(status)	(status & BIT(1))
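+/*
+ * Read the raw byte counter and, based on the IRQ status, compensate for a
+ * counter overflow or a threshold-triggered wrap.
+ */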
+static unsigned long mon_get_count(struct bwmon *m)
+{
+	unsigned long count, status;
+
+	count = readl_relaxed(MON_CNT(m));
+	status = mon_irq_status(m);
+
+	dev_dbg(m->dev, "Counter: %08lx\n", count);
+
+	if (OVERFLOW(status) && m->spec->overflow)
+		count += 0xFFFFFFFF;
+	if (THRES_HIT(status) && m->spec->wrap_on_thres)
+		count += mon_get_limit(m);
+
+	dev_dbg(m->dev, "Actual Count: %08lx\n", count);
+
+	return count;
+}
+
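+/*
+ * Pick the zone to report: use the zone bits latched in the IRQ status if
+ * any are set, otherwise fall back to the highest zone with a non-zero
+ * count in MON2_ZONE_CNT (one byte per zone).
+ */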
+static unsigned int get_zone(struct bwmon *m)
+{
+	u32 zone_counts;
+	u32 zone;
+
+	zone = get_bitmask_order((m->intr_status & INT_STATUS_MASK_HWS) >> 4);
+	if (zone) {
+		zone--;
+	} else {
+		zone_counts = readl_relaxed(MON2_ZONE_CNT(m));
+		if (zone_counts) {
+			zone = get_bitmask_order(zone_counts) - 1;
+			zone /= 8;
+		}
+	}
+
+	m->intr_status = 0;
+	return zone;
+}
+
+static unsigned long mon_get_zone_stats(struct bwmon *m)
+{
+	unsigned int zone;
+	unsigned long count = 0;
+
+	zone = get_zone(m);
+
+	count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
+	count *= SZ_1M;
+
+	dev_dbg(m->dev, "Zone%d Max byte count: %08lx\n", zone, count);
+
+	return count;
+}
+
+/* ********** CPUBW specific code  ********** */
+
+/*
+ * Convert an MBps rate into bytes transferred over a window of 'ms'
+ * milliseconds, padded by tolerance_percent.
+ */
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
+				  unsigned int tolerance_percent)
+{
+	mbps *= (100 + tolerance_percent) * ms;
+	mbps /= 100;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	mbps *= SZ_1M;
+	return mbps;
+}
+
+static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+	unsigned long count;
+
+	mon_disable(m);
+	count = has_hw_sampling(m) ? mon_get_zone_stats(m) : mon_get_count(m);
+	mon_clear(m, false);
+	mon_irq_clear(m);
+	mon_enable(m);
+
+	return count;
+}
+
+static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
+{
+	unsigned long count;
+	u32 limit;
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_disable(m);
+	count = mon_get_count(m);
+	mon_clear(m, false);
+	mon_irq_clear(m);
+
+	if (likely(!m->spec->wrap_on_thres))
+		limit = bytes;
+	else
+		limit = max(bytes, 500000UL);
+
+	mon_set_limit(m, limit);
+	mon_enable(m);
+
+	return count;
+}
+
+static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_disable(m);
+	mon_clear(m, false);
+	mon_irq_clear(m);
+
+	mon_set_zones(m, sample_ms);
+	mon_enable(m);
+
+	return 0;
+}
+
+static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+{
+	struct bwmon *m = dev;
+
+	m->intr_status = mon_irq_status(m);
+	if (!m->intr_status)
+		return IRQ_NONE;
+
+	if (bw_hwmon_sample_end(&m->hw) > 0)
+		return IRQ_WAKE_THREAD;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev)
+{
+	struct bwmon *m = dev;
+
+	update_bw_hwmon(&m->hw);
+	return IRQ_HANDLED;
+}
+
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+	struct bwmon *m = to_bwmon(hw);
+	u32 limit;
+	u32 zone_actions = calc_zone_actions();
+	int ret;
+
+	ret = request_threaded_irq(m->irq, bwmon_intr_handler,
+				  bwmon_intr_thread,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	mon_disable(m);
+
+	mon_clear(m, true);
+	limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
+	if (has_hw_sampling(m)) {
+		mon_set_zones(m, hw->df->profile->polling_ms);
+		/* Set the zone actions to increment appropriate counters */
+		writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
+	} else {
+		mon_set_limit(m, limit);
+	}
+
+	mon_irq_clear(m);
+	mon_irq_enable(m);
+	mon_enable(m);
+
+	return 0;
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_irq_disable(m);
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_clear(m, true);
+	mon_irq_clear(m);
+}
+
+static int suspend_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_irq_disable(m);
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_irq_clear(m);
+
+	return 0;
+}
+
+static int resume_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+	int ret;
+
+	mon_clear(m, false);
+	ret = request_threaded_irq(m->irq, bwmon_intr_handler,
+				  bwmon_intr_thread,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	mon_irq_enable(m);
+	mon_enable(m);
+
+	return 0;
+}
+
+/*************************************************************************/
+
+static const struct bwmon_spec spec[] = {
+	{ .wrap_on_thres = true, .overflow = false, .throt_adj = false,
+		.hw_sampling = false},
+	{ .wrap_on_thres = false, .overflow = true, .throt_adj = false,
+		.hw_sampling = false},
+	{ .wrap_on_thres = false, .overflow = true, .throt_adj = true,
+		.hw_sampling = false},
+	{ .wrap_on_thres = false, .overflow = true, .throt_adj = true,
+		.hw_sampling = true},
+};
+
+static const struct of_device_id bimc_bwmon_match_table[] = {
+	{ .compatible = "qcom,bimc-bwmon", .data = &spec[0] },
+	{ .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
+	{ .compatible = "qcom,bimc-bwmon3", .data = &spec[2] },
+	{ .compatible = "qcom,bimc-bwmon4", .data = &spec[3] },
+	{}
+};
+
+static int bimc_bwmon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct bwmon *m;
+	const struct of_device_id *id;
+	int ret;
+	u32 data;
+
+	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+	if (!m)
+		return -ENOMEM;
+	m->dev = dev;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
+	if (ret) {
+		dev_err(dev, "mport not found!\n");
+		return ret;
+	}
+	m->mport = data;
+
+	id = of_match_device(bimc_bwmon_match_table, dev);
+	if (!id) {
+		dev_err(dev, "Unknown device type!\n");
+		return -ENODEV;
+	}
+	m->spec = id->data;
+
+	if (has_hw_sampling(m)) {
+		ret = of_property_read_u32(dev->of_node,
+				"qcom,hw-timer-hz", &data);
+		if (ret) {
+			dev_err(dev, "HW sampling rate not specified!\n");
+			return ret;
+		}
+		m->hw_timer_hz = data;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+	if (!res) {
+		dev_err(dev, "base not found!\n");
+		return -EINVAL;
+	}
+	m->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->base) {
+		dev_err(dev, "Unable to map base!\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global_base");
+	if (!res) {
+		dev_err(dev, "global_base not found!\n");
+		return -EINVAL;
+	}
+	m->global_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->global_base) {
+		dev_err(dev, "Unable to map global_base!\n");
+		return -ENOMEM;
+	}
+
+	m->irq = platform_get_irq(pdev, 0);
+	if (m->irq < 0) {
+		dev_err(dev, "Unable to get IRQ number\n");
+		return m->irq;
+	}
+
+	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!m->hw.of_node)
+		return -EINVAL;
+	m->hw.start_hwmon = &start_bw_hwmon;
+	m->hw.stop_hwmon = &stop_bw_hwmon;
+	m->hw.suspend_hwmon = &suspend_bw_hwmon;
+	m->hw.resume_hwmon = &resume_bw_hwmon;
+	m->hw.get_bytes_and_clear = &get_bytes_and_clear;
+	m->hw.set_thres =  &set_thres;
+	if (has_hw_sampling(m))
+		m->hw.set_hw_events = &set_hw_events;
+	if (m->spec->throt_adj) {
+		m->hw.set_throttle_adj = &mon_set_throttle_adj;
+		m->hw.get_throttle_adj = &mon_get_throttle_adj;
+	}
+
+	ret = register_bw_hwmon(dev, &m->hw);
+	if (ret) {
+		dev_err(dev, "Dev BW hwmon registration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct platform_driver bimc_bwmon_driver = {
+	.probe = bimc_bwmon_driver_probe,
+	.driver = {
+		.name = "bimc-bwmon",
+		.of_match_table = bimc_bwmon_match_table,
+	},
+};
+
+module_platform_driver(bimc_bwmon_driver);
+MODULE_DESCRIPTION("BIMC bandwidth monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7309c08..5b85b8d 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -925,7 +925,7 @@
 	struct devfreq *df = to_devfreq(dev);
 	int ret;
 	char str_governor[DEVFREQ_NAME_LEN + 1];
-	struct devfreq_governor *governor;
+	const struct devfreq_governor *governor, *prev_gov;
 
 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
 	if (ret != 1)
@@ -953,12 +953,21 @@
 			goto out;
 		}
 	}
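+	/*
+	 * Remember the current governor so it can be restored if the new
+	 * one fails to start.
+	 */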
+	prev_gov = df->governor;
 	df->governor = governor;
 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
-	if (ret)
+	if (ret) {
 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
 			 __func__, df->governor->name, ret);
+		if (prev_gov) {
+			df->governor = prev_gov;
+			strlcpy(df->governor_name, prev_gov->name,
+				DEVFREQ_NAME_LEN);
+			df->governor->event_handler(df, DEVFREQ_GOV_START,
+						    NULL);
+		}
+	}
 out:
 	mutex_unlock(&devfreq_list_lock);
 
@@ -1133,19 +1142,26 @@
 	struct devfreq *df = to_devfreq(d);
 	struct device *dev = df->dev.parent;
 	struct dev_pm_opp *opp;
+	unsigned int i = 0, max_state = df->profile->max_state;
+	bool use_opp;
 	ssize_t count = 0;
 	unsigned long freq = 0;
 
 	rcu_read_lock();
-	do {
-		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-		if (IS_ERR(opp))
-			break;
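+	/*
+	 * Prefer the OPP table when one is registered; otherwise fall back
+	 * to the profile's freq_table.
+	 */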
+	use_opp = dev_pm_opp_get_opp_count(dev) > 0;
+	while (use_opp || (!use_opp && i < max_state)) {
+		if (use_opp) {
+			opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+			if (IS_ERR(opp))
+				break;
+		} else {
+			freq = df->profile->freq_table[i++];
+		}
 
 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 				   "%lu ", freq);
 		freq++;
-	} while (1);
+	}
 	rcu_read_unlock();
 
 	/* Truncate the trailing space */
diff --git a/drivers/devfreq/devfreq_devbw.c b/drivers/devfreq/devfreq_devbw.c
new file mode 100644
index 0000000..5c7959c
--- /dev/null
+++ b/drivers/devfreq/devfreq_devbw.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devbw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+/* Has to be ULL to prevent overflow where this macro is used. */
+#define MBYTE (1ULL << 20)
+#define MAX_PATHS	2
+#define DBL_BUF		2
+
+struct dev_data {
+	struct msm_bus_vectors vectors[MAX_PATHS * DBL_BUF];
+	struct msm_bus_paths bw_levels[DBL_BUF];
+	struct msm_bus_scale_pdata bw_data;
+	int num_paths;
+	u32 bus_client;
+	int cur_idx;
+	int cur_ab;
+	int cur_ib;
+	long gov_ab;
+	struct devfreq *df;
+	struct devfreq_dev_profile dp;
+};
+
+static int set_bw(struct device *dev, int new_ib, int new_ab)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	int i, ret;
+
+	if (d->cur_ib == new_ib && d->cur_ab == new_ab)
+		return 0;
+
+	i = (d->cur_idx + 1) % DBL_BUF;
+
+	d->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[0].ab = new_ab / d->num_paths * MBYTE;
+	d->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[1].ab = new_ab / d->num_paths * MBYTE;
+
+	dev_dbg(dev, "BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
+
+	ret = msm_bus_scale_client_update_request(d->bus_client, i);
+	if (ret) {
+		dev_err(dev, "bandwidth request failed (%d)\n", ret);
+	} else {
+		d->cur_idx = i;
+		d->cur_ib = new_ib;
+		d->cur_ab = new_ab;
+	}
+
+	return ret;
+}
+
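+/*
+ * Pick the closest supported frequency: the highest table entry not above
+ * *freq when DEVFREQ_FLAG_LEAST_UPPER_BOUND is set, otherwise the lowest
+ * entry not below it.
+ */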
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+			u32 flags)
+{
+	int i;
+	unsigned long atmost, atleast, f;
+
+	atmost = p->freq_table[0];
+	atleast = p->freq_table[p->max_state-1];
+	for (i = 0; i < p->max_state; i++) {
+		f = p->freq_table[i];
+		if (f <= *freq)
+			atmost = max(f, atmost);
+		if (f >= *freq)
+			atleast = min(f, atleast);
+	}
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+		*freq = atmost;
+	else
+		*freq = atleast;
+}
+
+static int devbw_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	find_freq(&d->dp, freq, flags);
+	return set_bw(dev, *freq, d->gov_ab);
+}
+
+static int devbw_get_dev_status(struct device *dev,
+				struct devfreq_dev_status *stat)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	stat->private_data = &d->gov_ab;
+	return 0;
+}
+
+#define PROP_PORTS "qcom,src-dst-ports"
+#define PROP_TBL "qcom,bw-tbl"
+#define PROP_ACTIVE "qcom,active-only"
+
+int devfreq_add_devbw(struct device *dev)
+{
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 *data, ports[MAX_PATHS * 2];
+	const char *gov_name;
+	int ret, len, i, num_paths;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	dev_set_drvdata(dev, d);
+
+	if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
+		len /= sizeof(ports[0]);
+		if (len % 2 || len > ARRAY_SIZE(ports)) {
+			dev_err(dev, "Unexpected number of ports\n");
+			return -EINVAL;
+		}
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
+						 ports, len);
+		if (ret)
+			return ret;
+
+		num_paths = len / 2;
+	} else {
+		return -EINVAL;
+	}
+
+	d->bw_levels[0].vectors = &d->vectors[0];
+	d->bw_levels[1].vectors = &d->vectors[MAX_PATHS];
+	d->bw_data.usecase = d->bw_levels;
+	d->bw_data.num_usecases = ARRAY_SIZE(d->bw_levels);
+	d->bw_data.name = dev_name(dev);
+	d->bw_data.active_only = of_property_read_bool(dev->of_node,
+							PROP_ACTIVE);
+
+	for (i = 0; i < num_paths; i++) {
+		d->bw_levels[0].vectors[i].src = ports[2 * i];
+		d->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
+		d->bw_levels[1].vectors[i].src = ports[2 * i];
+		d->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
+	}
+	d->bw_levels[0].num_paths = num_paths;
+	d->bw_levels[1].num_paths = num_paths;
+	d->num_paths = num_paths;
+
+	p = &d->dp;
+	p->polling_ms = 50;
+	p->target = devbw_target;
+	p->get_dev_status = devbw_get_dev_status;
+
+	if (of_find_property(dev->of_node, PROP_TBL, &len)) {
+		len /= sizeof(*data);
+		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		p->freq_table = devm_kzalloc(dev,
+					     len * sizeof(*p->freq_table),
+					     GFP_KERNEL);
+		if (!p->freq_table)
+			return -ENOMEM;
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
+						 data, len);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < len; i++)
+			p->freq_table[i] = data[i];
+		p->max_state = len;
+	}
+
+	d->bus_client = msm_bus_scale_register_client(&d->bw_data);
+	if (!d->bus_client) {
+		dev_err(dev, "Unable to register bus client\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "performance";
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df)) {
+		msm_bus_scale_unregister_client(d->bus_client);
+		return PTR_ERR(d->df);
+	}
+
+	return 0;
+}
+
+int devfreq_remove_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	msm_bus_scale_unregister_client(d->bus_client);
+	devfreq_remove_device(d->df);
+	return 0;
+}
+
+int devfreq_suspend_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_suspend_device(d->df);
+}
+
+int devfreq_resume_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_resume_device(d->df);
+}
+
+static int devfreq_devbw_probe(struct platform_device *pdev)
+{
+	return devfreq_add_devbw(&pdev->dev);
+}
+
+static int devfreq_devbw_remove(struct platform_device *pdev)
+{
+	return devfreq_remove_devbw(&pdev->dev);
+}
+
+static const struct of_device_id devbw_match_table[] = {
+	{ .compatible = "qcom,devbw" },
+	{}
+};
+
+static struct platform_driver devbw_driver = {
+	.probe = devfreq_devbw_probe,
+	.remove = devfreq_devbw_remove,
+	.driver = {
+		.name = "devbw",
+		.of_match_table = devbw_match_table,
+	},
+};
+
+module_platform_driver(devbw_driver);
+MODULE_DESCRIPTION("Device DDR bandwidth voting driver for MSM SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_simple_dev.c b/drivers/devfreq/devfreq_simple_dev.c
new file mode 100644
index 0000000..9c99fcf
--- /dev/null
+++ b/drivers/devfreq/devfreq_simple_dev.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devfreq-simple-dev: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <trace/events/power.h>
+
+struct dev_data {
+	struct clk *clk;
+	struct devfreq *df;
+	struct devfreq_dev_profile profile;
+};
+
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+			u32 flags)
+{
+	int i;
+	unsigned long atmost, atleast, f;
+
+	atmost = p->freq_table[0];
+	atleast = p->freq_table[p->max_state-1];
+	for (i = 0; i < p->max_state; i++) {
+		f = p->freq_table[i];
+		if (f <= *freq)
+			atmost = max(f, atmost);
+		if (f >= *freq)
+			atleast = min(f, atleast);
+	}
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+		*freq = atmost;
+	else
+		*freq = atleast;
+}
+
+static int dev_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	unsigned long rfreq;
+
+	find_freq(&d->profile, freq, flags);
+
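+	/* The frequency table is in kHz; convert to Hz for the clock API. */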
+	rfreq = clk_round_rate(d->clk, *freq * 1000);
+	if (IS_ERR_VALUE(rfreq)) {
+		dev_err(dev, "devfreq: Cannot find matching frequency for %lu\n",
+			*freq);
+		return rfreq;
+	}
+
+	return clk_set_rate(d->clk, rfreq);
+}
+
+static int dev_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	unsigned long f;
+
+	f = clk_get_rate(d->clk);
+	if (IS_ERR_VALUE(f))
+		return f;
+	*freq = f / 1000;
+	return 0;
+}
+
+#define PROP_TBL "freq-tbl-khz"
+static int devfreq_clock_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 *data, poll;
+	const char *gov_name;
+	int ret, len, i, j;
+	unsigned long f;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	d->clk = devm_clk_get(dev, "devfreq_clk");
+	if (IS_ERR(d->clk))
+		return PTR_ERR(d->clk);
+
+	if (!of_find_property(dev->of_node, PROP_TBL, &len))
+		return -EINVAL;
+
+	len /= sizeof(*data);
+	data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	p = &d->profile;
+	p->freq_table = devm_kzalloc(dev, len * sizeof(*p->freq_table),
+				     GFP_KERNEL);
+	if (!p->freq_table)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dev->of_node, PROP_TBL, data, len);
+	if (ret)
+		return ret;
+
+	j = 0;
+	for (i = 0; i < len; i++) {
+		f = clk_round_rate(d->clk, data[i] * 1000);
+		if (IS_ERR_VALUE(f))
+			dev_warn(dev, "Unable to find dev rate for %d kHz\n",
+				 data[i]);
+		else
+			p->freq_table[j++] = f / 1000;
+	}
+	p->max_state = j;
+	devm_kfree(dev, data);
+
+	if (p->max_state == 0) {
+		dev_err(dev, "Error parsing property %s!\n", PROP_TBL);
+		return -EINVAL;
+	}
+
+	p->target = dev_target;
+	p->get_cur_freq = dev_get_cur_freq;
+	ret = dev_get_cur_freq(dev, &p->initial_freq);
+	if (ret)
+		return ret;
+
+	p->polling_ms = 50;
+	if (!of_property_read_u32(dev->of_node, "polling-ms", &poll))
+		p->polling_ms = poll;
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "performance";
+
+	if (of_property_read_bool(dev->of_node, "qcom,prepare-clk")) {
+		ret = clk_prepare(d->clk);
+		if (ret)
+			return ret;
+	}
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df)) {
+		ret = PTR_ERR(d->df);
+		goto add_err;
+	}
+
+	return 0;
+add_err:
+	if (of_property_read_bool(dev->of_node, "qcom,prepare-clk"))
+		clk_unprepare(d->clk);
+	return ret;
+}
+
+static int devfreq_clock_remove(struct platform_device *pdev)
+{
+	struct dev_data *d = platform_get_drvdata(pdev);
+
+	devfreq_remove_device(d->df);
+
+	return 0;
+}
+
+static const struct of_device_id devfreq_simple_match_table[] = {
+	{ .compatible = "devfreq-simple-dev" },
+	{}
+};
+
+static struct platform_driver devfreq_clock_driver = {
+	.probe = devfreq_clock_probe,
+	.remove = devfreq_clock_remove,
+	.driver = {
+		.name = "devfreq-simple-dev",
+		.of_match_table = devfreq_simple_match_table,
+	},
+};
+module_platform_driver(devfreq_clock_driver);
+MODULE_DESCRIPTION("Devfreq driver for setting generic device clock frequency");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
new file mode 100644
index 0000000..d7cc425
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -0,0 +1,976 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bw-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include <trace/events/power.h>
+#include "governor.h"
+#include "governor_bw_hwmon.h"
+
+#define NUM_MBPS_ZONES		10
+struct hwmon_node {
+	unsigned int guard_band_mbps;
+	unsigned int decay_rate;
+	unsigned int io_percent;
+	unsigned int bw_step;
+	unsigned int sample_ms;
+	unsigned int up_scale;
+	unsigned int up_thres;
+	unsigned int down_thres;
+	unsigned int down_count;
+	unsigned int hist_memory;
+	unsigned int hyst_trigger_count;
+	unsigned int hyst_length;
+	unsigned int idle_mbps;
+	unsigned int low_power_ceil_mbps;
+	unsigned int low_power_io_percent;
+	unsigned int low_power_delay;
+	unsigned int mbps_zones[NUM_MBPS_ZONES];
+
+	unsigned long prev_ab;
+	unsigned long *dev_ab;
+	unsigned long resume_freq;
+	unsigned long resume_ab;
+	unsigned long bytes;
+	unsigned long max_mbps;
+	unsigned long hist_max_mbps;
+	unsigned long hist_mem;
+	unsigned long hyst_peak;
+	unsigned long hyst_mbps;
+	unsigned long hyst_trig_win;
+	unsigned long hyst_en;
+	unsigned long above_low_power;
+	unsigned long prev_req;
+	unsigned int wake;
+	unsigned int down_cnt;
+	ktime_t prev_ts;
+	ktime_t hist_max_ts;
+	bool sampled;
+	bool mon_started;
+	struct list_head list;
+	void *orig_data;
+	struct bw_hwmon *hw;
+	struct devfreq_governor *gov;
+	struct attribute_group *attr_grp;
+};
+
+#define UP_WAKE 1
+#define DOWN_WAKE 2
+static DEFINE_SPINLOCK(irq_lock);
+
+static LIST_HEAD(hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = kstrtoint(buf, 10, &val);					\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+#define show_list_attr(name, n) \
+static ssize_t show_list_##name(struct device *dev,			\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	unsigned int i, cnt = 0;					\
+									\
+	for (i = 0; i < n && hw->name[i]; i++)				\
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u ",	\
+				hw->name[i]);				\
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");		\
+	return cnt;							\
+}
+
+#define store_list_attr(name, n, _min, _max) \
+static ssize_t store_list_##name(struct device *dev,			\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	int ret;							\
+	unsigned int i = 0, val;					\
+									\
+	do {								\
+		ret = kstrtoint(buf, 10, &val);				\
+		if (ret)						\
+			break;						\
+		buf = strnchr(buf, PAGE_SIZE, ' ');			\
+		if (buf)						\
+			buf++;						\
+		val = max(val, _min);					\
+		val = min(val, _max);					\
+		hw->name[i] = val;					\
+		i++;							\
+	} while (buf && i < n - 1);					\
+	if (i < 1)							\
+		return -EINVAL;						\
+	hw->name[i] = 0;						\
+	return count;							\
+}
+
+#define gov_list_attr(__attr, n, min, max)	\
+show_list_attr(__attr, n)			\
+store_list_attr(__attr, n, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
+
+#define MIN_MS	10U
+#define MAX_MS	500U
+
+/* Returns MBps of read/writes for the sampling window. */
+static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
+{
+	bytes *= USEC_PER_SEC;
+	do_div(bytes, us);
+	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
+	return bytes;
+}
+
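+/* Returns the bytes transferred in @ms milliseconds at a rate of @mbps. */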
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
+{
+	mbps *= ms;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	mbps *= SZ_1M;
+	return mbps;
+}
+
+static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	ktime_t ts;
+	unsigned long bytes, mbps;
+	unsigned int us;
+	int wake = 0;
+
+	df = hwmon->df;
+	node = df->data;
+
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+
+	bytes = hwmon->get_bytes_and_clear(hwmon);
+	bytes += node->bytes;
+	node->bytes = 0;
+
+	mbps = bytes_to_mbps(bytes, us);
+	node->max_mbps = max(node->max_mbps, mbps);
+
+	/*
+	 * If the measured bandwidth in a micro sample is greater than the
+	 * wake-up threshold, it indicates a non-trivial increase in load.
+	 * So, have the governor ignore historical idle time or low
+	 * bandwidth usage and do the bandwidth calculation based on just
+	 * this micro sample.
+	 */
+	if (mbps > node->hw->up_wake_mbps) {
+		wake = UP_WAKE;
+	} else if (mbps < node->hw->down_wake_mbps) {
+		if (node->down_cnt)
+			node->down_cnt--;
+		if (node->down_cnt <= 0)
+			wake = DOWN_WAKE;
+	}
+
+	node->prev_ts = ts;
+	node->wake = wake;
+	node->sampled = true;
+
+	trace_bw_hwmon_meas(dev_name(df->dev.parent),
+				mbps,
+				us,
+				wake);
+
+	return wake;
+}
+
+static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	unsigned long bytes, mbps;
+	int wake = 0;
+
+	df = hwmon->df;
+	node = df->data;
+
+	/*
+	 * If this read is in response to an IRQ, the HW monitor should
+	 * return the measurement in the micro sample that triggered the IRQ.
+	 * Otherwise, it should return the maximum measured value in any
+	 * micro sample since the last time we called get_bytes_and_clear().
+	 */
+	bytes = hwmon->get_bytes_and_clear(hwmon);
+	mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
+	node->max_mbps = mbps;
+
+	if (mbps > node->hw->up_wake_mbps)
+		wake = UP_WAKE;
+	else if (mbps < node->hw->down_wake_mbps)
+		wake = DOWN_WAKE;
+
+	node->wake = wake;
+	node->sampled = true;
+
+	trace_bw_hwmon_meas(dev_name(df->dev.parent),
+				mbps,
+				node->sample_ms * USEC_PER_MSEC,
+				wake);
+
+	return 1;
+}
+
+static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+	if (hwmon->set_hw_events)
+		return __bw_hwmon_hw_sample_end(hwmon);
+	else
+		return __bw_hwmon_sw_sample_end(hwmon);
+}
+
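+/*
+ * Called by HW monitor drivers at the end of a sampling window, typically
+ * from IRQ context. Serialized against get_bw_and_set_irq() via irq_lock.
+ * Returns non-zero when the governor should re-evaluate its vote.
+ */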
+int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+	unsigned long flags;
+	int wake;
+
+	spin_lock_irqsave(&irq_lock, flags);
+	wake = __bw_hwmon_sample_end(hwmon);
+	spin_unlock_irqrestore(&irq_lock, flags);
+
+	return wake;
+}
+
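+/*
+ * Returns the lowest configured mbps zone that can accommodate @mbps, or
+ * the devfreq max_freq if @mbps exceeds every configured zone.
+ */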
+static unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
+{
+	int i;
+
+	for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
+		if (node->mbps_zones[i] >= mbps)
+			return node->mbps_zones[i];
+
+	return node->hw->df->max_freq;
+}
+
+#define MIN_MBPS	500UL
+#define HIST_PEAK_TOL	60
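+/*
+ * Core decision function: turn the measured bandwidth into a bandwidth
+ * vote (*ab) and a device frequency (*freq), and reprogram the HW monitor
+ * thresholds for the next sample window. Returns the requested MBps before
+ * the guard band and decay are applied.
+ */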
+static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
+					unsigned long *freq, unsigned long *ab)
+{
+	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
+	unsigned long meas_mbps_zone;
+	unsigned long hist_lo_tol, hyst_lo_tol;
+	struct bw_hwmon *hw = node->hw;
+	unsigned int new_bw, io_percent;
+	ktime_t ts;
+	unsigned int ms = 0;
+
+	spin_lock_irqsave(&irq_lock, flags);
+
+	if (!hw->set_hw_events) {
+		ts = ktime_get();
+		ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
+	}
+	if (!node->sampled || ms >= node->sample_ms)
+		__bw_hwmon_sample_end(node->hw);
+	node->sampled = false;
+
+	req_mbps = meas_mbps = node->max_mbps;
+	node->max_mbps = 0;
+
+	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
+	/* Remember historic peak in the past hist_mem decision windows. */
+	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
+		/* If new max or no history */
+		node->hist_max_mbps = meas_mbps;
+		node->hist_mem = node->hist_memory;
+	} else if (meas_mbps >= hist_lo_tol) {
+		/*
+		 * If subsequent peaks come close (within tolerance) to but
+		 * less than the historic peak, then reset the history start,
+		 * but not the peak value.
+		 */
+		node->hist_mem = node->hist_memory;
+	} else {
+		/* Count down history expiration. */
+		if (node->hist_mem)
+			node->hist_mem--;
+	}
+
+	/* Keep track of whether we are in low power mode consistently. */
+	if (meas_mbps > node->low_power_ceil_mbps)
+		node->above_low_power = node->low_power_delay;
+	if (node->above_low_power)
+		node->above_low_power--;
+
+	if (node->above_low_power)
+		io_percent = node->io_percent;
+	else
+		io_percent = node->low_power_io_percent;
+
+	/*
+	 * Compute the AB value that corresponds to the lowest mbps zone
+	 * greater than or equal to the "frequency" the current measurement
+	 * will pick. This upper limit is useful for keeping any prediction
+	 * mechanisms power friendly.
+	 */
+	meas_mbps_zone = (meas_mbps * 100) / io_percent;
+	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
+	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
+	meas_mbps_zone = max(meas_mbps, meas_mbps_zone);
+
+	/*
+	 * If this is a wake up due to BW increase, vote much higher BW than
+	 * what we measure to stay ahead of increasing traffic and then set
+	 * it up to vote for measured BW if we see down_count short sample
+	 * windows of low traffic.
+	 */
+	if (node->wake == UP_WAKE) {
+		req_mbps += ((meas_mbps - node->prev_req)
+				* node->up_scale) / 100;
+		/*
+		 * However, if the measured load is less than the historic
+		 * peak but the over-request is higher than it, limit the
+		 * over-requesting to the historic peak.
+		 */
+		if (req_mbps > node->hist_max_mbps
+		    && meas_mbps < node->hist_max_mbps)
+			req_mbps = node->hist_max_mbps;
+
+		req_mbps = min(req_mbps, meas_mbps_zone);
+	}
+
+	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
+	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
+		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
+		node->hyst_peak = 0;
+		node->hyst_trig_win = node->hyst_length;
+		node->hyst_mbps = meas_mbps;
+	}
+
+	/*
+	 * Check node->max_mbps to avoid double counting peaks that cause
+	 * early termination of a window.
+	 */
+	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
+	    && !node->max_mbps) {
+		node->hyst_peak++;
+		if (node->hyst_peak >= node->hyst_trigger_count
+		    || node->hyst_en)
+			node->hyst_en = node->hyst_length;
+	}
+
+	if (node->hyst_trig_win)
+		node->hyst_trig_win--;
+	if (node->hyst_en)
+		node->hyst_en--;
+
+	if (!node->hyst_trig_win && !node->hyst_en) {
+		node->hyst_peak = 0;
+		node->hyst_mbps = 0;
+	}
+
+	if (node->hyst_en) {
+		if (meas_mbps > node->idle_mbps)
+			req_mbps = max(req_mbps, node->hyst_mbps);
+	}
+
+	/* Stretch the short sample window size, if the traffic is too low */
+	if (meas_mbps < MIN_MBPS) {
+		hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
+					* (100 + node->up_thres)) / 100;
+		hw->down_wake_mbps = 0;
+		hw->undo_over_req_mbps = 0;
+		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
+					node->sample_ms);
+	} else {
+		/*
+		 * Up wake vs down wake are intentionally a percentage of
+		 * req_mbps vs meas_mbps to make sure the over requesting
+		 * phase is handled properly. We only want to wake up and
+		 * reduce the vote based on the measured mbps being less than
+		 * the previous measurement that caused the "over request".
+		 */
+		hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
+		hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
+		if (node->wake == UP_WAKE)
+			hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone);
+		else
+			hw->undo_over_req_mbps = 0;
+		thres = mbps_to_bytes(meas_mbps, node->sample_ms);
+	}
+
+	if (hw->set_hw_events) {
+		hw->down_cnt = node->down_count;
+		hw->set_hw_events(hw, node->sample_ms);
+	} else {
+		node->down_cnt = node->down_count;
+		node->bytes = hw->set_thres(hw, thres);
+	}
+
+	node->wake = 0;
+	node->prev_req = req_mbps;
+
+	spin_unlock_irqrestore(&irq_lock, flags);
+
+	adj_mbps = req_mbps + node->guard_band_mbps;
+
+	if (adj_mbps > node->prev_ab) {
+		new_bw = adj_mbps;
+	} else {
+		new_bw = adj_mbps * node->decay_rate
+			+ node->prev_ab * (100 - node->decay_rate);
+		new_bw /= 100;
+	}
+
+	node->prev_ab = new_bw;
+	if (ab)
+		*ab = roundup(new_bw, node->bw_step);
+
+	*freq = (new_bw * 100) / io_percent;
+	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
+				new_bw,
+				*freq,
+				hw->up_wake_mbps,
+				hw->down_wake_mbps);
+	return req_mbps;
+}
+
+static struct hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+	struct hwmon_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &hwmon_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node ||
+		    (!node->hw->dev && !node->hw->of_node &&
+		     node->gov == df->governor)) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
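+/*
+ * Entry point for HW monitor drivers to request an immediate re-evaluation
+ * of the vote (e.g. from a threshold IRQ). The devfreq monitor is stopped
+ * around the update so the periodic work doesn't race with it.
+ */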
+int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	int ret;
+
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = df->data;
+	if (!node)
+		return -ENODEV;
+
+	if (!node->mon_started)
+		return -EBUSY;
+
+	dev_dbg(df->dev.parent, "Got update request\n");
+	devfreq_monitor_stop(df);
+
+	mutex_lock(&df->lock);
+	ret = update_devfreq(df);
+	if (ret)
+		dev_err(df->dev.parent,
+			"Unable to update freq on request!\n");
+	mutex_unlock(&df->lock);
+
+	devfreq_monitor_start(df);
+
+	return 0;
+}
+
+static int start_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+	struct device *dev = df->dev.parent;
+	unsigned long mbps;
+	int ret;
+
+	node->prev_ts = ktime_get();
+
+	if (init) {
+		node->prev_ab = 0;
+		node->resume_freq = 0;
+		node->resume_ab = 0;
+		mbps = (df->previous_freq * node->io_percent) / 100;
+		hw->up_wake_mbps = mbps;
+		hw->down_wake_mbps = MIN_MBPS;
+		hw->undo_over_req_mbps = 0;
+		ret = hw->start_hwmon(hw, mbps);
+	} else {
+		ret = hw->resume_hwmon(hw);
+	}
+
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+		return ret;
+	}
+
+	if (init)
+		devfreq_monitor_start(df);
+	else
+		devfreq_monitor_resume(df);
+
+	node->mon_started = true;
+
+	return 0;
+}
+
+static void stop_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	node->mon_started = false;
+
+	if (init) {
+		devfreq_monitor_stop(df);
+		hw->stop_hwmon(hw);
+	} else {
+		devfreq_monitor_suspend(df);
+		hw->suspend_hwmon(hw);
+	}
+
+}
+
+static int gov_start(struct devfreq *df)
+{
+	int ret = 0;
+	struct device *dev = df->dev.parent;
+	struct hwmon_node *node;
+	struct bw_hwmon *hw;
+	struct devfreq_dev_status stat;
+
+	node = find_hwmon_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+
+	stat.private_data = NULL;
+	if (df->profile->get_dev_status)
+		ret = df->profile->get_dev_status(df->dev.parent, &stat);
+	if (ret || !stat.private_data)
+		dev_warn(dev, "Device doesn't take AB votes!\n");
+	else
+		node->dev_ab = stat.private_data;
+
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	if (start_monitor(df, true))
+		goto err_start;
+
+	ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+err_sysfs:
+	stop_monitor(df, true);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	node->dev_ab = NULL;
+	return ret;
+}
+
+static void gov_stop(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+	stop_monitor(df, true);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	/*
+	 * Not all governors know about this additional extended device
+	 * configuration. To avoid leaving the extended configuration in a
+	 * stale state, set it to 0 and let the next governor take it from
+	 * there.
+	 */
+	if (node->dev_ab)
+		*node->dev_ab = 0;
+	node->dev_ab = NULL;
+}
+
+static int gov_suspend(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	unsigned long resume_freq = df->previous_freq;
+	unsigned long resume_ab = node->dev_ab ? *node->dev_ab : 0;
+
+	if (!node->hw->suspend_hwmon)
+		return -ENOSYS;
+
+	if (node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already suspended!\n");
+		return -EBUSY;
+	}
+
+	stop_monitor(df, false);
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = resume_freq;
+	node->resume_ab = resume_ab;
+
+	return 0;
+}
+
+static int gov_resume(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+
+	if (!node->hw->resume_hwmon)
+		return -ENOSYS;
+
+	if (!node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already resumed!\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = 0;
+	node->resume_ab = 0;
+
+	return start_monitor(df, false);
+}
+
+static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct hwmon_node *node = df->data;
+
+	/* Suspend/resume sequence */
+	if (!node->mon_started) {
+		*freq = node->resume_freq;
+		if (node->dev_ab)
+			*node->dev_ab = node->resume_ab;
+		return 0;
+	}
+
+	get_bw_and_set_irq(node, freq, node->dev_ab);
+
+	return 0;
+}
+
+static ssize_t store_throttle_adj(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct hwmon_node *node = df->data;
+	int ret;
+	unsigned int val;
+
+	if (!node->hw->set_throttle_adj)
+		return -ENOSYS;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	ret = node->hw->set_throttle_adj(node->hw, val);
+
+	if (!ret)
+		return count;
+	else
+		return ret;
+}
+
+static ssize_t show_throttle_adj(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct hwmon_node *node = df->data;
+	unsigned int val;
+
+	if (!node->hw->get_throttle_adj)
+		val = 0;
+	else
+		val = node->hw->get_throttle_adj(node->hw);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static DEVICE_ATTR(throttle_adj, 0644, show_throttle_adj,
+						store_throttle_adj);
+
+gov_attr(guard_band_mbps, 0U, 2000U);
+gov_attr(decay_rate, 0U, 100U);
+gov_attr(io_percent, 1U, 100U);
+gov_attr(bw_step, 50U, 1000U);
+gov_attr(sample_ms, 1U, 50U);
+gov_attr(up_scale, 0U, 500U);
+gov_attr(up_thres, 1U, 100U);
+gov_attr(down_thres, 0U, 90U);
+gov_attr(down_count, 0U, 90U);
+gov_attr(hist_memory, 0U, 90U);
+gov_attr(hyst_trigger_count, 0U, 90U);
+gov_attr(hyst_length, 0U, 90U);
+gov_attr(idle_mbps, 0U, 2000U);
+gov_attr(low_power_ceil_mbps, 0U, 2500U);
+gov_attr(low_power_io_percent, 1U, 100U);
+gov_attr(low_power_delay, 1U, 60U);
+gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_guard_band_mbps.attr,
+	&dev_attr_decay_rate.attr,
+	&dev_attr_io_percent.attr,
+	&dev_attr_bw_step.attr,
+	&dev_attr_sample_ms.attr,
+	&dev_attr_up_scale.attr,
+	&dev_attr_up_thres.attr,
+	&dev_attr_down_thres.attr,
+	&dev_attr_down_count.attr,
+	&dev_attr_hist_memory.attr,
+	&dev_attr_hyst_trigger_count.attr,
+	&dev_attr_hyst_length.attr,
+	&dev_attr_idle_mbps.attr,
+	&dev_attr_low_power_ceil_mbps.attr,
+	&dev_attr_low_power_io_percent.attr,
+	&dev_attr_low_power_delay.attr,
+	&dev_attr_mbps_zones.attr,
+	&dev_attr_throttle_adj.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "bw_hwmon",
+	.attrs = dev_attr,
+};
+
+static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+	struct hwmon_node *node;
+	struct bw_hwmon *hw;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = gov_start(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent,
+			"Enabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		gov_stop(df);
+		dev_dbg(df->dev.parent,
+			"Disabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		/*
+		 * Suspend/resume the HW monitor around the interval update
+		 * to prevent the HW monitor IRQ from trying to stop/start
+		 * the delayed workqueue while the interval update is in
+		 * progress.
+		 */
+		node = df->data;
+		hw = node->hw;
+		hw->suspend_hwmon(hw);
+		devfreq_interval_update(df, &sample_ms);
+		ret = hw->resume_hwmon(hw);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to resume HW monitor (%d)\n", ret);
+			return ret;
+		}
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		ret = gov_suspend(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to suspend BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		ret = gov_resume(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to resume BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_gov_bw_hwmon = {
+	.name = "bw_hwmon",
+	.get_target_freq = devfreq_bw_hwmon_get_freq,
+	.event_handler = devfreq_bw_hwmon_ev_handler,
+};
+
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
+{
+	int ret = 0;
+	struct hwmon_node *node;
+	struct attribute_group *attr_grp;
+
+	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	if (hwmon->gov) {
+		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
+		if (!attr_grp)
+			return -ENOMEM;
+
+		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
+		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
+		attr_grp->name = hwmon->gov->name;
+		attr_grp->attrs = dev_attr;
+
+		node->gov = hwmon->gov;
+		node->attr_grp = attr_grp;
+	} else {
+		node->gov = &devfreq_gov_bw_hwmon;
+		node->attr_grp = &dev_attr_group;
+	}
+
+	node->guard_band_mbps = 100;
+	node->decay_rate = 90;
+	node->io_percent = 16;
+	node->low_power_ceil_mbps = 0;
+	node->low_power_io_percent = 16;
+	node->low_power_delay = 60;
+	node->bw_step = 190;
+	node->sample_ms = 50;
+	node->up_scale = 0;
+	node->up_thres = 10;
+	node->down_thres = 0;
+	node->down_count = 3;
+	node->hist_memory = 0;
+	node->hyst_trigger_count = 3;
+	node->hyst_length = 0;
+	node->idle_mbps = 400;
+	node->mbps_zones[0] = 0;
+	node->hw = hwmon;
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &hwmon_list);
+	mutex_unlock(&list_lock);
+
+	if (hwmon->gov) {
+		ret = devfreq_add_governor(hwmon->gov);
+	} else {
+		mutex_lock(&state_lock);
+		if (!use_cnt)
+			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
+		if (!ret)
+			use_cnt++;
+		mutex_unlock(&state_lock);
+	}
+
+	if (!ret)
+		dev_info(dev, "BW HWmon governor registered.\n");
+	else
+		dev_err(dev, "BW HWmon governor registration failed!\n");
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_hwmon.h b/drivers/devfreq/governor_bw_hwmon.h
new file mode 100644
index 0000000..7578399
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_BW_HWMON_H
+#define _GOVERNOR_BW_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct bw_hwmon - dev BW HW monitor info
+ * @start_hwmon:		Start the HW monitoring of the dev BW
+ * @stop_hwmon:			Stop the HW monitoring of dev BW
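+ * @suspend_hwmon:		Suspend the HW monitoring of dev BW
+ * @resume_hwmon:		Resume the HW monitoring of dev BW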
+ * @set_thres:			Set the count threshold to generate an IRQ
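+ * @set_hw_events:		Program the HW monitor's sampling window so
+ *				that it generates its own sample-end events
+ *				instead of using a software byte threshold.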
+ * @get_bytes_and_clear:	Get the bytes transferred since the last call
+ *				and reset the counter to start over.
+ * @set_throttle_adj:		Set throttle adjust field to the given value
+ * @get_throttle_adj:		Get the value written to throttle adjust field
+ * @dev:			Pointer to device that this HW monitor can
+ *				monitor.
+ * @of_node:			OF node of device that this HW monitor can
+ *				monitor.
+ * @gov:			devfreq_governor struct that should be used
+ *				when registering this HW monitor with devfreq.
+ *				Only the name field is expected to be
+ *				initialized.
+ * @df:				Devfreq node that this HW monitor is being
+ *				used for. NULL when not actively in use and
+ *				non-NULL when in use.
+ *
+ * One of dev, of_node or gov needs to be specified for a
+ * successful registration.
+ *
+ */
+struct bw_hwmon {
+	int (*start_hwmon)(struct bw_hwmon *hw, unsigned long mbps);
+	void (*stop_hwmon)(struct bw_hwmon *hw);
+	int (*suspend_hwmon)(struct bw_hwmon *hw);
+	int (*resume_hwmon)(struct bw_hwmon *hw);
+	unsigned long (*set_thres)(struct bw_hwmon *hw, unsigned long bytes);
+	unsigned long (*set_hw_events)(struct bw_hwmon *hw,
+					unsigned int sample_ms);
+	unsigned long (*get_bytes_and_clear)(struct bw_hwmon *hw);
+	int (*set_throttle_adj)(struct bw_hwmon *hw, uint adj);
+	u32 (*get_throttle_adj)(struct bw_hwmon *hw);
+	struct device *dev;
+	struct device_node *of_node;
+	struct devfreq_governor *gov;
+
+	unsigned long up_wake_mbps;
+	unsigned long undo_over_req_mbps;
+	unsigned long down_wake_mbps;
+	unsigned int down_cnt;
+
+	struct devfreq *df;
+};
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon);
+int update_bw_hwmon(struct bw_hwmon *hwmon);
+int bw_hwmon_sample_end(struct bw_hwmon *hwmon);
+#else
+static inline int register_bw_hwmon(struct device *dev,
+					struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_BW_HWMON_H */
diff --git a/drivers/devfreq/governor_cache_hwmon.c b/drivers/devfreq/governor_cache_hwmon.c
new file mode 100644
index 0000000..3dfafef
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cache-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include <trace/events/power.h>
+#include "governor.h"
+#include "governor_cache_hwmon.h"
+
+struct cache_hwmon_node {
+	unsigned int cycles_per_low_req;
+	unsigned int cycles_per_med_req;
+	unsigned int cycles_per_high_req;
+	unsigned int min_busy;
+	unsigned int max_busy;
+	unsigned int tolerance_mrps;
+	unsigned int guard_band_mhz;
+	unsigned int decay_rate;
+	unsigned long prev_mhz;
+	ktime_t prev_ts;
+	bool mon_started;
+	struct list_head list;
+	void *orig_data;
+	struct cache_hwmon *hw;
+	struct attribute_group *attr_grp;
+};
+
+static LIST_HEAD(cache_hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(register_lock);
+
+static DEFINE_MUTEX(monitor_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct cache_hwmon_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	int ret;							\
+	unsigned int val;						\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct cache_hwmon_node *hw = df->data;				\
+	ret = kstrtoint(buf, 10, &val);					\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+#define MIN_MS	10U
+#define MAX_MS	500U
+
+static struct cache_hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+	struct cache_hwmon_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &cache_hwmon_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+static unsigned long measure_mrps_and_set_irq(struct cache_hwmon_node *node,
+			struct mrps_stats *stat)
+{
+	ktime_t ts;
+	unsigned int us;
+	struct cache_hwmon *hw = node->hw;
+
+	/*
+	 * Since we are stopping the counters, we don't want this short work
+	 * to be interrupted by other tasks and cause the measurements to be
+	 * wrong. Interrupts are not blocked so as not to hurt interrupt
+	 * latency; they should be short anyway since they run in atomic
+	 * context.
+	 */
+	preempt_disable();
+
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (!us)
+		us = 1;
+
+	hw->meas_mrps_and_set_irq(hw, node->tolerance_mrps, us, stat);
+	node->prev_ts = ts;
+
+	preempt_enable();
+
+	trace_cache_hwmon_meas(dev_name(hw->df->dev.parent), stat->mrps[HIGH],
+			       stat->mrps[MED], stat->mrps[LOW],
+			       stat->busy_percent, us);
+	return 0;
+}
+
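+/*
+ * Estimate the cache frequency needed to serve the measured request rates
+ * at the allowed busy percentage, applying the decay rate when the estimate
+ * drops and adding the configured guard band.
+ */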
+static void compute_cache_freq(struct cache_hwmon_node *node,
+		struct mrps_stats *mrps, unsigned long *freq)
+{
+	unsigned long new_mhz;
+	unsigned int busy;
+
+	new_mhz = mrps->mrps[HIGH] * node->cycles_per_high_req
+		+ mrps->mrps[MED] * node->cycles_per_med_req
+		+ mrps->mrps[LOW] * node->cycles_per_low_req;
+
+	busy = max(node->min_busy, mrps->busy_percent);
+	busy = min(node->max_busy, busy);
+
+	new_mhz *= 100;
+	new_mhz /= busy;
+
+	if (new_mhz < node->prev_mhz) {
+		new_mhz = new_mhz * node->decay_rate + node->prev_mhz
+				* (100 - node->decay_rate);
+		new_mhz /= 100;
+	}
+	node->prev_mhz = new_mhz;
+
+	new_mhz += node->guard_band_mhz;
+	*freq = new_mhz * 1000;
+	trace_cache_hwmon_update(dev_name(node->hw->df->dev.parent), *freq);
+}
+
+#define TOO_SOON_US	(1 * USEC_PER_MSEC)
+int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+	struct cache_hwmon_node *node;
+	struct devfreq *df;
+	ktime_t ts;
+	unsigned int us;
+	int ret;
+
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = df->data;
+	if (!node)
+		return -ENODEV;
+
+	mutex_lock(&monitor_lock);
+	if (!node->mon_started) {
+		mutex_unlock(&monitor_lock);
+		return -EBUSY;
+	}
+
+	dev_dbg(df->dev.parent, "Got update request\n");
+	devfreq_monitor_stop(df);
+
+	/*
+	 * Don't recalculate the cache freq if the interrupt comes right
+	 * after a previous cache freq calculation. This is done for two
+	 * reasons:
+	 *
+	 * 1. Sampling the cache request rate over a very short duration can
+	 *    result in a very inaccurate measurement due to short bursts.
+	 * 2. This can only happen if the limit was hit very close to the end
+	 *    of the previous sample period, which means the current cache
+	 *    request estimate is not far off and doesn't need to be
+	 *    readjusted.
+	 */
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (us > TOO_SOON_US) {
+		mutex_lock(&df->lock);
+		ret = update_devfreq(df);
+		if (ret)
+			dev_err(df->dev.parent,
+				"Unable to update freq on request!\n");
+		mutex_unlock(&df->lock);
+	}
+
+	devfreq_monitor_start(df);
+
+	mutex_unlock(&monitor_lock);
+	return 0;
+}
+
+static int devfreq_cache_hwmon_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct mrps_stats stat;
+	struct cache_hwmon_node *node = df->data;
+
+	memset(&stat, 0, sizeof(stat));
+	measure_mrps_and_set_irq(node, &stat);
+	compute_cache_freq(node, &stat, freq);
+
+	return 0;
+}
+
+gov_attr(cycles_per_low_req, 1U, 100U);
+gov_attr(cycles_per_med_req, 1U, 100U);
+gov_attr(cycles_per_high_req, 1U, 100U);
+gov_attr(min_busy, 1U, 100U);
+gov_attr(max_busy, 1U, 100U);
+gov_attr(tolerance_mrps, 0U, 100U);
+gov_attr(guard_band_mhz, 0U, 500U);
+gov_attr(decay_rate, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_cycles_per_low_req.attr,
+	&dev_attr_cycles_per_med_req.attr,
+	&dev_attr_cycles_per_high_req.attr,
+	&dev_attr_min_busy.attr,
+	&dev_attr_max_busy.attr,
+	&dev_attr_tolerance_mrps.attr,
+	&dev_attr_guard_band_mhz.attr,
+	&dev_attr_decay_rate.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "cache_hwmon",
+	.attrs = dev_attr,
+};
+
+static int start_monitoring(struct devfreq *df)
+{
+	int ret;
+	struct mrps_stats mrps;
+	struct device *dev = df->dev.parent;
+	struct cache_hwmon_node *node;
+	struct cache_hwmon *hw;
+
+	node = find_hwmon_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	node->prev_ts = ktime_get();
+	node->prev_mhz = 0;
+	mrps.mrps[HIGH] = (df->previous_freq / 1000) - node->guard_band_mhz;
+	mrps.mrps[HIGH] /= node->cycles_per_high_req;
+	mrps.mrps[MED] = mrps.mrps[LOW] = 0;
+
+	ret = hw->start_hwmon(hw, &mrps);
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor!\n");
+		goto err_start;
+	}
+
+	mutex_lock(&monitor_lock);
+	devfreq_monitor_start(df);
+	node->mon_started = true;
+	mutex_unlock(&monitor_lock);
+
+	ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
+	if (ret) {
+		dev_err(dev, "Error creating sys entries!\n");
+		goto sysfs_fail;
+	}
+
+	return 0;
+
+sysfs_fail:
+	mutex_lock(&monitor_lock);
+	node->mon_started = false;
+	devfreq_monitor_stop(df);
+	mutex_unlock(&monitor_lock);
+	hw->stop_hwmon(hw);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	return ret;
+}
+
+static void stop_monitoring(struct devfreq *df)
+{
+	struct cache_hwmon_node *node = df->data;
+	struct cache_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
+	mutex_lock(&monitor_lock);
+	node->mon_started = false;
+	devfreq_monitor_stop(df);
+	mutex_unlock(&monitor_lock);
+	hw->stop_hwmon(hw);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+}
+
+static int devfreq_cache_hwmon_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = start_monitoring(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent, "Enabled Cache HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		stop_monitoring(df);
+		dev_dbg(df->dev.parent, "Disabled Cache HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		devfreq_interval_update(df, &sample_ms);
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_cache_hwmon = {
+	.name = "cache_hwmon",
+	.get_target_freq = devfreq_cache_hwmon_get_freq,
+	.event_handler = devfreq_cache_hwmon_ev_handler,
+};
+
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon)
+{
+	int ret = 0;
+	struct cache_hwmon_node *node;
+
+	if (!hwmon->dev && !hwmon->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->cycles_per_med_req = 20;
+	node->cycles_per_high_req = 35;
+	node->min_busy = 100;
+	node->max_busy = 100;
+	node->tolerance_mrps = 5;
+	node->guard_band_mhz = 100;
+	node->decay_rate = 90;
+	node->hw = hwmon;
+	node->attr_grp = &dev_attr_group;
+
+	mutex_lock(&register_lock);
+	if (!use_cnt) {
+		ret = devfreq_add_governor(&devfreq_cache_hwmon);
+		if (!ret)
+			use_cnt++;
+	}
+	mutex_unlock(&register_lock);
+
+	if (!ret) {
+		dev_info(dev, "Cache HWmon governor registered.\n");
+	} else {
+		dev_err(dev, "Failed to add Cache HWmon governor\n");
+		return ret;
+	}
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &cache_hwmon_list);
+	mutex_unlock(&list_lock);
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based cache freq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_cache_hwmon.h b/drivers/devfreq/governor_cache_hwmon.h
new file mode 100644
index 0000000..01b5a75
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_CACHE_HWMON_H
+#define _GOVERNOR_CACHE_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+enum request_group {
+	HIGH,
+	MED,
+	LOW,
+	MAX_NUM_GROUPS,
+};
+
+struct mrps_stats {
+	unsigned long mrps[MAX_NUM_GROUPS];
+	unsigned int busy_percent;
+};
+
+/**
+ * struct cache_hwmon - devfreq Cache HW monitor info
+ * @start_hwmon:	Start the HW monitoring
+ * @stop_hwmon:		Stop the HW monitoring
+ * @meas_mrps_and_set_irq:	Return the measured count and set up the
+ *				IRQ to fire if usage exceeds current
+ *				measurement by @tol percent.
+ * @dev:		device that this HW monitor can monitor.
+ * @of_node:		OF node of device that this HW monitor can monitor.
+ * @df:			Devfreq node that this HW monitor is being used
+ *			for. NULL when not actively in use, and non-NULL
+ *			when in use.
+ */
+struct cache_hwmon {
+	int (*start_hwmon)(struct cache_hwmon *hw, struct mrps_stats *mrps);
+	void (*stop_hwmon)(struct cache_hwmon *hw);
+	unsigned long (*meas_mrps_and_set_irq)(struct cache_hwmon *hw,
+					unsigned int tol, unsigned int us,
+					struct mrps_stats *mrps);
+	struct device *dev;
+	struct device_node *of_node;
+	struct devfreq *df;
+};
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon);
+int update_cache_hwmon(struct cache_hwmon *hwmon);
+#else
+static inline int register_cache_hwmon(struct device *dev,
+				       struct cache_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_CACHE_HWMON_H */
diff --git a/drivers/devfreq/governor_cpufreq.c b/drivers/devfreq/governor_cpufreq.c
new file mode 100644
index 0000000..03ec792
--- /dev/null
+++ b/drivers/devfreq/governor_cpufreq.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dev-cpufreq: " fmt
+
+#include <linux/devfreq.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include "governor.h"
+
+struct cpu_state {
+	unsigned int freq;
+	unsigned int min_freq;
+	unsigned int max_freq;
+	bool on;
+	unsigned int first_cpu;
+};
+static struct cpu_state *state[NR_CPUS];
+static int cpufreq_cnt;
+
+struct freq_map {
+	unsigned int cpu_khz;
+	unsigned int target_freq;
+};
+
+struct devfreq_node {
+	struct devfreq *df;
+	void *orig_data;
+	struct device *dev;
+	struct device_node *of_node;
+	struct list_head list;
+	struct freq_map **map;
+	struct freq_map *common_map;
+	unsigned int timeout;
+	struct delayed_work dwork;
+	bool drop;
+	unsigned long prev_tgt;
+};
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(state_lock);
+static DEFINE_MUTEX(cpufreq_reg_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", n->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = kstrtoint(buf, 10, &val);					\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	n->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
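+/*
+ * Re-evaluate the devfreq target for one node. If a timeout is configured
+ * and the device ends up above its minimum frequency, arm the delayed work
+ * that will drop the vote once the timeout expires.
+ */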
+static int update_node(struct devfreq_node *node)
+{
+	int ret;
+	struct devfreq *df = node->df;
+
+	if (!df)
+		return 0;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&df->lock);
+	node->drop = false;
+	ret = update_devfreq(df);
+	if (ret) {
+		dev_err(df->dev.parent, "Unable to update frequency\n");
+		goto out;
+	}
+
+	if (!node->timeout)
+		goto out;
+
+	if (df->previous_freq <= df->min_freq)
+		goto out;
+
+	schedule_delayed_work(&node->dwork,
+			      msecs_to_jiffies(node->timeout));
+out:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+
+static void update_all_devfreqs(void)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list) {
+		update_node(node);
+	}
+}
+
+static void do_timeout(struct work_struct *work)
+{
+	struct devfreq_node *node = container_of(to_delayed_work(work),
+						struct devfreq_node, dwork);
+	struct devfreq *df = node->df;
+
+	mutex_lock(&df->lock);
+	node->drop = true;
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+}
+
+static struct devfreq_node *find_devfreq_node(struct device *dev)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list)
+		if (node->dev == dev || node->of_node == dev->of_node)
+			return node;
+
+	return NULL;
+}
+
+/* ==================== cpufreq part ==================== */
+static void add_policy(struct cpufreq_policy *policy)
+{
+	struct cpu_state *new_state;
+	unsigned int cpu, first_cpu;
+
+	if (state[policy->cpu]) {
+		state[policy->cpu]->freq = policy->cur;
+		state[policy->cpu]->on = true;
+	} else {
+		new_state = kzalloc(sizeof(struct cpu_state), GFP_KERNEL);
+		if (!new_state)
+			return;
+
+		first_cpu = cpumask_first(policy->related_cpus);
+		new_state->first_cpu = first_cpu;
+		new_state->freq = policy->cur;
+		new_state->min_freq = policy->cpuinfo.min_freq;
+		new_state->max_freq = policy->cpuinfo.max_freq;
+		new_state->on = true;
+
+		for_each_cpu(cpu, policy->related_cpus)
+			state[cpu] = new_state;
+	}
+}
+
+static int cpufreq_policy_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+
+	switch (event) {
+	case CPUFREQ_CREATE_POLICY:
+		mutex_lock(&state_lock);
+		add_policy(policy);
+		update_all_devfreqs();
+		mutex_unlock(&state_lock);
+		break;
+
+	case CPUFREQ_REMOVE_POLICY:
+		mutex_lock(&state_lock);
+		if (state[policy->cpu]) {
+			state[policy->cpu]->on = false;
+			update_all_devfreqs();
+		}
+		mutex_unlock(&state_lock);
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block cpufreq_policy_nb = {
+	.notifier_call = cpufreq_policy_notifier
+};
+
+static int cpufreq_trans_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpu_state *s;
+
+	if (event != CPUFREQ_POSTCHANGE)
+		return 0;
+
+	mutex_lock(&state_lock);
+
+	s = state[freq->cpu];
+	if (!s)
+		goto out;
+
+	if (s->freq != freq->new) {
+		s->freq = freq->new;
+		update_all_devfreqs();
+	}
+
+out:
+	mutex_unlock(&state_lock);
+	return 0;
+}
+
+static struct notifier_block cpufreq_trans_nb = {
+	.notifier_call = cpufreq_trans_notifier
+};
+
+static int register_cpufreq(void)
+{
+	int ret = 0;
+	unsigned int cpu;
+	struct cpufreq_policy *policy;
+
+	mutex_lock(&cpufreq_reg_lock);
+
+	if (cpufreq_cnt)
+		goto cnt_not_zero;
+
+	get_online_cpus();
+	ret = cpufreq_register_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	if (ret)
+		goto out;
+
+	ret = cpufreq_register_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret) {
+		cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+		goto out;
+	}
+
+	for_each_online_cpu(cpu) {
+		policy = cpufreq_cpu_get(cpu);
+		if (policy) {
+			add_policy(policy);
+			cpufreq_cpu_put(policy);
+		}
+	}
+out:
+	put_online_cpus();
+cnt_not_zero:
+	if (!ret)
+		cpufreq_cnt++;
+	mutex_unlock(&cpufreq_reg_lock);
+	return ret;
+}
+
+static int unregister_cpufreq(void)
+{
+	int ret = 0;
+	int cpu;
+
+	mutex_lock(&cpufreq_reg_lock);
+
+	if (cpufreq_cnt > 1)
+		goto out;
+
+	cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_unregister_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+
+	for (cpu = ARRAY_SIZE(state) - 1; cpu >= 0; cpu--) {
+		if (!state[cpu])
+			continue;
+		if (state[cpu]->first_cpu == cpu)
+			kfree(state[cpu]);
+		state[cpu] = NULL;
+	}
+
+out:
+	cpufreq_cnt--;
+	mutex_unlock(&cpufreq_reg_lock);
+	return ret;
+}
+
+/* ==================== devfreq part ==================== */
+
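+/*
+ * With no freq_map table, linearly map the CPU's position within its
+ * [min, max] frequency range onto the device's frequency range.
+ */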
+static unsigned int interpolate_freq(struct devfreq *df, unsigned int cpu)
+{
+	unsigned long *freq_table = df->profile->freq_table;
+	unsigned int cpu_min = state[cpu]->min_freq;
+	unsigned int cpu_max = state[cpu]->max_freq;
+	unsigned int cpu_freq = state[cpu]->freq;
+	unsigned int dev_min, dev_max, cpu_percent;
+
+	if (freq_table) {
+		dev_min = freq_table[0];
+		dev_max = freq_table[df->profile->max_state - 1];
+	} else {
+		if (df->max_freq <= df->min_freq)
+			return 0;
+		dev_min = df->min_freq;
+		dev_max = df->max_freq;
+	}
+
+	cpu_percent = ((cpu_freq - cpu_min) * 100) / (cpu_max - cpu_min);
+	return dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
+}
+
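+/*
+ * Translate a CPU's current frequency into a device frequency vote using
+ * the per-CPU or common freq_map table if one exists, falling back to
+ * linear interpolation otherwise. Returns 0 for CPUs that are offline or
+ * aren't the first CPU of their policy.
+ */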
+static unsigned int cpu_to_dev_freq(struct devfreq *df, unsigned int cpu)
+{
+	struct freq_map *map = NULL;
+	unsigned int cpu_khz = 0, freq;
+	struct devfreq_node *n = df->data;
+
+	if (!state[cpu] || !state[cpu]->on || state[cpu]->first_cpu != cpu) {
+		freq = 0;
+		goto out;
+	}
+
+	if (n->common_map)
+		map = n->common_map;
+	else if (n->map)
+		map = n->map[cpu];
+
+	cpu_khz = state[cpu]->freq;
+
+	if (!map) {
+		freq = interpolate_freq(df, cpu);
+		goto out;
+	}
+
+	while (map->cpu_khz && map->cpu_khz < cpu_khz)
+		map++;
+	if (!map->cpu_khz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	dev_dbg(df->dev.parent, "CPU%u: %d -> dev: %u\n", cpu, cpu_khz, freq);
+	return freq;
+}
+
+static int devfreq_cpufreq_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	unsigned int cpu, tgt_freq = 0;
+	struct devfreq_node *node;
+
+	node = df->data;
+	if (!node) {
+		pr_err("Unable to find devfreq node!\n");
+		return -ENODEV;
+	}
+
+	if (node->drop) {
+		*freq = 0;
+		return 0;
+	}
+
+	for_each_possible_cpu(cpu)
+		tgt_freq = max(tgt_freq, cpu_to_dev_freq(df, cpu));
+
+	if (node->timeout && tgt_freq < node->prev_tgt)
+		*freq = 0;
+	else
+		*freq = tgt_freq;
+
+	node->prev_tgt = tgt_freq;
+
+	return 0;
+}
+
+static unsigned int show_table(char *buf, unsigned int len,
+				struct freq_map *map)
+{
+	unsigned int cnt = 0;
+
+	cnt += snprintf(buf + cnt, len - cnt, "CPU freq\tDevice freq\n");
+
+	while (map->cpu_khz && cnt < len) {
+		cnt += snprintf(buf + cnt, len - cnt, "%8u\t%11u\n",
+				map->cpu_khz, map->target_freq);
+		map++;
+	}
+	if (cnt < len)
+		cnt += snprintf(buf + cnt, len - cnt, "\n");
+
+	return cnt;
+}
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct devfreq_node *n = df->data;
+	struct freq_map *map;
+	unsigned int cnt = 0, cpu;
+
+	mutex_lock(&state_lock);
+	if (n->common_map) {
+		map = n->common_map;
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Common table for all CPUs:\n");
+		cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+	} else if (n->map) {
+		for_each_possible_cpu(cpu) {
+			map = n->map[cpu];
+			if (!map)
+				continue;
+			cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+					"CPU %u:\n", cpu);
+			if (cnt >= PAGE_SIZE)
+				break;
+			cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+			if (cnt >= PAGE_SIZE)
+				break;
+		}
+	} else {
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Device freq interpolated based on CPU freq\n");
+	}
+	mutex_unlock(&state_lock);
+
+	return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+gov_attr(timeout, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_freq_map.attr,
+	&dev_attr_timeout.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "cpufreq",
+	.attrs = dev_attr,
+};
+
+static int devfreq_cpufreq_gov_start(struct devfreq *devfreq)
+{
+	int ret = 0;
+	struct devfreq_node *node;
+	bool alloc = false;
+
+	ret = register_cpufreq();
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+	if (ret) {
+		unregister_cpufreq();
+		return ret;
+	}
+
+	mutex_lock(&state_lock);
+
+	node = find_devfreq_node(devfreq->dev.parent);
+	if (node == NULL) {
+		node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+		if (!node) {
+			pr_err("Out of memory!\n");
+			ret = -ENOMEM;
+			goto alloc_fail;
+		}
+		alloc = true;
+		node->dev = devfreq->dev.parent;
+		list_add_tail(&node->list, &devfreq_list);
+	}
+
+	INIT_DELAYED_WORK(&node->dwork, do_timeout);
+
+	node->df = devfreq;
+	node->orig_data = devfreq->data;
+	devfreq->data = node;
+
+	ret = update_node(node);
+	if (ret)
+		goto update_fail;
+
+	mutex_unlock(&state_lock);
+	return 0;
+
+update_fail:
+	devfreq->data = node->orig_data;
+	if (alloc) {
+		list_del(&node->list);
+		kfree(node);
+	}
+alloc_fail:
+	mutex_unlock(&state_lock);
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+	return ret;
+}
+
+static void devfreq_cpufreq_gov_stop(struct devfreq *devfreq)
+{
+	struct devfreq_node *node = devfreq->data;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&state_lock);
+	devfreq->data = node->orig_data;
+	if (node->map || node->common_map) {
+		node->df = NULL;
+	} else {
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+}
+
+static int devfreq_cpufreq_ev_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	int ret;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+
+		ret = devfreq_cpufreq_gov_start(devfreq);
+		if (ret) {
+			pr_err("Governor start failed!\n");
+			return ret;
+		}
+		pr_debug("Enabled dev CPUfreq governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+
+		devfreq_cpufreq_gov_stop(devfreq);
+		pr_debug("Disabled dev CPUfreq governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_cpufreq = {
+	.name = "cpufreq",
+	.get_target_freq = devfreq_cpufreq_get_freq,
+	.event_handler = devfreq_cpufreq_ev_handler,
+};
+
+#define NUM_COLS	2
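+/*
+ * Parse a flat list of <cpu_khz target_freq> pairs from DT into a
+ * zero-terminated freq_map array. Returns NULL on missing or malformed
+ * input.
+ */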
+static struct freq_map *read_tbl(struct device_node *of_node, char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct freq_map *tbl;
+
+	if (!of_find_property(of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = kzalloc((nf + 1) * sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		of_property_read_u32_index(of_node, prop_name, j, &data);
+		tbl[i].cpu_khz = data;
+
+		of_property_read_u32_index(of_node, prop_name, j + 1, &data);
+		tbl[i].target_freq = data;
+	}
+	tbl[i].cpu_khz = 0;
+
+	return tbl;
+}
+
+#define PROP_TARGET "target-dev"
+#define PROP_TABLE "cpu-to-dev-map"
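+/*
+ * Illustrative node layout this parser expects (hypothetical names and
+ * values, not taken from any real dts): each table is a list of
+ * <cpu-khz dev-freq> pairs, either one common "cpu-to-dev-map" or
+ * per-CPU "cpu-to-dev-map-<N>" tables, with "target-dev" pointing at
+ * the devfreq device being scaled.
+ *
+ *	devfreq-cpufreq {
+ *		example-map {
+ *			target-dev = <&example_dev>;
+ *			cpu-to-dev-map =
+ *				<  300000  100000000 >,
+ *				< 1500000  400000000 >;
+ *		};
+ *	};
+ */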
+static int add_table_from_of(struct device_node *of_node)
+{
+	struct device_node *target_of_node;
+	struct devfreq_node *node;
+	struct freq_map *common_tbl;
+	struct freq_map **tbl_list = NULL;
+	static char prop_name[] = PROP_TABLE "-999999";
+	int cpu, ret, cnt = 0, prop_sz = ARRAY_SIZE(prop_name);
+
+	target_of_node = of_parse_phandle(of_node, PROP_TARGET, 0);
+	if (!target_of_node)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	common_tbl = read_tbl(of_node, PROP_TABLE);
+	if (!common_tbl) {
+		tbl_list = kzalloc(sizeof(*tbl_list) * NR_CPUS, GFP_KERNEL);
+		if (!tbl_list) {
+			ret = -ENOMEM;
+			goto err_list;
+		}
+
+		for_each_possible_cpu(cpu) {
+			ret = snprintf(prop_name, prop_sz, "%s-%d",
+					PROP_TABLE, cpu);
+			if (ret >= prop_sz) {
+				pr_warn("More CPUs than I can handle!\n");
+				pr_warn("Skipping rest of the tables!\n");
+				break;
+			}
+			tbl_list[cpu] = read_tbl(of_node, prop_name);
+			if (tbl_list[cpu])
+				cnt++;
+		}
+	}
+	if (!common_tbl && !cnt) {
+		ret = -EINVAL;
+		goto err_tbl;
+	}
+
+	mutex_lock(&state_lock);
+	node->of_node = target_of_node;
+	node->map = tbl_list;
+	node->common_map = common_tbl;
+	list_add_tail(&node->list, &devfreq_list);
+	mutex_unlock(&state_lock);
+
+	return 0;
+err_tbl:
+	kfree(tbl_list);
+err_list:
+	kfree(node);
+	return ret;
+}
+
+static int __init devfreq_cpufreq_init(void)
+{
+	int ret;
+	struct device_node *of_par, *of_child;
+
+	of_par = of_find_node_by_name(NULL, "devfreq-cpufreq");
+	if (of_par) {
+		for_each_child_of_node(of_par, of_child) {
+			ret = add_table_from_of(of_child);
+			if (ret)
+				pr_err("Parsing %s failed!\n", of_child->name);
+			else
+				pr_debug("Parsed %s.\n", of_child->name);
+		}
+		of_node_put(of_par);
+	} else {
+		pr_info("No tables parsed from DT.\n");
+	}
+
+	ret = devfreq_add_governor(&devfreq_cpufreq);
+	if (ret) {
+		pr_err("Governor add failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+subsys_initcall(devfreq_cpufreq_init);
+
+static void __exit devfreq_cpufreq_exit(void)
+{
+	int ret, cpu;
+	struct devfreq_node *node, *tmp;
+
+	ret = devfreq_remove_governor(&devfreq_cpufreq);
+	if (ret)
+		pr_err("Governor remove failed!\n");
+
+	mutex_lock(&state_lock);
+	list_for_each_entry_safe(node, tmp, &devfreq_list, list) {
+		kfree(node->common_map);
+		if (node->map)
+			for_each_possible_cpu(cpu)
+				kfree(node->map[cpu]);
+		kfree(node->map);
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+}
+module_exit(devfreq_cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU freq based generic governor for devfreq devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
new file mode 100644
index 0000000..e1afa60
--- /dev/null
+++ b/drivers/devfreq/governor_memlat.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "mem_lat: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+#include "governor_memlat.h"
+
+#include <trace/events/power.h>
+
+struct memlat_node {
+	unsigned int ratio_ceil;
+	bool mon_started;
+	struct list_head list;
+	void *orig_data;
+	struct memlat_hwmon *hw;
+	struct devfreq_governor *gov;
+	struct attribute_group *attr_grp;
+};
+
+static LIST_HEAD(memlat_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct memlat_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct memlat_node *hw = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = kstrtouint(buf, 10, &val);				\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, min, max)		\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct memlat_node *n = df->data;
+	struct core_dev_map *map = n->hw->freq_map;
+	unsigned int cnt = 0;
+
+	cnt += snprintf(buf, PAGE_SIZE, "Core freq (MHz)\tDevice BW\n");
+
+	while (map->core_mhz && cnt < PAGE_SIZE) {
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%15u\t%9u\n",
+				map->core_mhz, map->target_freq);
+		map++;
+	}
+	if (cnt < PAGE_SIZE)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+
+	return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+
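+/*
+ * Map a core frequency (in MHz) to a device frequency using freq_map.
+ * The table is sorted by core_mhz and terminated by a zero entry: the
+ * first row whose core_mhz is >= coref is used, and core frequencies
+ * above the last row saturate to it. Illustrative example: with rows
+ * {600 -> 762, 1300 -> 1525} a core frequency of 1000 MHz maps to 1525.
+ */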
+static unsigned long core_to_dev_freq(struct memlat_node *node,
+		unsigned long coref)
+{
+	struct memlat_hwmon *hw = node->hw;
+	struct core_dev_map *map = hw->freq_map;
+	unsigned long freq = 0;
+
+	if (!map)
+		goto out;
+
+	while (map->core_mhz && map->core_mhz < coref)
+		map++;
+	if (!map->core_mhz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	pr_debug("freq: %lu -> dev: %lu\n", coref, freq);
+	return freq;
+}
+
+static struct memlat_node *find_memlat_node(struct devfreq *df)
+{
+	struct memlat_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &memlat_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+static int start_monitor(struct devfreq *df)
+{
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+	struct device *dev = df->dev.parent;
+	int ret;
+
+	ret = hw->start_hwmon(hw);
+
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+		return ret;
+	}
+
+	devfreq_monitor_start(df);
+
+	node->mon_started = true;
+
+	return 0;
+}
+
+static void stop_monitor(struct devfreq *df)
+{
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+
+	node->mon_started = false;
+
+	devfreq_monitor_stop(df);
+	hw->stop_hwmon(hw);
+}
+
+static int gov_start(struct devfreq *df)
+{
+	int ret = 0;
+	struct device *dev = df->dev.parent;
+	struct memlat_node *node;
+	struct memlat_hwmon *hw;
+
+	node = find_memlat_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	if (start_monitor(df))
+		goto err_start;
+
+	ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+err_sysfs:
+	stop_monitor(df);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	return ret;
+}
+
+static void gov_stop(struct devfreq *df)
+{
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+	stop_monitor(df);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+}
+
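+/*
+ * Polling callback: for each monitored core, compute the
+ * instructions-per-memory-access ratio and, among the cores whose ratio
+ * is at or below ratio_ceil (i.e. likely memory-latency bound), vote for
+ * the device frequency mapped from the fastest such core. Illustrative
+ * numbers: with ratio_ceil = 10, a core reporting 4M instructions and
+ * 1M memory accesses (ratio 4) qualifies, while one at ratio 50 does not.
+ */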
+static int devfreq_memlat_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	int i, lat_dev;
+	struct memlat_node *node = df->data;
+	struct memlat_hwmon *hw = node->hw;
+	unsigned long max_freq = 0;
+	unsigned int ratio;
+
+	hw->get_cnt(hw);
+
+	for (i = 0; i < hw->num_cores; i++) {
+		ratio = hw->core_stats[i].inst_count;
+
+		if (hw->core_stats[i].mem_count)
+			ratio /= hw->core_stats[i].mem_count;
+
+		trace_memlat_dev_meas(dev_name(df->dev.parent),
+					hw->core_stats[i].id,
+					hw->core_stats[i].inst_count,
+					hw->core_stats[i].mem_count,
+					hw->core_stats[i].freq, ratio);
+
+		if (ratio && ratio <= node->ratio_ceil
+		    && hw->core_stats[i].freq > max_freq) {
+			lat_dev = i;
+			max_freq = hw->core_stats[i].freq;
+		}
+	}
+
+	if (max_freq) {
+		max_freq = core_to_dev_freq(node, max_freq);
+		trace_memlat_dev_update(dev_name(df->dev.parent),
+					hw->core_stats[lat_dev].id,
+					hw->core_stats[lat_dev].inst_count,
+					hw->core_stats[lat_dev].mem_count,
+					hw->core_stats[lat_dev].freq,
+					max_freq);
+	}
+
+	*freq = max_freq;
+	return 0;
+}
+
+gov_attr(ratio_ceil, 1U, 10000U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_ratio_ceil.attr,
+	&dev_attr_freq_map.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "mem_latency",
+	.attrs = dev_attr,
+};
+
+#define MIN_MS	10U
+#define MAX_MS	500U
+static int devfreq_memlat_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = gov_start(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent,
+			"Enabled Memory Latency governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		gov_stop(df);
+		dev_dbg(df->dev.parent,
+			"Disabled Memory Latency governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		devfreq_interval_update(df, &sample_ms);
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_gov_memlat = {
+	.name = "mem_latency",
+	.get_target_freq = devfreq_memlat_get_freq,
+	.event_handler = devfreq_memlat_ev_handler,
+};
+
+#define NUM_COLS	2
+static struct core_dev_map *init_core_dev_map(struct device *dev,
+		char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct core_dev_map *tbl;
+	int ret;
+
+	if (!of_find_property(dev->of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = devm_kzalloc(dev, (nf + 1) * sizeof(struct core_dev_map),
+			GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		ret = of_property_read_u32_index(dev->of_node, prop_name, j,
+				&data);
+		if (ret)
+			return NULL;
+		tbl[i].core_mhz = data / 1000;
+
+		ret = of_property_read_u32_index(dev->of_node, prop_name, j + 1,
+				&data);
+		if (ret)
+			return NULL;
+		tbl[i].target_freq = data;
+		pr_debug("Entry%d CPU:%u, Dev:%u\n", i, tbl[i].core_mhz,
+				tbl[i].target_freq);
+	}
+	tbl[i].core_mhz = 0;
+
+	return tbl;
+}
+
+int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+{
+	int ret = 0;
+	struct memlat_node *node;
+
+	if (!hw->dev && !hw->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->gov = &devfreq_gov_memlat;
+	node->attr_grp = &dev_attr_group;
+
+	node->ratio_ceil = 10;
+	node->hw = hw;
+
+	hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
+	if (!hw->freq_map) {
+		dev_err(dev, "Couldn't find the core-dev freq table!\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &memlat_list);
+	mutex_unlock(&list_lock);
+
+	mutex_lock(&state_lock);
+	if (!use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_memlat);
+	if (!ret)
+		use_cnt++;
+	mutex_unlock(&state_lock);
+
+	if (!ret)
+		dev_info(dev, "Memory Latency governor registered.\n");
+	else
+		dev_err(dev, "Memory Latency governor registration failed!\n");
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
new file mode 100644
index 0000000..a0e52a0
--- /dev/null
+++ b/drivers/devfreq/governor_memlat.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_MEMLAT_H
+#define _GOVERNOR_MEMLAT_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct dev_stats - Device stats
+ * @id:				ID of the core these stats correspond to.
+ * @inst_count:			Number of instructions executed.
+ * @mem_count:			Number of memory accesses made.
+ * @freq:			Effective frequency of the device in the
+ *				last interval.
+ */
+struct dev_stats {
+	int id;
+	unsigned long inst_count;
+	unsigned long mem_count;
+	unsigned long freq;
+};
+
+struct core_dev_map {
+	unsigned int core_mhz;
+	unsigned int target_freq;
+};
+
+/**
+ * struct memlat_hwmon - Memory Latency HW monitor info
+ * @start_hwmon:		Start the HW monitoring
+ * @stop_hwmon:			Stop the HW monitoring
+ * @get_cnt:			Return the number of instructions executed,
+ *				memory accesses and effective frequency
+ * @dev:			Pointer to device that this HW monitor can
+ *				monitor.
+ * @of_node:			OF node of device that this HW monitor can
+ *				monitor.
+ * @df:				Devfreq node that this HW monitor is being
+ *				used for. NULL when not actively in use and
+ *				non-NULL when in use.
+ * @num_cores:			Number of cores that are monitored by the
+ *				hardware monitor.
+ * @core_stats:			Array containing instruction count, memory
+ *				accesses and effective frequency for each core.
+ * @freq_map:			Table mapping core frequency to device
+ *				frequency, populated from DT during
+ *				registration.
+ *
+ * One of dev or of_node needs to be specified for a successful registration.
+ *
+ */
+struct memlat_hwmon {
+	int (*start_hwmon)(struct memlat_hwmon *hw);
+	void (*stop_hwmon)(struct memlat_hwmon *hw);
+	unsigned long (*get_cnt)(struct memlat_hwmon *hw);
+	struct device *dev;
+	struct device_node *of_node;
+
+	unsigned int num_cores;
+	struct dev_stats *core_stats;
+
+	struct devfreq *df;
+	struct core_dev_map *freq_map;
+};
+
+#ifdef CONFIG_DEVFREQ_GOV_MEMLAT
+int register_memlat(struct device *dev, struct memlat_hwmon *hw);
+int update_memlat(struct memlat_hwmon *hw);
+#else
+static inline int register_memlat(struct device *dev,
+					struct memlat_hwmon *hw)
+{
+	return 0;
+}
+static inline int update_memlat(struct memlat_hwmon *hw)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_MEMLAT_H */
diff --git a/drivers/devfreq/m4m-hwmon.c b/drivers/devfreq/m4m-hwmon.c
new file mode 100644
index 0000000..a9fd470
--- /dev/null
+++ b/drivers/devfreq/m4m-hwmon.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "m4m-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include "governor_cache_hwmon.h"
+
+#define cntr_offset(idx) (sizeof(u32) * (idx))
+
+/* register offsets from base address */
+#define DCVS_VERSION(m)		((m)->base + 0x0)
+#define GLOBAL_CR_CTL(m)	((m)->base + 0x8)
+#define GLOBAL_CR_RESET(m)	((m)->base + 0xC)
+#define OVSTAT(m)		((m)->base + 0x30)
+#define OVCLR(m)		((m)->base + 0x34)
+#define OVSET(m)		((m)->base + 0x3C) /* unused */
+#define EVCNTR(m, x)		((m)->base + 0x40 + cntr_offset(x))
+#define CNTCTL(m, x)		((m)->base + 0x100 + cntr_offset(x))
+/* counter 0/1 does not have type control */
+#define EVTYPER_START	2
+#define EVTYPER(m, x)	((m)->base + 0x140 + cntr_offset(x))
+
+/* bitmasks for GLOBAL_CR_CTL and CNTCTLx */
+#define CNT_EN	BIT(0)
+#define IRQ_EN	BIT(1)
+
+/* non-configurable counters */
+#define CYC_CNTR_IDX		0
+#define WASTED_CYC_CNTR_IDX	1
+
+/* counter is 28-bit */
+#define CNT_MAX 0x0FFFFFFFU
+
+struct m4m_counter {
+	int idx;
+	u32 event_mask;
+	unsigned int last_start;
+};
+
+struct m4m_hwmon {
+	void __iomem *base;
+	struct m4m_counter cntr[MAX_NUM_GROUPS];
+	int num_cntr;
+	int irq;
+	struct cache_hwmon hw;
+	struct device *dev;
+};
+
+#define to_mon(ptr) container_of(ptr, struct m4m_hwmon, hw)
+
+static DEFINE_SPINLOCK(init_lock);
+
+/* Should only be called once while HW is in POR state */
+static inline void mon_global_init(struct m4m_hwmon *m)
+{
+	writel_relaxed(CNT_EN | IRQ_EN, GLOBAL_CR_CTL(m));
+}
+
+static inline void _mon_disable_cntr_and_irq(struct m4m_hwmon *m, int cntr_idx)
+{
+	writel_relaxed(0, CNTCTL(m, cntr_idx));
+}
+
+static inline void _mon_enable_cntr_and_irq(struct m4m_hwmon *m, int cntr_idx)
+{
+	writel_relaxed(CNT_EN | IRQ_EN, CNTCTL(m, cntr_idx));
+}
+
+static void mon_disable(struct m4m_hwmon *m)
+{
+	int i;
+
+	for (i = 0; i < m->num_cntr; i++)
+		_mon_disable_cntr_and_irq(m, m->cntr[i].idx);
+	/* make sure all counter/irq are indeed disabled */
+	mb();
+}
+
+static void mon_enable(struct m4m_hwmon *m)
+{
+	int i;
+
+	for (i = 0; i < m->num_cntr; i++)
+		_mon_enable_cntr_and_irq(m, m->cntr[i].idx);
+}
+
+static inline void _mon_ov_clear(struct m4m_hwmon *m, int cntr_idx)
+{
+	writel_relaxed(BIT(cntr_idx), OVCLR(m));
+}
+
+static void mon_ov_clear(struct m4m_hwmon *m, enum request_group grp)
+{
+	_mon_ov_clear(m, m->cntr[grp].idx);
+}
+
+static inline u32 mon_irq_status(struct m4m_hwmon *m)
+{
+	return readl_relaxed(OVSTAT(m));
+}
+
+static bool mon_is_ovstat_set(struct m4m_hwmon *m)
+{
+	int i;
+	u32 status = mon_irq_status(m);
+
+	for (i = 0; i < m->num_cntr; i++)
+		if (status & BIT(m->cntr[i].idx))
+			return true;
+	return false;
+}
+
+/* counter must be stopped first */
+static unsigned long _mon_get_count(struct m4m_hwmon *m,
+				    int cntr_idx, unsigned int start)
+{
+	unsigned long cnt;
+	u32 cur_cnt = readl_relaxed(EVCNTR(m, cntr_idx));
+	u32 ov = readl_relaxed(OVSTAT(m)) & BIT(cntr_idx);
+
+	if (!ov && cur_cnt < start) {
+		dev_warn(m->dev, "Counter%d overflowed but not detected\n",
+			 cntr_idx);
+		ov = 1;
+	}
+
+	if (ov)
+		cnt = CNT_MAX - start + cur_cnt;
+	else
+		cnt = cur_cnt - start;
+
+	return cnt;
+}
+
+static unsigned long mon_get_count(struct m4m_hwmon *m,
+				   enum request_group grp)
+{
+	return _mon_get_count(m, m->cntr[grp].idx, m->cntr[grp].last_start);
+}
+
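+/*
+ * Program a counter so that its overflow IRQ fires after roughly 'limit'
+ * events: the counter starts at CNT_MAX - limit and counts up. For
+ * example (illustrative value), limit = 0x1000 starts the counter at
+ * 0x0FFFEFFF, which wraps after about 0x1000 further events.
+ */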
+static inline void mon_set_limit(struct m4m_hwmon *m, enum request_group grp,
+			  unsigned int limit)
+{
+	u32 start;
+
+	if (limit >= CNT_MAX)
+		limit = CNT_MAX;
+	start = CNT_MAX - limit;
+
+	writel_relaxed(start, EVCNTR(m, m->cntr[grp].idx));
+	m->cntr[grp].last_start = start;
+}
+
+static inline void mon_enable_cycle_cntr(struct m4m_hwmon *m)
+{
+	writel_relaxed(CNT_EN, CNTCTL(m, CYC_CNTR_IDX));
+}
+
+static inline void mon_disable_cycle_cntr(struct m4m_hwmon *m)
+{
+	_mon_disable_cntr_and_irq(m, CYC_CNTR_IDX);
+}
+
+static inline unsigned long mon_get_cycle_count(struct m4m_hwmon *m)
+{
+	return _mon_get_count(m, CYC_CNTR_IDX, 0);
+}
+
+static inline void mon_clear_cycle_cntr(struct m4m_hwmon *m)
+{
+	writel_relaxed(0, EVCNTR(m, CYC_CNTR_IDX));
+	_mon_ov_clear(m, CYC_CNTR_IDX);
+}
+
+static void mon_init(struct m4m_hwmon *m)
+{
+	static bool mon_inited;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&init_lock, flags);
+	if (!mon_inited)
+		mon_global_init(m);
+	spin_unlock_irqrestore(&init_lock, flags);
+
+	/* configure counter events */
+	for (i = 0; i < m->num_cntr; i++)
+		writel_relaxed(m->cntr[i].event_mask, EVTYPER(m, m->cntr[i].idx));
+}
+
+static irqreturn_t m4m_hwmon_intr_handler(int irq, void *dev)
+{
+	struct m4m_hwmon *m = dev;
+
+	if (mon_is_ovstat_set(m)) {
+		update_cache_hwmon(&m->hw);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+static int count_to_mrps(unsigned long count, unsigned int us)
+{
+	do_div(count, us);
+	count++;
+	return count;
+}
+
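+/*
+ * Convert an MRPS target back into an event-count limit for the next
+ * sample window. Illustrative arithmetic: 10 MRPS with zero tolerance
+ * over a 50 ms window gives 10 * 50 * 1000 = 500000 counted events.
+ */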
+static unsigned int mrps_to_count(unsigned int mrps, unsigned int ms,
+				  unsigned int tolerance)
+{
+	mrps += tolerance;
+	mrps *= ms * USEC_PER_MSEC;
+	return mrps;
+}
+
+static unsigned long m4m_meas_mrps_and_set_irq(struct cache_hwmon *hw,
+		unsigned int tol, unsigned int us, struct mrps_stats *mrps)
+{
+	struct m4m_hwmon *m = to_mon(hw);
+	unsigned long count, cyc_count;
+	unsigned long f = hw->df->previous_freq;
+	unsigned int sample_ms = hw->df->profile->polling_ms;
+	int i;
+	u32 limit;
+
+	mon_disable(m);
+	mon_disable_cycle_cntr(m);
+
+	/* calculate mrps and set limit */
+	for (i = 0; i < m->num_cntr; i++) {
+		count = mon_get_count(m, i);
+		mrps->mrps[i] = count_to_mrps(count, us);
+		limit = mrps_to_count(mrps->mrps[i], sample_ms, tol);
+		mon_ov_clear(m, i);
+		mon_set_limit(m, i, limit);
+		dev_dbg(m->dev, "Counter[%d] count 0x%lx, limit 0x%x\n",
+			m->cntr[i].idx, count, limit);
+	}
+
+	/* get cycle count and calculate busy percent */
+	cyc_count = mon_get_cycle_count(m);
+	mrps->busy_percent = mult_frac(cyc_count, 1000, us) * 100 / f;
+	mon_clear_cycle_cntr(m);
+	dev_dbg(m->dev, "Cycle count 0x%lx\n", cyc_count);
+
+	/* re-enable monitor */
+	mon_enable(m);
+	mon_enable_cycle_cntr(m);
+
+	return 0;
+}
+
+static int m4m_start_hwmon(struct cache_hwmon *hw, struct mrps_stats *mrps)
+{
+	struct m4m_hwmon *m = to_mon(hw);
+	unsigned int sample_ms = hw->df->profile->polling_ms;
+	int ret, i;
+	u32 limit;
+
+	ret = request_threaded_irq(m->irq, NULL, m4m_hwmon_intr_handler,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register for irq\n");
+		return ret;
+	}
+
+	mon_init(m);
+	mon_disable(m);
+	mon_disable_cycle_cntr(m);
+	for (i = 0; i < m->num_cntr; i++) {
+		mon_ov_clear(m, i);
+		limit = mrps_to_count(mrps->mrps[i], sample_ms, 0);
+		mon_set_limit(m, i, limit);
+	}
+	mon_clear_cycle_cntr(m);
+	mon_enable(m);
+	mon_enable_cycle_cntr(m);
+
+	return 0;
+}
+
+static void m4m_stop_hwmon(struct cache_hwmon *hw)
+{
+	struct m4m_hwmon *m = to_mon(hw);
+	int i;
+
+	mon_disable(m);
+	free_irq(m->irq, m);
+	for (i = 0; i < m->num_cntr; i++)
+		mon_ov_clear(m, i);
+}
+
+/* device probe functions */
+static const struct of_device_id m4m_match_table[] = {
+	{ .compatible = "qcom,m4m-hwmon" },
+	{}
+};
+
+static int m4m_hwmon_parse_cntr(struct device *dev,
+				struct m4m_hwmon *m)
+{
+	u32 *data;
+	const char *prop_name = "qcom,counter-event-sel";
+	int ret, len, i;
+
+	if (!of_find_property(dev->of_node, prop_name, &len))
+		return -EINVAL;
+	len /= sizeof(*data);
+
+	if (len % 2 || len > MAX_NUM_GROUPS * 2)
+		return -EINVAL;
+
+	data = devm_kcalloc(dev, len, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	ret = of_property_read_u32_array(dev->of_node, prop_name, data, len);
+	if (ret)
+		return ret;
+
+	len /= 2;
+	m->num_cntr = len;
+	for (i = 0; i < len; i++) {
+		/* disallow non-configurable counters */
+		if (data[i * 2] < EVTYPER_START)
+			return -EINVAL;
+		m->cntr[i].idx = data[i * 2];
+		m->cntr[i].event_mask = data[i * 2 + 1];
+	}
+
+	devm_kfree(dev, data);
+	return 0;
+}
+
+static int m4m_hwmon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct m4m_hwmon *m;
+	int ret;
+
+	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+	if (!m)
+		return -ENOMEM;
+	m->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "base not found!\n");
+		return -EINVAL;
+	}
+	m->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->base)
+		return -ENOMEM;
+
+	m->irq = platform_get_irq(pdev, 0);
+	if (m->irq < 0) {
+		dev_err(dev, "Unable to get IRQ number\n");
+		return m->irq;
+	}
+
+	ret = m4m_hwmon_parse_cntr(dev, m);
+	if (ret) {
+		dev_err(dev, "Unable to parse counter events\n");
+		return ret;
+	}
+
+	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!m->hw.of_node)
+		return -EINVAL;
+	m->hw.start_hwmon = &m4m_start_hwmon;
+	m->hw.stop_hwmon = &m4m_stop_hwmon;
+	m->hw.meas_mrps_and_set_irq = &m4m_meas_mrps_and_set_irq;
+
+	ret = register_cache_hwmon(dev, &m->hw);
+	if (ret) {
+		dev_err(dev, "Dev BW hwmon registration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct platform_driver m4m_hwmon_driver = {
+	.probe = m4m_hwmon_driver_probe,
+	.driver = {
+		.name = "m4m-hwmon",
+		.of_match_table = m4m_match_table,
+	},
+};
+
+module_platform_driver(m4m_hwmon_driver);
+MODULE_DESCRIPTION("M4M hardware monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/msmcci-hwmon.c b/drivers/devfreq/msmcci-hwmon.c
new file mode 100644
index 0000000..c16175c
--- /dev/null
+++ b/drivers/devfreq/msmcci-hwmon.c
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msmcci-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/cpu_pm.h>
+#include <soc/qcom/scm.h>
+#include "governor_cache_hwmon.h"
+
+#define	EVNT_SEL		 0x0
+#define	EVNT_CNT_MATCH_VAL	 0x18
+#define	MATCH_FLG		 0x30
+#define	MATCH_FLG_CLR		 0x48
+#define	OVR_FLG			 0x60
+#define	OVR_FLG_CLR		 0x78
+#define	CNT_CTRL		 0x94
+#define	CNT_VALUE		 0xAC
+
+#define ENABLE_OVR_FLG		BIT(4)
+#define ENABLE_MATCH_FLG	BIT(5)
+#define ENABLE_EVNT_CNT		BIT(0)
+#define RESET_EVNT_CNT		BIT(1)
+
+#define CNT_DISABLE	(ENABLE_OVR_FLG | ENABLE_MATCH_FLG)
+#define CNT_RESET_CLR	(ENABLE_OVR_FLG | ENABLE_MATCH_FLG)
+#define CNT_ENABLE	(ENABLE_OVR_FLG | ENABLE_MATCH_FLG | ENABLE_EVNT_CNT)
+#define CNT_RESET	(ENABLE_OVR_FLG | ENABLE_MATCH_FLG | RESET_EVNT_CNT)
+
+struct msmcci_hwmon {
+	struct list_head list;
+
+	union {
+		phys_addr_t phys_base[MAX_NUM_GROUPS];
+		void __iomem *virt_base[MAX_NUM_GROUPS];
+	};
+	int irq[MAX_NUM_GROUPS];
+	u32 event_sel[MAX_NUM_GROUPS];
+	int num_counters;
+
+	/*
+	 * Multiple interrupts might fire together for one device.
+	 * In that case, only one re-evaluation needs to be done.
+	 */
+	struct mutex update_lock;
+
+	/* For counter state save and restore */
+	unsigned long cur_limit[MAX_NUM_GROUPS];
+	unsigned long cur_count[MAX_NUM_GROUPS];
+	bool mon_enabled;
+
+	struct cache_hwmon hw;
+	struct device *dev;
+	bool secure_io;
+	bool irq_shared;
+};
+
+#define to_mon(ptr) container_of(ptr, struct msmcci_hwmon, hw)
+
+static LIST_HEAD(msmcci_hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(notifier_reg_lock);
+
+static inline int write_mon_reg(struct msmcci_hwmon *m, int idx,
+				unsigned long offset, u32 value)
+{
+	int ret = 0;
+
+	if (m->secure_io)
+		ret = scm_io_write(m->phys_base[idx] + offset, value);
+	else
+		writel_relaxed(value, m->virt_base[idx] + offset);
+
+	return ret;
+}
+
+static inline u32 read_mon_reg(struct msmcci_hwmon *m, int idx,
+			       unsigned long offset)
+{
+	if (m->secure_io)
+		return scm_io_read(m->phys_base[idx] + offset);
+	else
+		return readl_relaxed(m->virt_base[idx] + offset);
+}
+
+static int mon_init(struct msmcci_hwmon *m)
+{
+	int ret, i;
+
+	for (i = 0; i < m->num_counters; i++) {
+		ret = write_mon_reg(m, i, EVNT_SEL, m->event_sel[i]);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static void mon_enable(struct msmcci_hwmon *m)
+{
+	int i;
+
+	for (i = 0; i < m->num_counters; i++)
+		write_mon_reg(m, i, CNT_CTRL, CNT_ENABLE);
+}
+
+static void mon_disable(struct msmcci_hwmon *m)
+{
+	int i;
+
+	for (i = 0; i < m->num_counters; i++)
+		write_mon_reg(m, i, CNT_CTRL, CNT_DISABLE);
+}
+
+static bool mon_is_match_flag_set(struct msmcci_hwmon *m, int idx)
+{
+	return (bool)read_mon_reg(m, idx, MATCH_FLG);
+}
+
+/* mon_clear_single() can only be called when monitor is disabled */
+static void mon_clear_single(struct msmcci_hwmon *m, int idx)
+{
+	write_mon_reg(m, idx, CNT_CTRL, CNT_RESET);
+	write_mon_reg(m, idx, CNT_CTRL, CNT_RESET_CLR);
+	/* reset counter before match/overflow flags are cleared */
+	mb();
+	write_mon_reg(m, idx, MATCH_FLG_CLR, 1);
+	write_mon_reg(m, idx, MATCH_FLG_CLR, 0);
+	write_mon_reg(m, idx, OVR_FLG_CLR, 1);
+	write_mon_reg(m, idx, OVR_FLG_CLR, 0);
+}
+
+static void mon_set_limit_single(struct msmcci_hwmon *m, int idx, u32 limit)
+{
+	write_mon_reg(m, idx, EVNT_CNT_MATCH_VAL, limit);
+}
+
+static irqreturn_t msmcci_hwmon_shared_intr_handler(int irq, void *dev)
+{
+	struct msmcci_hwmon *m = dev;
+	int idx = -1, i;
+
+	for (i = 0; i < m->num_counters; i++) {
+		if (mon_is_match_flag_set(m, i)) {
+			idx = i;
+			break;
+		}
+	}
+	if (idx == -1)
+		return IRQ_NONE;
+
+	update_cache_hwmon(&m->hw);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t msmcci_hwmon_intr_handler(int irq, void *dev)
+{
+	struct msmcci_hwmon *m = dev;
+	int idx = -1, i;
+
+	for (i = 0; i < m->num_counters; i++) {
+		if (m->irq[i] == irq) {
+			idx = i;
+			break;
+		}
+	}
+	BUG_ON(idx == -1);
+
+	/*
+	 * Multiple independent interrupts could fire together and trigger
+	 * update_cache_hwmon() for same device. If we don't lock, we
+	 * could end up calling devfreq_monitor_start/stop()
+	 * concurrently, which would cause timer/workqueue object
+	 * corruption. However, we can't re-evaluate a few times back to
+	 * back either because the very short window won't be
+	 * representative. Since update_cache_hwmon() will clear match
+	 * flags for all counters, interrupts for other counters can
+	 * simply return if their match flags have already been cleared.
+	 */
+	mutex_lock(&m->update_lock);
+	if (mon_is_match_flag_set(m, idx))
+		update_cache_hwmon(&m->hw);
+	mutex_unlock(&m->update_lock);
+	return IRQ_HANDLED;
+}
+
+static unsigned long mon_read_count_single(struct msmcci_hwmon *m, int idx)
+{
+	unsigned long count, ovr;
+
+	count = read_mon_reg(m, idx, CNT_VALUE);
+	ovr = read_mon_reg(m, idx, OVR_FLG);
+	if (ovr == 1) {
+		count += 0xFFFFFFFFUL;
+		dev_warn(m->dev, "Counter[%d]: overflowed\n", idx);
+	}
+	return count;
+}
+
+static int count_to_mrps(unsigned long count, unsigned int us)
+{
+	do_div(count, us);
+	count++;
+	return count;
+}
+
+static unsigned int mrps_to_count(unsigned int mrps, unsigned int ms,
+				  unsigned int tolerance)
+{
+	mrps += tolerance;
+	mrps *= ms * USEC_PER_MSEC;
+	return mrps;
+}
+
+static unsigned long meas_mrps_and_set_irq(struct cache_hwmon *hw,
+		unsigned int tol, unsigned int us, struct mrps_stats *mrps)
+{
+	struct msmcci_hwmon *m = to_mon(hw);
+	unsigned long count;
+	unsigned int sample_ms = hw->df->profile->polling_ms;
+	int i;
+	u32 limit;
+
+	mon_disable(m);
+
+	/* calculate mrps and set limit */
+	for (i = 0; i < m->num_counters; i++) {
+		count = mon_read_count_single(m, i);
+		/*
+		 * When CCI is power collapsed, counters are cleared. Add
+		 * saved count to the current reading and clear saved count
+		 * to ensure we won't apply it more than once.
+		 */
+		count += m->cur_count[i];
+		m->cur_count[i] = 0;
+
+		mrps->mrps[i] = count_to_mrps(count, us);
+		limit = mrps_to_count(mrps->mrps[i], sample_ms, tol);
+
+		mon_clear_single(m, i);
+		mon_set_limit_single(m, i, limit);
+		/* save current limit for restoring after power collapse */
+		m->cur_limit[i] = limit;
+
+		dev_dbg(m->dev, "Counter[%d] count 0x%lx, limit 0x%x\n",
+			i, count, limit);
+	}
+
+	/*
+	 * There is no cycle counter for this device.
+	 * Treat all cycles as busy.
+	 */
+	mrps->busy_percent = 100;
+
+	/* re-enable monitor */
+	mon_enable(m);
+
+	return 0;
+}
+
+static void msmcci_hwmon_save_state(void)
+{
+	int i;
+	struct msmcci_hwmon *m;
+
+	list_for_each_entry(m, &msmcci_hwmon_list, list) {
+		if (!m->mon_enabled)
+			continue;
+		mon_disable(m);
+		/*
+		 * Power collapse might happen multiple times before
+		 * re-evaluation is done. Accumulate the saved count.
+		 * Clear counter after read in case power collapse is
+		 * aborted and register values are not wiped.
+		 */
+		for (i = 0; i < m->num_counters; i++) {
+			m->cur_count[i] += mon_read_count_single(m, i);
+			mon_clear_single(m, i);
+		}
+	}
+}
+
+static void msmcci_hwmon_restore_limit(struct msmcci_hwmon *m, int i)
+{
+	u32 new_limit;
+
+	if (m->cur_count[i] < m->cur_limit[i]) {
+		new_limit = m->cur_limit[i] - m->cur_count[i];
+	} else {
+		/*
+		 * If counter is larger than limit, interrupt should have
+		 * fired and prevented power collapse from happening. Just
+		 * in case the interrupt does not come, restore previous
+		 * limit so that interrupt will be triggered at some point.
+		 */
+		new_limit = m->cur_limit[i];
+	}
+	mon_set_limit_single(m, i, new_limit);
+	dev_dbg(m->dev, "Counter[%d] restore limit to 0x%x, saved count 0x%lx\n",
+		i, new_limit, m->cur_count[i]);
+}
+
+static void msmcci_hwmon_restore_state(void)
+{
+	int i;
+	struct msmcci_hwmon *m;
+
+	list_for_each_entry(m, &msmcci_hwmon_list, list) {
+		if (!m->mon_enabled)
+			continue;
+		mon_init(m);
+		for (i = 0; i < m->num_counters; i++)
+			msmcci_hwmon_restore_limit(m, i);
+		mon_enable(m);
+	}
+}
+
+#define CCI_LEVEL 2
+static int msmcci_hwmon_pm_callback(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	unsigned int level = (unsigned long) data;
+
+	if (level != CCI_LEVEL)
+		return NOTIFY_DONE;
+
+	/*
+	 * When CCI power collapse callback happens, only current CPU
+	 * would be executing code. Thus there is no need to hold
+	 * mutex or spinlock.
+	 */
+	switch (val) {
+	case CPU_CLUSTER_PM_ENTER:
+		msmcci_hwmon_save_state();
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		msmcci_hwmon_restore_state();
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block pm_notifier_block = {
+	.notifier_call = msmcci_hwmon_pm_callback,
+};
+
+static int register_pm_notifier(struct msmcci_hwmon *m)
+{
+	int ret;
+
+	mutex_lock(&notifier_reg_lock);
+	if (!use_cnt) {
+		ret = cpu_pm_register_notifier(&pm_notifier_block);
+		if (ret) {
+			dev_err(m->dev, "Failed to register for PM notification\n");
+			mutex_unlock(&notifier_reg_lock);
+			return ret;
+		}
+	}
+	use_cnt++;
+	mutex_unlock(&notifier_reg_lock);
+
+	return 0;
+}
+
+static void unregister_pm_notifier(void)
+{
+	mutex_lock(&notifier_reg_lock);
+	use_cnt--;
+	if (!use_cnt)
+		cpu_pm_unregister_notifier(&pm_notifier_block);
+	mutex_unlock(&notifier_reg_lock);
+}
+
+static int request_shared_interrupt(struct msmcci_hwmon *m)
+{
+	int ret;
+
+	ret = request_threaded_irq(m->irq[HIGH], NULL,
+			msmcci_hwmon_shared_intr_handler,
+			IRQF_ONESHOT | IRQF_SHARED,
+			dev_name(m->dev), m);
+	if (ret)
+		dev_err(m->dev, "Unable to register shared interrupt handler for irq %d\n",
+			m->irq[HIGH]);
+
+	return ret;
+}
+
+static int request_interrupts(struct msmcci_hwmon *m)
+{
+	int i, ret;
+
+	for (i = 0; i < m->num_counters; i++) {
+		ret = request_threaded_irq(m->irq[i], NULL,
+				msmcci_hwmon_intr_handler, IRQF_ONESHOT,
+				dev_name(m->dev), m);
+		if (ret) {
+			dev_err(m->dev, "Unable to register interrupt handler for irq %d\n",
+				m->irq[i]);
+			goto irq_failure;
+		}
+	}
+	return 0;
+
+irq_failure:
+	for (i--; i >= 0; i--) {
+		disable_irq(m->irq[i]);
+		free_irq(m->irq[i], m);
+	}
+	return ret;
+}
+
+static int start_hwmon(struct cache_hwmon *hw, struct mrps_stats *mrps)
+{
+	struct msmcci_hwmon *m = to_mon(hw);
+	unsigned int sample_ms = hw->df->profile->polling_ms;
+	int ret, i;
+	u32 limit;
+
+	ret = register_pm_notifier(m);
+	if (ret)
+		return ret;
+
+	if (m->irq_shared)
+		ret = request_shared_interrupt(m);
+	else
+		ret = request_interrupts(m);
+
+	if (ret) {
+		unregister_pm_notifier();
+		return ret;
+	}
+	mon_init(m);
+	mon_disable(m);
+	for (i = 0; i < m->num_counters; i++) {
+		mon_clear_single(m, i);
+		limit = mrps_to_count(mrps->mrps[i], sample_ms, 0);
+		mon_set_limit_single(m, i, limit);
+	}
+	mon_enable(m);
+	m->mon_enabled = true;
+
+	return 0;
+}
+
+static void stop_hwmon(struct cache_hwmon *hw)
+{
+	struct msmcci_hwmon *m = to_mon(hw);
+	int i;
+
+	m->mon_enabled = false;
+	mon_disable(m);
+
+	for (i = 0; i < m->num_counters; i++) {
+		if (!m->irq_shared || i == HIGH) {
+			disable_irq(m->irq[i]);
+			free_irq(m->irq[i], m);
+		}
+		mon_clear_single(m, i);
+	}
+
+	unregister_pm_notifier();
+}
+
+static int msmcci_hwmon_parse_dt(struct platform_device *pdev,
+				 struct msmcci_hwmon *m, int idx)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	u32 sel;
+	int ret;
+
+	if (idx >= MAX_NUM_GROUPS)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
+	if (!res)
+		return (idx == HIGH) ? -EINVAL : 0;
+
+	if (m->secure_io)
+		m->phys_base[idx] = res->start;
+	else {
+		m->virt_base[idx] = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+		if (!m->virt_base[idx]) {
+			dev_err(dev, "failed to ioremap\n");
+			return -ENOMEM;
+		}
+	}
+
+	ret = of_property_read_u32_index(pdev->dev.of_node,
+				"qcom,counter-event-sel", idx, &sel);
+	if (ret) {
+		dev_err(dev, "Counter[%d] failed to read event sel\n", idx);
+		return ret;
+	}
+	m->event_sel[idx] = sel;
+
+	if (!m->irq_shared || idx == HIGH) {
+		m->irq[idx] = platform_get_irq(pdev, idx);
+		if (m->irq[idx] < 0) {
+			dev_err(dev, "Counter[%d] failed to get IRQ number\n",
+									idx);
+			return m->irq[idx];
+		}
+	}
+	m->num_counters++;
+	return 0;
+}
+
+static int msmcci_hwmon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct msmcci_hwmon *m;
+	int ret;
+
+	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+	if (!m)
+		return -ENOMEM;
+	m->dev = &pdev->dev;
+
+	m->secure_io = of_property_read_bool(pdev->dev.of_node,
+					"qcom,secure-io");
+
+	m->irq_shared = of_property_read_bool(pdev->dev.of_node,
+						"qcom,shared-irq");
+
+	ret = msmcci_hwmon_parse_dt(pdev, m, HIGH);
+	if (ret)
+		return ret;
+	ret = msmcci_hwmon_parse_dt(pdev, m, MED);
+	if (ret)
+		return ret;
+	ret = msmcci_hwmon_parse_dt(pdev, m, LOW);
+	if (ret)
+		return ret;
+
+	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!m->hw.of_node) {
+		dev_err(dev, "No target device specified\n");
+		return -EINVAL;
+	}
+	m->hw.start_hwmon = &start_hwmon;
+	m->hw.stop_hwmon = &stop_hwmon;
+	m->hw.meas_mrps_and_set_irq = &meas_mrps_and_set_irq;
+	mutex_init(&m->update_lock);
+
+	/*
+	 * This tests whether secure IO for monitor registers
+	 * is supported.
+	 */
+	ret = mon_init(m);
+	if (ret) {
+		dev_err(dev, "Failed to config monitor. Cache hwmon not registered\n");
+		return ret;
+	}
+
+	ret = register_cache_hwmon(dev, &m->hw);
+	if (ret) {
+		dev_err(dev, "MSMCCI cache hwmon registration failed\n");
+		return ret;
+	}
+
+	mutex_lock(&list_lock);
+	list_add_tail(&m->list, &msmcci_hwmon_list);
+	mutex_unlock(&list_lock);
+
+	dev_info(dev, "MSMCCI cache hwmon registered\n");
+	return 0;
+}
+
+static const struct of_device_id cci_match_table[] = {
+	{ .compatible = "qcom,msmcci-hwmon" },
+	{}
+};
+
+static struct platform_driver msmcci_hwmon_driver = {
+	.probe = msmcci_hwmon_driver_probe,
+	.driver = {
+		.name = "msmcci-hwmon",
+		.of_match_table = cci_match_table,
+	},
+};
+
+module_platform_driver(msmcci_hwmon_driver);
+MODULE_DESCRIPTION("QTI CCI performance monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f711..d235fbe 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -691,7 +691,7 @@
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
 		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-				GFP_KERNEL, &ioat_chan->completion_dma);
+				GFP_NOWAIT, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
 
@@ -701,7 +701,7 @@
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	order = IOAT_MAX_ORDER;
-	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
 	if (!ring)
 		return -ENOMEM;
 
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index d469869..6bec860 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -39,6 +39,10 @@
 
 #define DRP_SYN_REG_CNT	8
 
+#define LLCC_COMMON_STATUS0		0x0003000C
+#define LLCC_LB_CNT_MASK		0xf0000000
+#define LLCC_LB_CNT_SHIFT		28
+
 /* single & Double Bit syndrome register offsets */
 #define TRP_ECC_SB_ERR_SYN0		0x0002304C
 #define TRP_ECC_DB_ERR_SYN0		0x00020370
@@ -97,7 +101,10 @@
 
 struct erp_drvdata {
 	struct regmap *llcc_map;
+	phys_addr_t *llcc_banks;
 	u32 ecc_irq;
+	u32 num_banks;
+	u32 b_off;
 };
 
 static const struct errors_edac errors[] = {
@@ -108,29 +115,32 @@
 };
 
 /* Clear the error interrupt and counter registers */
-static void qcom_llcc_clear_errors(int err_type, struct regmap *llcc_map)
+static void qcom_llcc_clear_errors(int err_type, struct erp_drvdata *drv)
 {
 	switch (err_type) {
 	case LLCC_DRAM_CE:
 	case LLCC_DRAM_UE:
 		/* Clear the interrupt */
-		regmap_write(llcc_map, DRP_INTERRUPT_CLEAR, DRP_TRP_INT_CLEAR);
+		regmap_write(drv->llcc_map, drv->b_off + DRP_INTERRUPT_CLEAR,
+			DRP_TRP_INT_CLEAR);
 		/* Clear the counters */
-		regmap_write(llcc_map, DRP_ECC_ERROR_CNTR_CLEAR,
+		regmap_write(drv->llcc_map,
+			drv->b_off + DRP_ECC_ERROR_CNTR_CLEAR,
 			DRP_TRP_CNT_CLEAR);
 		break;
 	case LLCC_TRAM_CE:
 	case LLCC_TRAM_UE:
-		regmap_write(llcc_map, TRP_INTERRUPT_0_CLEAR,
+		regmap_write(drv->llcc_map, drv->b_off + TRP_INTERRUPT_0_CLEAR,
 			     DRP_TRP_INT_CLEAR);
-		regmap_write(llcc_map, TRP_ECC_ERROR_CNTR_CLEAR,
-			     DRP_TRP_CNT_CLEAR);
+		regmap_write(drv->llcc_map,
+			drv->b_off + TRP_ECC_ERROR_CNTR_CLEAR,
+			DRP_TRP_CNT_CLEAR);
 		break;
 	}
 }
 
 /* Dump syndrome registers for tag Ram Double bit errors */
-static void dump_trp_db_syn_reg(struct regmap *llcc_map)
+static void dump_trp_db_syn_reg(struct erp_drvdata *drv, u32 bank)
 {
 	int i;
 	int db_err_cnt;
@@ -140,17 +150,20 @@
 
 	for (i = 0; i < TRP_SYN_REG_CNT; i++) {
 		synd_reg = TRP_ECC_DB_ERR_SYN0 + (i * 4);
-		regmap_read(llcc_map, synd_reg, &synd_val);
+		regmap_read(drv->llcc_map, drv->llcc_banks[bank] + synd_reg,
+			&synd_val);
 		edac_printk(KERN_CRIT, EDAC_LLCC, "TRP_ECC_SYN%d: 0x%8x\n",
 			i, synd_val);
 	}
 
-	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS1, &db_err_cnt);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + TRP_ECC_ERROR_STATUS1, &db_err_cnt);
 	db_err_cnt = (db_err_cnt & ECC_DB_ERR_COUNT_MASK);
 	edac_printk(KERN_CRIT, EDAC_LLCC, "Double-Bit error count: 0x%4x\n",
 		db_err_cnt);
 
-	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS0, &db_err_ways);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + TRP_ECC_ERROR_STATUS0, &db_err_ways);
 	db_err_ways = (db_err_ways & ECC_DB_ERR_WAYS_MASK);
 	db_err_ways >>= ECC_DB_ERR_WAYS_SHIFT;
 
@@ -159,7 +172,7 @@
 }
 
 /* Dump syndrome register for tag Ram Single Bit Errors */
-static void dump_trp_sb_syn_reg(struct regmap *llcc_map)
+static void dump_trp_sb_syn_reg(struct erp_drvdata *drv, u32 bank)
 {
 	int i;
 	int sb_err_cnt;
@@ -169,18 +182,21 @@
 
 	for (i = 0; i < TRP_SYN_REG_CNT; i++) {
 		synd_reg = TRP_ECC_SB_ERR_SYN0 + (i * 4);
-		regmap_read(llcc_map, synd_reg, &synd_val);
+		regmap_read(drv->llcc_map, drv->llcc_banks[bank] + synd_reg,
+			&synd_val);
 		edac_printk(KERN_CRIT, EDAC_LLCC, "TRP_ECC_SYN%d: 0x%8x\n",
 			i, synd_val);
 	}
 
-	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS1, &sb_err_cnt);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + TRP_ECC_ERROR_STATUS1, &sb_err_cnt);
 	sb_err_cnt = (sb_err_cnt & ECC_SB_ERR_COUNT_MASK);
 	sb_err_cnt >>= ECC_SB_ERR_COUNT_SHIFT;
 	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error count: 0x%4x\n",
 		sb_err_cnt);
 
-	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS0, &sb_err_ways);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + TRP_ECC_ERROR_STATUS0, &sb_err_ways);
 	sb_err_ways = sb_err_ways & ECC_SB_ERR_WAYS_MASK;
 
 	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error ways: 0x%4x\n",
@@ -188,7 +204,7 @@
 }
 
 /* Dump syndrome registers for Data Ram Double bit errors */
-static void dump_drp_db_syn_reg(struct regmap *llcc_map)
+static void dump_drp_db_syn_reg(struct erp_drvdata *drv, u32 bank)
 {
 	int i;
 	int db_err_cnt;
@@ -198,17 +214,20 @@
 
 	for (i = 0; i < DRP_SYN_REG_CNT; i++) {
 		synd_reg = DRP_ECC_DB_ERR_SYN0 + (i * 4);
-		regmap_read(llcc_map, synd_reg, &synd_val);
+		regmap_read(drv->llcc_map, drv->llcc_banks[bank] + synd_reg,
+			&synd_val);
 		edac_printk(KERN_CRIT, EDAC_LLCC, "DRP_ECC_SYN%d: 0x%8x\n",
 			i, synd_val);
 	}
 
-	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS1, &db_err_cnt);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + DRP_ECC_ERROR_STATUS1, &db_err_cnt);
 	db_err_cnt = (db_err_cnt & ECC_DB_ERR_COUNT_MASK);
 	edac_printk(KERN_CRIT, EDAC_LLCC, "Double-Bit error count: 0x%4x\n",
 		db_err_cnt);
 
-	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS0, &db_err_ways);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + DRP_ECC_ERROR_STATUS0, &db_err_ways);
 	db_err_ways &= ECC_DB_ERR_WAYS_MASK;
 	db_err_ways >>= ECC_DB_ERR_WAYS_SHIFT;
 	edac_printk(KERN_CRIT, EDAC_LLCC, "Double-Bit error ways: 0x%4x\n",
@@ -216,7 +235,7 @@
 }
 
 /* Dump Syndrome registers for Data Ram Single bit errors*/
-static void dump_drp_sb_syn_reg(struct regmap *llcc_map)
+static void dump_drp_sb_syn_reg(struct erp_drvdata *drv, u32 bank)
 {
 	int i;
 	int sb_err_cnt;
@@ -226,18 +245,21 @@
 
 	for (i = 0; i < DRP_SYN_REG_CNT; i++) {
 		synd_reg = DRP_ECC_SB_ERR_SYN0 + (i * 4);
-		regmap_read(llcc_map, synd_reg, &synd_val);
+		regmap_read(drv->llcc_map, drv->llcc_banks[bank] + synd_reg,
+			&synd_val);
 		edac_printk(KERN_CRIT, EDAC_LLCC, "DRP_ECC_SYN%d: 0x%8x\n",
 			i, synd_val);
 	}
 
-	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS1, &sb_err_cnt);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + DRP_ECC_ERROR_STATUS1, &sb_err_cnt);
 	sb_err_cnt &= ECC_SB_ERR_COUNT_MASK;
 	sb_err_cnt >>= ECC_SB_ERR_COUNT_SHIFT;
 	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error count: 0x%4x\n",
 		sb_err_cnt);
 
-	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS0, &sb_err_ways);
+	regmap_read(drv->llcc_map,
+		drv->llcc_banks[bank] + DRP_ECC_ERROR_STATUS0, &sb_err_ways);
 	sb_err_ways = sb_err_ways & ECC_SB_ERR_WAYS_MASK;
 
 	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error ways: 0x%4x\n",
@@ -246,24 +268,26 @@
 
 
 static void dump_syn_reg(struct edac_device_ctl_info *edev_ctl,
-			 int err_type, struct regmap *llcc_map)
+			 int err_type, u32 bank)
 {
+	struct erp_drvdata *drv = edev_ctl->pvt_info;
+
 	switch (err_type) {
 	case LLCC_DRAM_CE:
-		dump_drp_sb_syn_reg(llcc_map);
+		dump_drp_sb_syn_reg(drv, bank);
 		break;
 	case LLCC_DRAM_UE:
-		dump_drp_db_syn_reg(llcc_map);
+		dump_drp_db_syn_reg(drv, bank);
 		break;
 	case LLCC_TRAM_CE:
-		dump_trp_sb_syn_reg(llcc_map);
+		dump_trp_sb_syn_reg(drv, bank);
 		break;
 	case LLCC_TRAM_UE:
-		dump_trp_db_syn_reg(llcc_map);
+		dump_trp_db_syn_reg(drv, bank);
 		break;
 	}
 
-	qcom_llcc_clear_errors(err_type, llcc_map);
+	qcom_llcc_clear_errors(err_type, drv);
 
 	errors[err_type].func(edev_ctl, 0, 0, errors[err_type].msg);
 }
@@ -274,30 +298,36 @@
 	u32 drp_error;
 	u32 trp_error;
 	struct erp_drvdata *drv = edev_ctl->pvt_info;
+	u32 i;
 
-	/* Look for Data RAM errors */
-	regmap_read(drv->llcc_map, DRP_INTERRUPT_STATUS, &drp_error);
+	for (i = 0; i < drv->num_banks; i++) {
+		/* Look for Data RAM errors */
+		regmap_read(drv->llcc_map,
+			drv->llcc_banks[i] + DRP_INTERRUPT_STATUS, &drp_error);
 
-	if (drp_error & SB_ECC_ERROR) {
-		edac_printk(KERN_CRIT, EDAC_LLCC,
-			"Single Bit Error detected in Data Ram\n");
-		dump_syn_reg(edev_ctl, LLCC_DRAM_CE, drv->llcc_map);
-	} else if (drp_error & DB_ECC_ERROR) {
-		edac_printk(KERN_CRIT, EDAC_LLCC,
-			"Double Bit Error detected in Data Ram\n");
-		dump_syn_reg(edev_ctl, LLCC_DRAM_UE, drv->llcc_map);
-	}
+		if (drp_error & SB_ECC_ERROR) {
+			edac_printk(KERN_CRIT, EDAC_LLCC,
+				"Single Bit Error detected in Data Ram\n");
+			dump_syn_reg(edev_ctl, LLCC_DRAM_CE, i);
+		} else if (drp_error & DB_ECC_ERROR) {
+			edac_printk(KERN_CRIT, EDAC_LLCC,
+				"Double Bit Error detected in Data Ram\n");
+			dump_syn_reg(edev_ctl, LLCC_DRAM_UE, i);
+		}
 
-	/* Look for Tag RAM errors */
-	regmap_read(drv->llcc_map, TRP_INTERRUPT_0_STATUS, &trp_error);
-	if (trp_error & SB_ECC_ERROR) {
-		edac_printk(KERN_CRIT, EDAC_LLCC,
-			"Single Bit Error detected in Tag Ram\n");
-		dump_syn_reg(edev_ctl, LLCC_TRAM_CE, drv->llcc_map);
-	} else if (trp_error & DB_ECC_ERROR) {
-		edac_printk(KERN_CRIT, EDAC_LLCC,
-			"Double Bit Error detected in Tag Ram\n");
-		dump_syn_reg(edev_ctl, LLCC_TRAM_UE, drv->llcc_map);
+		/* Look for Tag RAM errors */
+		regmap_read(drv->llcc_map,
+			drv->llcc_banks[i] + TRP_INTERRUPT_0_STATUS,
+			&trp_error);
+		if (trp_error & SB_ECC_ERROR) {
+			edac_printk(KERN_CRIT, EDAC_LLCC,
+				"Single Bit Error detected in Tag Ram\n");
+			dump_syn_reg(edev_ctl, LLCC_TRAM_CE, i);
+		} else if (trp_error & DB_ECC_ERROR) {
+			edac_printk(KERN_CRIT, EDAC_LLCC,
+				"Double Bit Error detected in Tag Ram\n");
+			dump_syn_reg(edev_ctl, LLCC_TRAM_UE, i);
+		}
 	}
 }
 
@@ -319,6 +349,8 @@
 	struct erp_drvdata *drv;
 	struct edac_device_ctl_info *edev_ctl;
 	struct device *dev = &pdev->dev;
+	u32 *banks;
+	u32 i;
 
 	/* Allocate edac control info */
 	edev_ctl = edac_device_alloc_ctl_info(sizeof(*drv), "qcom-llcc", 1,
@@ -358,6 +390,43 @@
 		}
 	}
 
+	/* Find the number of LLC banks supported */
+	regmap_read(drv->llcc_map, LLCC_COMMON_STATUS0,
+		    &drv->num_banks);
+
+	drv->num_banks &= LLCC_LB_CNT_MASK;
+	drv->num_banks >>= LLCC_LB_CNT_SHIFT;
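+	/*
+	 * The bank count sits in bits [31:28] of LLCC_COMMON_STATUS0; for
+	 * instance (illustrative value), a status of 0x40000000 reports
+	 * four LLC banks.
+	 */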
+
+	drv->llcc_banks = devm_kzalloc(&pdev->dev,
+		sizeof(phys_addr_t) * drv->num_banks, GFP_KERNEL);
+
+	if (!drv->llcc_banks) {
+		dev_err(dev, "Cannot allocate memory for llcc_banks\n");
+		return -ENOMEM;
+	}
+
+	banks = devm_kzalloc(&pdev->dev,
+		sizeof(u32) * drv->num_banks, GFP_KERNEL);
+	if (!banks)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(dev->parent->of_node,
+			"qcom,llcc-banks-off", banks, drv->num_banks);
+	if (rc) {
+		dev_err(dev, "Cannot read llcc-banks-off property\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(dev->parent->of_node,
+			"qcom,llcc-broadcast-off", &drv->b_off);
+	if (rc) {
+		dev_err(dev, "Cannot read llcc-broadcast-off property\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < drv->num_banks; i++)
+		drv->llcc_banks[i] = banks[i];
+
 	platform_set_drvdata(pdev, edev_ctl);
 
 	rc = edac_device_add_device(edev_ctl);
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
index 0efca1e..a56c7e0 100644
--- a/drivers/esoc/Kconfig
+++ b/drivers/esoc/Kconfig
@@ -61,4 +61,12 @@
 	  by command engine to the external modem. Also allows masking
 	  of certain notifications being sent to the external modem.
 
+config MDM_DBG_REQ_ENG
+	tristate "manual request engine for 4x series external modems"
+	depends on ESOC_MDM_DBG_ENG
+	help
+	  Provides a user interface to handle incoming requests from
+	  the external modem. Allows for debugging of the IPC mechanism
+	  between the external modem and the primary SoC.
+
 endif
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index b1834e2..334278b 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -435,11 +435,12 @@
 {
 	int value;
 	struct esoc_clink *esoc;
+	struct device *dev;
 	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
-	struct device *dev = mdm->dev;
 
 	if (!mdm)
 		return IRQ_HANDLED;
+	dev = mdm->dev;
 	esoc = mdm->esoc;
 	value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
 	if (value == 0 && mdm->ready) {
@@ -500,7 +501,7 @@
 	struct device_node *node = mdm->dev->of_node;
 
 	addr = of_iomap(node, 0);
-	if (IS_ERR(addr)) {
+	if (IS_ERR_OR_NULL(addr)) {
 		dev_err(mdm->dev, "failed to get debug base address\n");
 		return;
 	}
@@ -509,7 +510,7 @@
 	if (val == MDM_DBG_MODE) {
 		mdm->dbg_mode = true;
 		mdm->cti = coresight_cti_get(MDM_CTI_NAME);
-		if (IS_ERR(mdm->cti)) {
+		if (IS_ERR_OR_NULL(mdm->cti)) {
 			dev_err(mdm->dev, "unable to get cti handle\n");
 			goto cti_get_err;
 		}
@@ -743,7 +744,7 @@
 	mdm->dev = &pdev->dev;
 	mdm->pon_ops = pon_ops;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -813,7 +814,7 @@
 	mdm->pon_ops = pon_ops;
 	node = pdev->dev.of_node;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -901,7 +902,7 @@
 	mdm->pon_ops = pon_ops;
 	node = pdev->dev.of_node;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -936,6 +937,10 @@
 	mdm->dual_interface = of_property_read_bool(node,
 						"qcom,mdm-dual-link");
 	esoc->link_name = MDM9x55_PCIE;
+	ret = of_property_read_string(node, "qcom,mdm-link-info",
+					&esoc->link_info);
+	if (ret)
+		dev_info(mdm->dev, "esoc link info missing\n");
 	esoc->clink_ops = clink_ops;
 	esoc->parent = mdm->dev;
 	esoc->owner = THIS_MODULE;
@@ -1001,11 +1006,11 @@
 	struct mdm_ctrl *mdm;
 
 	match = of_match_node(mdm_dt_match, node);
-	if (IS_ERR(match))
+	if (IS_ERR_OR_NULL(match))
 		return PTR_ERR(match);
 	mdm_ops = match->data;
 	mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
-	if (IS_ERR(mdm))
+	if (IS_ERR_OR_NULL(mdm))
 		return PTR_ERR(mdm);
 	return mdm_ops->config_hw(mdm, mdm_ops, pdev);
 }
diff --git a/drivers/esoc/esoc-mdm-dbg-eng.c b/drivers/esoc/esoc-mdm-dbg-eng.c
index a186ea8..309c820 100644
--- a/drivers/esoc/esoc-mdm-dbg-eng.c
+++ b/drivers/esoc/esoc-mdm-dbg-eng.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -179,7 +179,165 @@
 }
 static DRIVER_ATTR(notifier_mask, 00200, NULL, notifier_mask_store);
 
-int mdm_dbg_eng_init(struct esoc_drv *esoc_drv)
+#ifdef CONFIG_MDM_DBG_REQ_ENG
+static struct esoc_clink *dbg_clink;
+/* Last recorded request from esoc */
+static enum esoc_req last_req;
+static DEFINE_SPINLOCK(req_lock);
+/*
+ * esoc_to_user: Conversion of esoc ids to user visible strings
+ * id: esoc request, command, notifier, event id
+ * str: string equivalent of the above
+ */
+struct esoc_to_user {
+	unsigned int id;
+	char str[20];
+};
+
+static struct esoc_to_user in_to_resp[] = {
+	{
+		.id = ESOC_IMG_XFER_DONE,
+		.str = "XFER_DONE",
+	},
+	{
+		.id = ESOC_BOOT_DONE,
+		.str = "BOOT_DONE",
+	},
+	{
+		.id = ESOC_BOOT_FAIL,
+		.str = "BOOT_FAIL",
+	},
+	{
+		.id = ESOC_IMG_XFER_RETRY,
+		.str = "XFER_RETRY",
+	},
+	{
+		.id = ESOC_IMG_XFER_FAIL,
+		.str = "XFER_FAIL",
+	},
+	{
+		.id = ESOC_UPGRADE_AVAILABLE,
+		.str = "UPGRADE",
+	},
+	{
+		.id = ESOC_DEBUG_DONE,
+		.str = "DEBUG_DONE",
+	},
+	{
+		.id = ESOC_DEBUG_FAIL,
+		.str = "DEBUG_FAIL",
+	},
+};
+
+static struct esoc_to_user req_to_str[] = {
+	{
+		.id = ESOC_REQ_IMG,
+		.str = "REQ_IMG",
+	},
+	{
+		.id = ESOC_REQ_DEBUG,
+		.str = "REQ_DEBUG",
+	},
+	{
+		.id = ESOC_REQ_SHUTDOWN,
+		.str = "REQ_SHUTDOWN",
+	},
+};
+
+static ssize_t req_eng_resp_store(struct device_driver *drv, const char *buf,
+							size_t count)
+{
+	unsigned int i;
+	const struct esoc_clink_ops *const clink_ops = dbg_clink->clink_ops;
+
+	dev_dbg(&dbg_clink->dev, "user input req eng response %s\n", buf);
+	for (i = 0; i < ARRAY_SIZE(in_to_resp); i++) {
+		size_t len1 = strlen(buf);
+		size_t len2 = strlen(in_to_resp[i].str);
+
+		if (len1 == len2 && !strcmp(buf, in_to_resp[i].str)) {
+			clink_ops->notify(in_to_resp[i].id, dbg_clink);
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(in_to_resp))
+		dev_err(&dbg_clink->dev, "Invalid resp %s specified\n", buf);
+	return count;
+}
+
+static DRIVER_ATTR(req_eng_resp, 0200, NULL, req_eng_resp_store);
+
+static ssize_t last_esoc_req_show(struct device_driver *drv, char *buf)
+{
+	unsigned int i;
+	unsigned long flags;
+	size_t count = 0;
+
+	spin_lock_irqsave(&req_lock, flags);
+	for (i = 0; i < ARRAY_SIZE(req_to_str); i++) {
+		if (last_req == req_to_str[i].id) {
+			count = snprintf(buf, PAGE_SIZE, "%s\n",
+					req_to_str[i].str);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&req_lock, flags);
+	return count;
+}
+static DRIVER_ATTR(last_esoc_req, 0400, last_esoc_req_show, NULL);
+
+static void esoc_handle_req(enum esoc_req req, struct esoc_eng *eng)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&req_lock, flags);
+	last_req = req;
+	spin_unlock_irqrestore(&req_lock, flags);
+}
+
+static void esoc_handle_evt(enum esoc_evt evt, struct esoc_eng *eng)
+{
+}
+
+static struct esoc_eng dbg_req_eng = {
+	.handle_clink_req = esoc_handle_req,
+	.handle_clink_evt = esoc_handle_evt,
+};
+
+int register_dbg_req_eng(struct esoc_clink *clink,
+					struct device_driver *drv)
+{
+	int ret;
+
+	dbg_clink = clink;
+	ret = driver_create_file(drv, &driver_attr_req_eng_resp);
+	if (ret)
+		return ret;
+	ret = driver_create_file(drv, &driver_attr_last_esoc_req);
+	if (ret) {
+		dev_err(&clink->dev, "Unable to create last esoc req\n");
+		goto last_req_err;
+	}
+	ret = esoc_clink_register_req_eng(clink, &dbg_req_eng);
+	if (ret) {
+		pr_err("Unable to register req eng\n");
+		goto req_eng_fail;
+	}
+	spin_lock_init(&req_lock);
+	return 0;
+req_eng_fail:
+	driver_remove_file(drv, &driver_attr_last_esoc_req);
+last_req_err:
+	driver_remove_file(drv, &driver_attr_req_eng_resp);
+	return ret;
+}
+#else
+int register_dbg_req_eng(struct esoc_clink *clink, struct device_driver *d)
+{
+	return 0;
+}
+#endif
+
+int mdm_dbg_eng_init(struct esoc_drv *esoc_drv,
+			struct esoc_clink *clink)
 {
 	int ret;
 	struct device_driver *drv = &esoc_drv->driver;
@@ -194,7 +352,14 @@
 		pr_err("Unable to create notify mask file\n");
 		goto notify_mask_err;
 	}
+	ret = register_dbg_req_eng(clink, drv);
+	if (ret) {
+		pr_err("Failed to register esoc dbg req eng\n");
+		goto dbg_req_fail;
+	}
 	return 0;
+dbg_req_fail:
+	driver_remove_file(drv, &driver_attr_notifier_mask);
 notify_mask_err:
 	driver_remove_file(drv, &driver_attr_command_mask);
 cmd_mask_err:
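
The debug request engine added above exposes two driver attributes: last_esoc_req reports the most recent request recorded by esoc_handle_req(), and req_eng_resp lets a test harness write one of the in_to_resp[] strings back to the clink as a response. A minimal user-space sketch of that interaction is shown below; the sysfs directory (including the "mdm-4x" driver name in it) is an assumption, since driver attributes live under /sys/bus/<bus>/drivers/<driver>/ and the esoc driver name does not appear in this hunk.

/*
 * Illustrative user-space sketch only -- not part of the patch.
 * DBG_DIR is an assumed path; substitute the real esoc driver directory.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DBG_DIR "/sys/bus/esoc/drivers/mdm-4x/"	/* assumed path */

int main(void)
{
	char last[32] = {0};
	int fd;

	/* Read the last request recorded by esoc_handle_req(). */
	fd = open(DBG_DIR "last_esoc_req", O_RDONLY);
	if (fd < 0 || read(fd, last, sizeof(last) - 1) < 0) {
		perror("last_esoc_req");
		return 1;
	}
	close(fd);
	printf("last esoc request: %s", last);

	/* Inject a response; the string must match an in_to_resp[] entry. */
	fd = open(DBG_DIR "req_eng_resp", O_WRONLY);
	if (fd < 0 || write(fd, "XFER_DONE", strlen("XFER_DONE")) < 0) {
		perror("req_eng_resp");
		return 1;
	}
	close(fd);
	return 0;
}
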
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 473a9c7..31cd8c4 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -233,7 +233,7 @@
 	struct esoc_eng *esoc_eng;
 
 	mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
-	if (IS_ERR(mdm_drv))
+	if (IS_ERR_OR_NULL(mdm_drv))
 		return PTR_ERR(mdm_drv);
 	esoc_eng = &mdm_drv->cmd_eng;
 	esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
@@ -261,7 +261,7 @@
 	ret = register_reboot_notifier(&mdm_drv->esoc_restart);
 	if (ret)
 		dev_err(&esoc_clink->dev, "register for reboot failed\n");
-	ret = mdm_dbg_eng_init(drv);
+	ret = mdm_dbg_eng_init(drv, esoc_clink);
 	if (ret) {
 		debug_init_done = false;
 		dev_err(&esoc_clink->dev, "dbg engine failure\n");
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
index 0cec985..9fc3192 100644
--- a/drivers/esoc/esoc.h
+++ b/drivers/esoc/esoc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
  * struct esoc_clink: Representation of external esoc device
  * @name: Name of the external esoc.
  * @link_name: name of the physical link.
+ * @link_info: additional info about the physical link.
  * @parent: parent device.
  * @dev: device for userspace interface.
  * @id: id of the external device.
@@ -62,6 +63,7 @@
 struct esoc_clink {
 	const char *name;
 	const char *link_name;
+	const char *link_info;
 	struct device *parent;
 	struct device dev;
 	unsigned int id;
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
index 4807e2b..cef570b 100644
--- a/drivers/esoc/esoc_bus.c
+++ b/drivers/esoc/esoc_bus.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,10 +32,19 @@
 				to_esoc_clink(dev)->link_name);
 }
 
+static ssize_t
+esoc_link_info_show(struct device *dev, struct device_attribute *attr,
+							char *buf)
+{
+	return snprintf(buf, ESOC_LINK_LEN, "%s",
+				to_esoc_clink(dev)->link_info);
+}
+
 static struct device_attribute esoc_clink_attrs[] = {
 
 	__ATTR_RO(esoc_name),
 	__ATTR_RO(esoc_link),
+	__ATTR_RO(esoc_link_info),
 	__ATTR_NULL,
 };
 
@@ -129,7 +138,7 @@
 	struct device *dev;
 
 	dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
-	if (IS_ERR(dev))
+	if (IS_ERR_OR_NULL(dev))
 		return NULL;
 	esoc_clink = to_esoc_clink(dev);
 	return esoc_clink;
@@ -143,7 +152,7 @@
 
 	dev = bus_find_device(&esoc_bus_type, NULL, node,
 						esoc_clink_match_node);
-	if (IS_ERR(dev))
+	if (IS_ERR_OR_NULL(dev))
 		return NULL;
 	esoc_clink = to_esoc_clink(dev);
 	return esoc_clink;
@@ -175,14 +184,14 @@
 
 	len = strlen("esoc") + sizeof(esoc_clink->id);
 	subsys_name = kzalloc(len, GFP_KERNEL);
-	if (IS_ERR(subsys_name))
+	if (IS_ERR_OR_NULL(subsys_name))
 		return PTR_ERR(subsys_name);
 	snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
 	esoc_clink->subsys.name = subsys_name;
 	esoc_clink->dev.of_node = esoc_clink->np;
 	esoc_clink->subsys.dev = &esoc_clink->dev;
 	esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
-	if (IS_ERR(esoc_clink->subsys_dev)) {
+	if (IS_ERR_OR_NULL(esoc_clink->subsys_dev)) {
 		dev_err(&esoc_clink->dev, "failed to register ssr node\n");
 		ret = PTR_ERR(esoc_clink->subsys_dev);
 		goto subsys_err;
diff --git a/drivers/esoc/esoc_client.c b/drivers/esoc/esoc_client.c
index 5b194e31..446735c 100644
--- a/drivers/esoc/esoc_client.c
+++ b/drivers/esoc/esoc_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -44,10 +44,12 @@
 	struct device_node *np = dev->of_node;
 	struct esoc_clink *esoc_clink;
 	struct esoc_desc *desc;
-	char *esoc_name, *esoc_link;
+	char *esoc_name, *esoc_link, *esoc_link_info;
 
 	for (index = 0;; index++) {
 		esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
+		if (IS_ERR_OR_NULL(esoc_prop))
+			return ERR_PTR(-ENOMEM);
 		parp = of_get_property(np, esoc_prop, NULL);
 		if (parp == NULL) {
 			dev_err(dev, "esoc device not present\n");
@@ -84,16 +86,26 @@
 			kfree(esoc_name);
 			return ERR_PTR(-ENOMEM);
 		}
+		esoc_link_info = kasprintf(GFP_KERNEL, "%s",
+					esoc_clink->link_info);
+		if (IS_ERR_OR_NULL(esoc_link_info)) {
+			dev_err(dev, "unable to alloc link info name\n");
+			kfree(esoc_name);
+			kfree(esoc_link);
+			return ERR_PTR(-ENOMEM);
+		}
 		desc = devres_alloc(devm_esoc_desc_release,
 						sizeof(*desc), GFP_KERNEL);
 		if (IS_ERR_OR_NULL(desc)) {
 			kfree(esoc_name);
 			kfree(esoc_link);
+			kfree(esoc_link_info);
 			dev_err(dev, "unable to allocate esoc descriptor\n");
 			return ERR_PTR(-ENOMEM);
 		}
 		desc->name = esoc_name;
 		desc->link = esoc_link;
+		desc->link_info = esoc_link_info;
 		desc->priv = esoc_clink;
 		devres_add(dev, desc);
 		return desc;
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 17a30b8..0c9e428 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -215,7 +215,7 @@
 							esoc_clink->name);
 				return -EIO;
 			}
-			put_user(req, (unsigned long __user *)uarg);
+			put_user(req, (unsigned int __user *)uarg);
 
 		}
 		return err;
@@ -227,7 +227,7 @@
 		err = clink_ops->get_status(&status, esoc_clink);
 		if (err)
 			return err;
-		put_user(status, (unsigned long __user *)uarg);
+		put_user(status, (unsigned int __user *)uarg);
 		break;
 	case ESOC_WAIT_FOR_CRASH:
 		err = wait_event_interruptible(esoc_udev->evt_wait,
@@ -241,7 +241,7 @@
 							esoc_clink->name);
 				return -EIO;
 			}
-			put_user(evt, (unsigned long __user *)uarg);
+			put_user(evt, (unsigned int __user *)uarg);
 		}
 		return err;
 	default:
@@ -259,7 +259,16 @@
 	unsigned int minor = iminor(inode);
 
 	esoc_udev = esoc_udev_get_by_minor(minor);
+	if (!esoc_udev) {
+		pr_err("failed to get udev\n");
+		return -ENOMEM;
+	}
+
 	esoc_clink = get_esoc_clink(esoc_udev->clink->id);
+	if (!esoc_clink) {
+		pr_err("failed to get clink\n");
+		return -ENOMEM;
+	}
 
 	uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
 	if (!uhandle) {
@@ -304,12 +313,12 @@
 	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
 
 	esoc_udev = get_free_esoc_udev(esoc_clink);
-	if (IS_ERR(esoc_udev))
+	if (IS_ERR_OR_NULL(esoc_udev))
 		return PTR_ERR(esoc_udev);
 	esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
 					MKDEV(esoc_major, esoc_clink->id),
 					esoc_clink, "esoc-%d", esoc_clink->id);
-	if (IS_ERR(esoc_udev->dev)) {
+	if (IS_ERR_OR_NULL(esoc_udev->dev)) {
 		pr_err("failed to create user device\n");
 		goto dev_err;
 	}
@@ -357,8 +366,7 @@
 	int ret = 0;
 
 	esoc_class = class_create(THIS_MODULE, "esoc-dev");
-
-	if (IS_ERR(esoc_class)) {
+	if (IS_ERR_OR_NULL(esoc_class)) {
 		pr_err("coudn't create class");
 		return PTR_ERR(esoc_class);
 	}
diff --git a/drivers/esoc/mdm-dbg.h b/drivers/esoc/mdm-dbg.h
index ae31339..ffba87c 100644
--- a/drivers/esoc/mdm-dbg.h
+++ b/drivers/esoc/mdm-dbg.h
@@ -24,7 +24,8 @@
 	return false;
 }
 
-static inline int mdm_dbg_eng_init(struct esoc_drv *drv)
+static inline int mdm_dbg_eng_init(struct esoc_drv *drv,
+						struct esoc_clink *clink)
 {
 	return 0;
 }
@@ -32,7 +33,8 @@
 #else
 extern bool dbg_check_cmd_mask(unsigned int cmd);
 extern bool dbg_check_notify_mask(unsigned int notify);
-extern int mdm_dbg_eng_init(struct esoc_drv *drv);
+extern int mdm_dbg_eng_init(struct esoc_drv *drv,
+				struct esoc_clink *clink);
 #endif
 
 static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index b447a01..6f3c891 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3506,6 +3506,16 @@
 			max_sclk = 75000;
 			max_mclk = 80000;
 		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+		}
 	}
 	/* Apply dpm quirks */
 	while (p && p->chip_device != 0) {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6e0447f..72ec93d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@
 
 	pm_runtime_enable(dev);
 
+	pm_runtime_get_sync(dev);
 	phy_power_on(dp->phy);
 
 	analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@
 		goto err_disable_pm_runtime;
 	}
 
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
+
 	return 0;
 
 err_disable_pm_runtime:
+
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 
 	return ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2e42a05..50acd79 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1382,6 +1382,15 @@
 	return ret < 0 ? ret : 0;
 }
 
+static void release_crtc_commit(struct completion *completion)
+{
+	struct drm_crtc_commit *commit = container_of(completion,
+						      typeof(*commit),
+						      flip_done);
+
+	drm_crtc_commit_put(commit);
+}
+
 /**
  * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
  * @state: new modeset state to be committed
@@ -1474,6 +1483,8 @@
 		}
 
 		crtc_state->event->base.completion = &commit->flip_done;
+		crtc_state->event->base.completion_release = release_crtc_commit;
+		drm_crtc_commit_get(commit);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e84faec..f5815e1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -686,8 +686,8 @@
 	assert_spin_locked(&dev->event_lock);
 
 	if (e->completion) {
-		/* ->completion might disappear as soon as it signalled. */
 		complete_all(e->completion);
+		e->completion_release(e->completion);
 		e->completion = NULL;
 	}
 
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 71000d5..70b47ca 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -113,3 +113,13 @@
           driver during fatal errors and enable some display-driver logging
           into an internal buffer (this avoids logging overhead).
 
+config DRM_SDE_RSC
+	bool "Enable SDE resource state coordinator (RSC) driver"
+	depends on DRM_MSM
+	help
+          The SDE DRM RSC driver provides a display Resource State Coordinator
+          that votes the AB/IB bandwidth for the primary display. Each RSC
+          client votes for its own active state; any active request from any
+          client prevents display core power collapse. A client can also
+          register with the RSC for display core power collapse events.
+
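
The help text above describes a voting model in which the display core may power collapse only while no RSC client holds an active vote. The self-contained sketch below illustrates that rule only; it does not use the driver's actual client API, and every name in it is illustrative.

/* Illustrative model of the voting rule described above (not driver code). */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CLIENTS 4

static bool active_vote[MAX_CLIENTS];

/* The core may power collapse only when no client holds an active vote. */
static bool can_power_collapse(void)
{
	for (int i = 0; i < MAX_CLIENTS; i++)
		if (active_vote[i])
			return false;
	return true;
}

int main(void)
{
	active_vote[0] = true;	/* primary display votes active */
	printf("collapse allowed: %d\n", can_power_collapse());	/* 0 */
	active_vote[0] = false;	/* last active vote dropped */
	printf("collapse allowed: %d\n", can_power_collapse());	/* 1 */
	return 0;
}
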
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index c1dfb44..b5d78b1 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -2,6 +2,7 @@
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
 ccflags-y += -Idrivers/gpu/drm/msm/sde
+ccflags-y += -Idrivers/media/platform/msm/sde/rotator
 
 msm_drm-y := \
 	hdmi/hdmi.o \
@@ -49,7 +50,9 @@
 	sde_io_util.o \
 	sde/sde_hw_reg_dma_v1_color_proc.o \
 	sde/sde_hw_color_proc_v4.o \
-	sde_rsc.o \
+	sde/sde_hw_ad4.o \
+
+msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
 	sde_rsc_hw.o \
 
 # use drm gpu driver only if qcom_kgsl driver not available
@@ -127,10 +130,12 @@
 	sde/sde_hw_util.o \
 	sde/sde_hw_sspp.o \
 	sde/sde_hw_wb.o \
+	sde/sde_hw_rot.o \
 	sde/sde_hw_pingpong.o \
 	sde/sde_hw_top.o \
 	sde/sde_hw_interrupts.o \
 	sde/sde_hw_vbif.o \
+	sde/sde_hw_blk.o \
 	sde/sde_formats.o \
 	sde_power_handle.o \
 	sde/sde_hw_color_processing_v1_7.o \
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index 0e2e7ec..2a84a2d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/types.h>
 #include <linux/clk.h>
+#include "sde_power_handle.h"
 
 #define MAX_STRING_LEN 32
 #define MAX_DSI_CTRL 2
@@ -67,6 +68,8 @@
  * @core_mmss_clk:       Handle to MMSS core clock.
  * @bus_clk:             Handle to bus clock.
  * @mnoc_clk:            Handle to MMSS NOC clock.
+ * @dsi_core_client:	 Pointer to SDE power client
+ * @phandle:             Pointer to SDE power handle
  */
 struct dsi_core_clk_info {
 	struct clk *mdp_core_clk;
@@ -74,6 +77,8 @@
 	struct clk *core_mmss_clk;
 	struct clk *bus_clk;
 	struct clk *mnoc_clk;
+	struct sde_power_client *dsi_core_client;
+	struct sde_power_handle *phandle;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 9650a0b..cc87775 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -185,10 +185,12 @@
 {
 	int rc = 0;
 
-	rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
-	if (rc) {
-		pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
-		goto error;
+	if (c_clks->clks.mdp_core_clk) {
+		rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
+		if (rc) {
+			pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
+			goto error;
+		}
 	}
 
 	if (c_clks->clks.mnoc_clk) {
@@ -199,28 +201,36 @@
 		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.iface_clk);
-	if (rc) {
-		pr_err("failed to enable iface_clk, rc=%d\n", rc);
-		goto error_disable_mnoc_clk;
+	if (c_clks->clks.iface_clk) {
+		rc = clk_prepare_enable(c_clks->clks.iface_clk);
+		if (rc) {
+			pr_err("failed to enable iface_clk, rc=%d\n", rc);
+			goto error_disable_mnoc_clk;
+		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.bus_clk);
-	if (rc) {
-		pr_err("failed to enable bus_clk, rc=%d\n", rc);
-		goto error_disable_iface_clk;
+	if (c_clks->clks.bus_clk) {
+		rc = clk_prepare_enable(c_clks->clks.bus_clk);
+		if (rc) {
+			pr_err("failed to enable bus_clk, rc=%d\n", rc);
+			goto error_disable_iface_clk;
+		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
-	if (rc) {
-		pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
-		goto error_disable_bus_clk;
+	if (c_clks->clks.core_mmss_clk) {
+		rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
+		if (rc) {
+			pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
+			goto error_disable_bus_clk;
+		}
 	}
 
-	rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
-	if (rc) {
-		pr_err("bus scale client enable failed, rc=%d\n", rc);
-		goto error_disable_mmss_clk;
+	if (c_clks->bus_handle) {
+		rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
+		if (rc) {
+			pr_err("bus scale client enable failed, rc=%d\n", rc);
+			goto error_disable_mmss_clk;
+		}
 	}
 
 	return rc;
@@ -458,11 +468,18 @@
 	 */
 
 	m_clks = &clks[master_ndx];
+	rc = sde_power_resource_enable(m_clks->clks.phandle,
+			m_clks->clks.dsi_core_client, true);
+
+	if (rc) {
+		pr_err("Power resource enable failed, rc=%d\n", rc);
+		goto error;
+	}
 
 	rc = dsi_core_clk_start(m_clks);
 	if (rc) {
 		pr_err("failed to turn on master clocks, rc=%d\n", rc);
-		goto error;
+		goto error_disable_master_resource;
 	}
 
 	/* Turn on rest of the core clocks */
@@ -471,15 +488,28 @@
 		if (!clk || (clk == m_clks))
 			continue;
 
+		rc = sde_power_resource_enable(clk->clks.phandle,
+				clk->clks.dsi_core_client, true);
+		if (rc) {
+			pr_err("Power resource enable failed, rc=%d\n", rc);
+			goto error_disable_master;
+		}
+
 		rc = dsi_core_clk_start(clk);
 		if (rc) {
 			pr_err("failed to turn on clocks, rc=%d\n", rc);
+			(void)sde_power_resource_enable(clk->clks.phandle,
+					clk->clks.dsi_core_client, false);
 			goto error_disable_master;
 		}
 	}
 	return rc;
 error_disable_master:
 	(void)dsi_core_clk_stop(m_clks);
+
+error_disable_master_resource:
+	(void)sde_power_resource_enable(m_clks->clks.phandle,
+				m_clks->clks.dsi_core_client, false);
 error:
 	return rc;
 }
@@ -547,14 +577,30 @@
 			continue;
 
 		rc = dsi_core_clk_stop(clk);
-		if (rc)
-			pr_err("failed to turn off clocks, rc=%d\n", rc);
+		if (rc) {
+			pr_debug("failed to turn off clocks, rc=%d\n", rc);
+			goto error;
+		}
+
+		rc = sde_power_resource_enable(clk->clks.phandle,
+				clk->clks.dsi_core_client, false);
+		if (rc) {
+			pr_err("Power resource disable failed: %d\n", rc);
+			goto error;
+		}
 	}
 
 	rc = dsi_core_clk_stop(m_clks);
-	if (rc)
+	if (rc) {
 		pr_err("failed to turn off master clocks, rc=%d\n", rc);
+		goto error;
+	}
 
+	rc = sde_power_resource_enable(m_clks->clks.phandle,
+				m_clks->clks.dsi_core_client, false);
+	if (rc)
+		pr_err("Power resource disable failed: %d\n", rc);
+error:
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index cd851bc..5df48c3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -490,30 +490,26 @@
 
 	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
 	if (IS_ERR(core->mdp_core_clk)) {
-		rc = PTR_ERR(core->mdp_core_clk);
-		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
-		goto fail;
+		core->mdp_core_clk = NULL;
+		pr_debug("failed to get mdp_core_clk, rc=%d\n", rc);
 	}
 
 	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
 	if (IS_ERR(core->iface_clk)) {
-		rc = PTR_ERR(core->iface_clk);
-		pr_err("failed to get iface_clk, rc=%d\n", rc);
-		goto fail;
+		core->iface_clk = NULL;
+		pr_debug("failed to get iface_clk, rc=%d\n", rc);
 	}
 
 	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
 	if (IS_ERR(core->core_mmss_clk)) {
-		rc = PTR_ERR(core->core_mmss_clk);
-		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
-		goto fail;
+		core->core_mmss_clk = NULL;
+		pr_debug("failed to get core_mmss_clk, rc=%d\n", rc);
 	}
 
 	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(core->bus_clk)) {
-		rc = PTR_ERR(core->bus_clk);
-		pr_err("failed to get bus_clk, rc=%d\n", rc);
-		goto fail;
+		core->bus_clk = NULL;
+		pr_debug("failed to get bus_clk, rc=%d\n", rc);
 	}
 
 	core->mnoc_clk = devm_clk_get(&pdev->dev, "mnoc_clk");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index ddf791c..bcaf428 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -2228,10 +2228,12 @@
 	struct dsi_display *display;
 	struct dsi_clk_info info;
 	struct clk_ctrl_cb clk_cb;
+	struct msm_drm_private *priv;
 	void *handle = NULL;
 	struct platform_device *pdev = to_platform_device(dev);
 	char *client1 = "dsi_clk_client";
 	char *client2 = "mdp_event_client";
+	char dsi_client_name[DSI_CLIENT_NAME_SIZE];
 	int i, rc = 0;
 
 	if (!dev || !pdev || !master) {
@@ -2247,6 +2249,7 @@
 				drm, display);
 		return -EINVAL;
 	}
+	priv = drm->dev_private;
 
 	mutex_lock(&display->display_lock);
 
@@ -2260,7 +2263,6 @@
 
 	for (i = 0; i < display->ctrl_count; i++) {
 		display_ctrl = &display->ctrl[i];
-
 		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
 		if (rc) {
 			pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
@@ -2280,9 +2282,19 @@
 			sizeof(struct dsi_core_clk_info));
 		memcpy(&info.l_clks[i], &display_ctrl->ctrl->clk_info.link_clks,
 			sizeof(struct dsi_link_clk_info));
+		info.c_clks[i].phandle = &priv->phandle;
 		info.bus_handle[i] =
 			display_ctrl->ctrl->axi_bus_info.bus_handle;
 		info.ctrl_index[i] = display_ctrl->ctrl->cell_index;
+		snprintf(dsi_client_name, DSI_CLIENT_NAME_SIZE,
+						"dsi_core_client%u", i);
+		info.c_clks[i].dsi_core_client = sde_power_client_create(
+				info.c_clks[i].phandle, dsi_client_name);
+		if (IS_ERR_OR_NULL(info.c_clks[i].dsi_core_client)) {
+			pr_err("[%s] client creation failed for ctrl[%d]\n",
+					dsi_client_name, i);
+			rc = -EINVAL;
+			goto error_ctrl_deinit;
+		}
 	}
 
 	info.pre_clkoff_cb = dsi_pre_clkoff_cb;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 89bba96..cfbb14ec 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -29,7 +29,7 @@
 #include "dsi_panel.h"
 
 #define MAX_DSI_CTRLS_PER_DISPLAY             2
-
+#define DSI_CLIENT_NAME_SIZE		20
 /*
  * DSI Validate Mode modifiers
  * @DSI_VALIDATE_FLAG_ALLOW_ADJUST:	Allow mode validation to also do fixup
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 4da92ee..bda9c2d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1970,6 +1970,52 @@
 	return rc;
 }
 
+static int dsi_panel_parse_hdr_config(struct dsi_panel *panel,
+				     struct device_node *of_node)
+{
+
+	int rc = 0;
+	struct drm_panel_hdr_properties *hdr_prop;
+
+	hdr_prop = &panel->hdr_props;
+	hdr_prop->hdr_enabled = of_property_read_bool(of_node,
+		"qcom,mdss-dsi-panel-hdr-enabled");
+
+	if (hdr_prop->hdr_enabled) {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,mdss-dsi-panel-hdr-color-primaries",
+				hdr_prop->display_primaries,
+				DISPLAY_PRIMARIES_MAX);
+		if (rc) {
+			pr_err("%s:%d, Unable to read color primaries, rc:%d\n",
+					__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"qcom,mdss-dsi-panel-peak-brightness",
+			&(hdr_prop->peak_brightness));
+		if (rc) {
+			pr_err("%s:%d, Unable to read hdr peak brightness, rc:%d\n",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"qcom,mdss-dsi-panel-blackness-level",
+			&(hdr_prop->blackness_level));
+		if (rc) {
+			pr_err("%s:%d, Unable to read hdr blackness level, rc:%d\n",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+	}
+	return 0;
+}
+
 struct dsi_panel *dsi_panel_get(struct device *parent,
 				struct device_node *of_node)
 {
@@ -2071,6 +2117,10 @@
 	if (rc)
 		pr_err("failed to parse panel jitter config, rc=%d\n", rc);
 
+	rc = dsi_panel_parse_hdr_config(panel, of_node);
+	if (rc)
+		pr_err("failed to parse hdr config, rc=%d\n", rc);
+
 	panel->panel_of_node = of_node;
 	drm_panel_init(&panel->drm_panel);
 	mutex_init(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index ab30e16..57226ba 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -22,6 +22,7 @@
 #include <linux/leds.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
+#include <drm/msm_drm.h>
 
 #include "dsi_defs.h"
 #include "dsi_ctrl_hw.h"
@@ -173,6 +174,7 @@
 	struct dsi_backlight_config bl_config;
 	struct dsi_panel_reset_config reset_config;
 	struct dsi_pinctrl_info pinctrl;
+	struct drm_panel_hdr_properties hdr_props;
 
 	bool lp11_init;
 	bool ulps_enabled;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index aa11a36..9d2e95b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -306,11 +306,8 @@
 
 	sde_dbg_destroy();
 
-	sde_power_client_destroy(&priv->phandle, priv->pclient);
-	sde_power_resource_deinit(pdev, &priv->phandle);
-
 	component_unbind_all(dev, ddev);
-
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
 	sde_power_resource_deinit(pdev, &priv->phandle);
 
 	msm_mdss_destroy(ddev);
@@ -493,20 +490,20 @@
 	ret = sde_power_resource_init(pdev, &priv->phandle);
 	if (ret) {
 		pr_err("sde power resource init failed\n");
-		goto fail;
+		goto power_init_fail;
 	}
 
 	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
 	if (IS_ERR_OR_NULL(priv->pclient)) {
 		pr_err("sde power client create failed\n");
 		ret = -EINVAL;
-		goto fail;
+		goto power_client_fail;
 	}
 
 	/* Bind all our sub-components: */
 	ret = msm_component_bind_all(dev, ddev);
 	if (ret)
-		return ret;
+		goto bind_fail;
 
 	ret = msm_init_vram(ddev);
 	if (ret)
@@ -636,6 +633,12 @@
 fail:
 	msm_drm_uninit(dev);
 	return ret;
+bind_fail:
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
+power_client_fail:
+	sde_power_resource_deinit(pdev, &priv->phandle);
+power_init_fail:
+	msm_mdss_destroy(ddev);
 mdss_init_fail:
 	kfree(priv);
 priv_alloc_fail:
@@ -1062,6 +1065,262 @@
 	return ret;
 }
 
+static int msm_drm_object_supports_event(struct drm_device *dev,
+		struct drm_msm_event_req *req)
+{
+	int ret = -EINVAL;
+	struct drm_mode_object *arg_obj;
+
+	arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CRTC:
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = 0;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_register_event(struct drm_device *dev,
+	struct drm_msm_event_req *req, struct drm_file *file, bool en)
+{
+	int ret = -EINVAL;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	struct drm_mode_object *arg_obj;
+
+	arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	ret = kms->funcs->register_events(kms, arg_obj, req->event, en);
+	return ret;
+}
+
+static int msm_event_client_count(struct drm_device *dev,
+		struct drm_msm_event_req *req_event, bool locked)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned long flag = 0;
+	struct msm_drm_event *node;
+	int count = 0;
+
+	if (!locked)
+		spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->event.type == req_event->event &&
+			node->info.object_id == req_event->object_id)
+			count++;
+	}
+	if (!locked)
+		spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	return count;
+}
+
+static int msm_ioctl_register_event(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_event_req *req_event = data;
+	struct msm_drm_event *client, *node;
+	unsigned long flag = 0;
+	bool dup_request = false;
+	int ret = 0, count = 0;
+
+	ret = msm_drm_object_supports_event(dev, req_event);
+	if (ret) {
+		DRM_ERROR("unsupported event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->base.file_priv != file)
+			continue;
+		if (node->event.type == req_event->event &&
+			node->info.object_id == req_event->object_id) {
+			DRM_DEBUG("duplicate request for event %x obj id %d\n",
+				node->event.type, node->info.object_id);
+			dup_request = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	if (dup_request)
+		return -EALREADY;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->base.file_priv = file;
+	client->base.pid = current->pid;
+	client->base.event = &client->event;
+	client->event.type = req_event->event;
+	memcpy(&client->info, req_event, sizeof(client->info));
+
+	/* Get the count of clients that have registered for event.
+	 * Event should be enabled for first client, for subsequent enable
+	 * calls add to client list and return.
+	 */
+	count = msm_event_client_count(dev, req_event, false);
+	/* Add current client to list */
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_add_tail(&client->base.link, &priv->client_event_list);
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	if (count)
+		return 0;
+
+	ret = msm_register_event(dev, req_event, file, true);
+	if (ret) {
+		DRM_ERROR("failed to enable event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		spin_lock_irqsave(&dev->event_lock, flag);
+		list_del(&client->base.link);
+		spin_unlock_irqrestore(&dev->event_lock, flag);
+		kfree(client);
+	}
+	return ret;
+}
+
+static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
+				      struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_event_req *req_event = data;
+	struct msm_drm_event *client = NULL, *node, *temp;
+	unsigned long flag = 0;
+	int count = 0;
+	bool found = false;
+	int ret = 0;
+
+	ret = msm_drm_object_supports_event(dev, req_event);
+	if (ret) {
+		DRM_ERROR("unsupported event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry_safe(node, temp, &priv->client_event_list,
+			base.link) {
+		if (node->event.type == req_event->event &&
+		    node->info.object_id == req_event->object_id &&
+		    node->base.file_priv == file) {
+			client = node;
+			list_del(&client->base.link);
+			found = true;
+			kfree(client);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	if (!found)
+		return -ENOENT;
+
+	count = msm_event_client_count(dev, req_event, false);
+	if (!count)
+		ret = msm_register_event(dev, req_event, file, false);
+
+	return ret;
+}
+
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+				struct drm_event *event, u8 *payload)
+{
+	struct drm_device *dev = NULL;
+	struct msm_drm_private *priv = NULL;
+	unsigned long flags;
+	struct msm_drm_event *notify, *node;
+	int len = 0, ret;
+
+	if (!crtc || !event || !event->length || !payload) {
+		DRM_ERROR("err param crtc %pK event %pK len %d payload %pK\n",
+			crtc, event, ((event) ? (event->length) : -1),
+			payload);
+		return;
+	}
+	dev = crtc->dev;
+	priv = (dev) ? dev->dev_private : NULL;
+	if (!dev || !priv) {
+		DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->event.type != event->type ||
+			crtc->base.id != node->info.object_id)
+			continue;
+		len = event->length + sizeof(struct drm_msm_event_resp);
+		if (node->base.file_priv->event_space < len) {
+			DRM_ERROR("Insufficient space to notify\n");
+			continue;
+		}
+		notify = kzalloc(len, GFP_ATOMIC);
+		if (!notify)
+			continue;
+		notify->base.file_priv = node->base.file_priv;
+		notify->base.event = &notify->event;
+		notify->base.pid = node->base.pid;
+		notify->event.type = node->event.type;
+		notify->event.length = len;
+		memcpy(&notify->info, &node->info, sizeof(notify->info));
+		memcpy(notify->data, payload, event->length);
+		ret = drm_event_reserve_init_locked(dev, node->base.file_priv,
+			&notify->base, &notify->event);
+		if (ret) {
+			kfree(notify);
+			continue;
+		}
+		drm_send_event_locked(dev, &notify->base);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int msm_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_minor *minor = file_priv->minor;
+	struct drm_device *dev = minor->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_event *node, *temp;
+	u32 count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_for_each_entry_safe(node, temp, &priv->client_event_list,
+			base.link) {
+		if (node->base.file_priv != file_priv)
+			continue;
+		list_del(&node->base.link);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		count = msm_event_client_count(dev, &node->info, true);
+		if (!count)
+			msm_register_event(dev, &node->info, file_priv, false);
+		kfree(node);
+		spin_lock_irqsave(&dev->event_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return drm_release(inode, filp);
+}
+
 static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
@@ -1072,6 +1331,10 @@
 	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT,  msm_ioctl_register_event,
+			  DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT,  msm_ioctl_deregister_event,
+			  DRM_UNLOCKED|DRM_CONTROL_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
@@ -1083,7 +1346,7 @@
 static const struct file_operations fops = {
 	.owner              = THIS_MODULE,
 	.open               = drm_open,
-	.release            = drm_release,
+	.release            = msm_release,
 	.unlocked_ioctl     = drm_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl       = drm_compat_ioctl,
@@ -1133,7 +1396,7 @@
 	.debugfs_cleanup    = msm_debugfs_cleanup,
 #endif
 	.ioctls             = msm_ioctls,
-	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
+	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
 	.fops               = &fops,
 	.name               = "msm_drm",
 	.desc               = "MSM Snapdragon DRM",
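
Event registration above is reference-counted per (event, object id) pair: the KMS register_events hook runs only when the first client registers and again when the last one deregisters, and msm_release() drops whatever registrations a client left behind when its file closes. Delivery reuses the standard DRM event stream, so a client that has registered (via the new MSM_REGISTER_EVENT ioctl, not shown in this hunk) simply reads drm_event records from its DRM fd. A minimal read-side sketch follows; the device path, buffer size, and the locally mirrored event header are assumptions for illustration.

/*
 * Illustrative read loop only -- not part of the patch. It assumes the
 * client already registered for an event on this fd and that the msm
 * device node is /dev/dri/card0.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct drm_event {		/* mirrors the uapi DRM event header */
	uint32_t type;
	uint32_t length;	/* total size, header included */
};

int main(void)
{
	char buf[1024];
	int fd = open("/dev/dri/card0", O_RDONLY);

	if (fd < 0)
		return 1;
	for (;;) {
		ssize_t len = read(fd, buf, sizeof(buf));	/* one or more events */
		ssize_t off = 0;

		if (len <= 0)
			break;
		while (off < len) {
			struct drm_event *e = (struct drm_event *)(buf + off);

			printf("event type 0x%x, length %u\n",
			       e->type, e->length);
			off += e->length;
		}
	}
	close(fd);
	return 0;
}
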
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 499b57b..f2fccd7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -89,6 +89,7 @@
 	PLANE_PROP_SKIN_COLOR,
 	PLANE_PROP_SKY_COLOR,
 	PLANE_PROP_FOLIAGE_COLOR,
+	PLANE_PROP_ROT_CAPS_V1,
 
 	/* # of blob properties */
 	PLANE_PROP_BLOBCOUNT,
@@ -105,6 +106,10 @@
 	PLANE_PROP_VALUE_ADJUST,
 	PLANE_PROP_CONTRAST_ADJUST,
 	PLANE_PROP_EXCL_RECT_V1,
+	PLANE_PROP_ROT_DST_X,
+	PLANE_PROP_ROT_DST_Y,
+	PLANE_PROP_ROT_DST_W,
+	PLANE_PROP_ROT_DST_H,
 
 	/* enum/bitmask properties */
 	PLANE_PROP_ROTATION,
@@ -129,6 +134,10 @@
 	CRTC_PROP_CORE_CLK,
 	CRTC_PROP_CORE_AB,
 	CRTC_PROP_CORE_IB,
+	CRTC_PROP_MEM_AB,
+	CRTC_PROP_MEM_IB,
+	CRTC_PROP_ROT_PREFILL_BW,
+	CRTC_PROP_ROT_CLK,
 
 	/* total # of properties */
 	CRTC_PROP_COUNT
@@ -137,6 +146,7 @@
 enum msm_mdp_conn_property {
 	/* blob properties, always put these first */
 	CONNECTOR_PROP_SDE_INFO,
+	CONNECTOR_PROP_HDR_INFO,
 
 	/* # of blob properties */
 	CONNECTOR_PROP_BLOBCOUNT,
@@ -364,6 +374,7 @@
 struct msm_drm_event {
 	struct drm_pending_event base;
 	struct drm_event event;
+	struct drm_msm_event_req info;
 	u8 data[];
 };
 
@@ -613,6 +624,15 @@
 	MSM_DSI_CMD_ENCODER_ID = 1,
 	MSM_DSI_ENCODER_NUM = 2
 };
+
+/**
+ * msm_send_crtc_notification - notify user-space clients of crtc events.
+ * @crtc: crtc that is generating the event.
+ * @event: event that needs to be notified.
+ * @payload: payload for the event.
+ */
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+		struct drm_event *event, u8 *payload);
 #ifdef CONFIG_DRM_MSM_DSI
 void __init msm_dsi_register(void);
 void __exit msm_dsi_unregister(void);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 4ebbc58..aa1b090 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -83,6 +83,8 @@
 	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
 	void (*postclose)(struct msm_kms *kms, struct drm_file *file);
 	void (*lastclose)(struct msm_kms *kms);
+	int (*register_events)(struct msm_kms *kms,
+			struct drm_mode_object *obj, u32 event, bool en);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
new file mode 100644
index 0000000..4a664a8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SDE_AD4_H_
+#define _SDE_AD4_H_
+
+#include <drm/drm_mode.h>
+#include <drm/drm_property.h>
+#include "sde_hw_dspp.h"
+
+/**
+ * enum ad4_modes - ad4 modes supported by driver
+ */
+enum ad4_modes {
+	AD4_OFF,
+	AD4_AUTO_STRENGTH,
+	AD4_CALIBRATION,
+	AD4_MANUAL,
+};
+
+/**
+ * struct drm_prop_enum_list - drm structure for creating enum property and
+ *                             enumerating values
+ */
+static const struct drm_prop_enum_list ad4_modes[] = {
+	{AD4_OFF, "off"},
+	{AD4_AUTO_STRENGTH, "auto_strength_mode"},
+	{AD4_CALIBRATION, "calibration_mode"},
+	{AD4_MANUAL, "manual_mode"},
+};
+
+/**
+ * enum ad_property - properties that can be set for ad
+ */
+enum ad_property {
+	AD_MODE,
+	AD_INIT,
+	AD_CFG,
+	AD_INPUT,
+	AD_SUSPEND,
+	AD_ASSERTIVE,
+	AD_BACKLIGHT,
+	AD_PROPMAX,
+};
+
+/**
+ * enum ad_intr_resp_property - ad4 interrupt response enum
+ */
+enum ad_intr_resp_property {
+	AD4_BACKLIGHT,
+	AD4_RESPMAX,
+};
+
+/**
+ * struct sde_ad_hw_cfg - structure for setting the ad properties
+ * @prop: enum of ad property
+ * @hw_cfg: payload for the prop being set.
+ */
+struct sde_ad_hw_cfg {
+	enum ad_property prop;
+	struct sde_hw_cp_cfg *hw_cfg;
+};
+
+/**
+ * sde_validate_dspp_ad4() - api to validate if ad property is allowed for
+ *                           the display with allocated dspp/mixers.
+ * @dspp: pointer to dspp info structure.
+ * @prop: pointer to u32 pointing to ad property
+ */
+int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop);
+
+/**
+ * sde_setup_dspp_ad4 - api to apply the ad property; sde_validate_dspp_ad4
+ *                      should be called before calling this function
+ * @dspp: pointer to dspp info structure.
+ * @cfg: pointer to struct sde_ad_hw_cfg
+ */
+void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *cfg);
+
+/**
+ * sde_read_intr_resp_ad4 - api to get ad4 interrupt status for event
+ * @dspp: pointer to dspp object
+ * @event: event for which response is needed
+ * @resp: value of event requested
+ */
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp);
+
+#endif /* _SDE_AD4_H_ */
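
The kernel-doc in this header spells out the AD4 contract: validate that an AD property is allowed on the allocated dspp before applying it. Below is a minimal sketch of that call order, assuming the caller already holds a dspp from the crtc's mixers and a populated struct sde_hw_cp_cfg; the demo_ function name is hypothetical.

/* Minimal sketch of the documented validate-then-setup call order. */
#include "sde_ad4.h"

static int demo_apply_ad_mode(struct sde_hw_dspp *dspp,
			      struct sde_hw_cp_cfg *hw_cfg)
{
	struct sde_ad_hw_cfg ad_cfg;
	u32 prop = AD_MODE;
	int ret;

	/* Confirm AD4 is allowed on this dspp before touching hardware. */
	ret = sde_validate_dspp_ad4(dspp, &prop);
	if (ret)
		return ret;

	ad_cfg.prop = AD_MODE;
	ad_cfg.hw_cfg = hw_cfg;
	sde_setup_dspp_ad4(dspp, &ad_cfg);
	return 0;
}
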
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index f7fcd01..79b39bd 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -21,6 +21,9 @@
 #include "sde_crtc.h"
 #include "sde_hw_dspp.h"
 #include "sde_hw_lm.h"
+#include "sde_ad4.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
 
 struct sde_cp_node {
 	u32 property_id;
@@ -34,6 +37,7 @@
 	struct list_head dirty_list;
 	bool is_dspp_feature;
 	u32 prop_blob_sz;
+	struct sde_irq_callback *irq;
 };
 
 struct sde_cp_prop_attach {
@@ -60,6 +64,14 @@
 
 static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
 
+static void sde_cp_update_list(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc, bool dirty_list);
+
+static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc);
+
+static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg);
+
 #define setup_dspp_prop_install_funcs(func) \
 do { \
 	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -96,6 +108,12 @@
 	SDE_CP_CRTC_DSPP_HIST,
 	SDE_CP_CRTC_DSPP_AD,
 	SDE_CP_CRTC_DSPP_VLUT,
+	SDE_CP_CRTC_DSPP_AD_MODE,
+	SDE_CP_CRTC_DSPP_AD_INIT,
+	SDE_CP_CRTC_DSPP_AD_CFG,
+	SDE_CP_CRTC_DSPP_AD_INPUT,
+	SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS,
+	SDE_CP_CRTC_DSPP_AD_BACKLIGHT,
 	SDE_CP_CRTC_DSPP_MAX,
 	/* DSPP features end */
 
@@ -136,9 +154,10 @@
 	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
 		/* Check if local blob is Set */
 		if (!blob) {
-			hw_cfg->len = sizeof(prop_node->prop_val);
-			if (prop_node->prop_val)
+			if (prop_node->prop_val) {
+				hw_cfg->len = sizeof(prop_node->prop_val);
 				hw_cfg->payload = &prop_node->prop_val;
+			}
 		} else {
 			hw_cfg->len = (prop_node->prop_val) ? blob->length :
 					0;
@@ -147,6 +166,10 @@
 		}
 		if (prop_node->prop_val)
 			*feature_enabled = true;
+	} else if (prop_node->prop_flags & DRM_MODE_PROP_ENUM) {
+		*feature_enabled = (prop_node->prop_val != 0);
+		hw_cfg->len = sizeof(prop_node->prop_val);
+		hw_cfg->payload = &prop_node->prop_val;
 	} else {
 		DRM_ERROR("property type is not supported\n");
 	}
@@ -178,7 +201,7 @@
 		}
 	}
 
-	if (!found || prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+	if (!found || !(prop_node->prop_flags & DRM_MODE_PROP_RANGE)) {
 		DRM_ERROR("local blob create failed prop found %d flags %d\n",
 		       found, prop_node->prop_flags);
 		return ret;
@@ -232,10 +255,14 @@
 {
 	int ret = -EINVAL;
 
-	if (property->flags & DRM_MODE_PROP_BLOB)
+	if (property->flags & DRM_MODE_PROP_BLOB) {
 		ret = sde_cp_disable_crtc_blob_property(prop_node);
-	else if (property->flags & DRM_MODE_PROP_RANGE)
+	} else if (property->flags & DRM_MODE_PROP_RANGE) {
 		ret = sde_cp_handle_range_property(prop_node, 0);
+	} else if (property->flags & DRM_MODE_PROP_ENUM) {
+		ret = 0;
+		prop_node->prop_val = 0;
+	}
 	return ret;
 }
 
@@ -275,10 +302,14 @@
 {
 	int ret = -EINVAL;
 
-	if (property->flags & DRM_MODE_PROP_BLOB)
+	if (property->flags & DRM_MODE_PROP_BLOB) {
 		ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
-	else if (property->flags & DRM_MODE_PROP_RANGE)
+	} else if (property->flags & DRM_MODE_PROP_RANGE) {
 		ret = sde_cp_handle_range_property(prop_node, val);
+	} else if (property->flags & DRM_MODE_PROP_ENUM) {
+		ret = 0;
+		prop_node->prop_val = val;
+	}
 	return ret;
 }
 
@@ -331,6 +362,8 @@
 	INIT_LIST_HEAD(&sde_crtc->active_list);
 	INIT_LIST_HEAD(&sde_crtc->dirty_list);
 	INIT_LIST_HEAD(&sde_crtc->feature_list);
+	INIT_LIST_HEAD(&sde_crtc->ad_dirty);
+	INIT_LIST_HEAD(&sde_crtc->ad_active);
 }
 
 static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
@@ -357,8 +390,8 @@
 	prop = priv->cp_property[feature];
 
 	if (!prop) {
-		prop = drm_property_create(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
-					   name, 0);
+		prop = drm_property_create_range(crtc->dev,
+				DRM_MODE_PROP_IMMUTABLE, name, 0, 1);
 		if (!prop) {
 			DRM_ERROR("property create failed: %s\n", name);
 			kfree(prop_node);
@@ -412,7 +445,7 @@
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
-static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
+static void sde_cp_crtc_install_blob_property(struct drm_crtc *crtc, char *name,
 			u32 feature, u32 blob_sz)
 {
 	struct drm_property *prop;
@@ -452,6 +485,46 @@
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
+static void sde_cp_crtc_install_enum_property(struct drm_crtc *crtc,
+	u32 feature, const struct drm_prop_enum_list *list, u32 enum_sz,
+	char *name)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	uint64_t val = 0;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_enum(crtc->dev, 0, name,
+			list, enum_sz);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 				   struct sde_crtc *sde_crtc, u32 last_feature)
 {
@@ -462,13 +535,18 @@
 	int i = 0;
 	bool feature_enabled = false;
 	int ret = 0;
+	struct sde_ad_hw_cfg ad_cfg;
 
 	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
+	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
+	hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
+	hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
 
 	for (i = 0; i < num_mixers && !ret; i++) {
 		hw_lm = sde_crtc->mixers[i].hw_lm;
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
 		hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl;
+		hw_cfg.mixer_info = hw_lm;
 		if (i == num_mixers - 1)
 			hw_cfg.last_feature = last_feature;
 		else
@@ -558,6 +636,60 @@
 			}
 			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
 			break;
+		case SDE_CP_CRTC_DSPP_AD_MODE:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_MODE;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INIT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_INIT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_CFG:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_CFG;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INPUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_INPUT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_ASSERTIVE;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_BACKLIGHT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
 		default:
 			ret = -EINVAL;
 			break;
@@ -574,7 +706,7 @@
 	if (feature_enabled) {
 		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
 				 prop_node->property_id);
-		list_add_tail(&prop_node->active_list, &sde_crtc->active_list);
+		sde_cp_update_list(prop_node, sde_crtc, false);
 	} else {
 		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
 			 prop_node->property_id);
@@ -612,10 +744,17 @@
 		return;
 	}
 
-	/* Check if dirty list is empty for early return */
-	if (list_empty(&sde_crtc->dirty_list)) {
-		DRM_DEBUG_DRIVER("Dirty list is empty\n");
-		return;
+	/* Check if dirty lists are empty and ad features are disabled for
+	 * early return. If ad properties are active then we need to issue
+	 * dspp flush.
+	 */
+	if (list_empty(&sde_crtc->dirty_list) &&
+		list_empty(&sde_crtc->ad_dirty)) {
+		if (list_empty(&sde_crtc->ad_active)) {
+			DRM_DEBUG_DRIVER("Dirty list is empty\n");
+			return;
+		}
+		set_dspp_flush = true;
 	}
 
 	num_of_features = 0;
@@ -623,7 +762,7 @@
 		num_of_features++;
 
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
-							dirty_list) {
+				dirty_list) {
 		num_of_features--;
 		sde_cp_crtc_setfeature(prop_node, sde_crtc,
 				(num_of_features == 0));
@@ -634,6 +773,18 @@
 			set_lm_flush = true;
 	}
 
+	num_of_features = 0;
+	list_for_each_entry(prop_node, &sde_crtc->ad_dirty, dirty_list)
+		num_of_features++;
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_dirty,
+				dirty_list) {
+		num_of_features--;
+		set_dspp_flush = true;
+		sde_cp_crtc_setfeature(prop_node, sde_crtc,
+				(num_of_features == 0));
+	}
+
 	for (i = 0; i < num_mixers; i++) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl)
@@ -791,6 +942,13 @@
 			sde_crtc->num_mixers);
 		return -EINVAL;
 	}
+
+	ret = sde_cp_ad_validate_prop(prop_node, sde_crtc);
+	if (ret) {
+		DRM_ERROR("ad property validation failed ret %d\n", ret);
+		return ret;
+	}
+
 	/* remove the property from dirty list */
 	list_del_init(&prop_node->dirty_list);
 
@@ -804,7 +962,7 @@
 		/* remove the property from active list */
 		list_del_init(&prop_node->active_list);
 		/* Mark the feature as dirty */
-		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		sde_cp_update_list(prop_node, sde_crtc, true);
 	}
 	return ret;
 }
@@ -888,7 +1046,7 @@
 
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
 				 active_list) {
-		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		sde_cp_update_list(prop_node, sde_crtc, true);
 		list_del_init(&prop_node->active_list);
 	}
 }
@@ -913,7 +1071,7 @@
 		"SDE_DSPP_PCC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_PCC, sizeof(struct drm_msm_pcc));
 		break;
 	default:
@@ -988,6 +1146,33 @@
 		sde_cp_crtc_install_immutable_property(crtc,
 			feature_name, SDE_CP_CRTC_DSPP_AD);
 		break;
+	case 4:
+		sde_cp_crtc_install_immutable_property(crtc,
+			feature_name, SDE_CP_CRTC_DSPP_AD);
+
+		sde_cp_crtc_install_enum_property(crtc,
+			SDE_CP_CRTC_DSPP_AD_MODE, ad4_modes,
+			ARRAY_SIZE(ad4_modes), "SDE_DSPP_AD_V4_MODE");
+
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INIT",
+			SDE_CP_CRTC_DSPP_AD_INIT, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_INIT,
+			sizeof(struct drm_msm_ad4_init));
+
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_CFG",
+			SDE_CP_CRTC_DSPP_AD_CFG, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_CFG,
+			sizeof(struct drm_msm_ad4_cfg));
+		sde_cp_crtc_install_range_property(crtc,
+			"SDE_DSPP_AD_V4_ASSERTIVNESS",
+			SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS, 0, (BIT(8) - 1), 0);
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INPUT",
+			SDE_CP_CRTC_DSPP_AD_INPUT, 0, U16_MAX, 0);
+		sde_cp_crtc_install_range_property(crtc,
+				"SDE_DSPP_AD_V4_BACKLIGHT",
+			SDE_CP_CRTC_DSPP_AD_BACKLIGHT, 0, (BIT(16) - 1),
+			0);
+		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
 		break;
@@ -1008,7 +1193,7 @@
 		 "SDE_LM_GC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_LM_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
@@ -1032,7 +1217,7 @@
 		"SDE_DSPP_GAMUT_V", version);
 	switch (version) {
 	case 4:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_GAMUT,
 			sizeof(struct drm_msm_3d_gamut));
 		break;
@@ -1057,7 +1242,7 @@
 		"SDE_DSPP_GC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
@@ -1065,3 +1250,193 @@
 		break;
 	}
 }
+
+static void sde_cp_update_list(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc, bool dirty_list)
+{
+	switch (prop_node->feature) {
+	case SDE_CP_CRTC_DSPP_AD_MODE:
+	case SDE_CP_CRTC_DSPP_AD_INIT:
+	case SDE_CP_CRTC_DSPP_AD_CFG:
+	case SDE_CP_CRTC_DSPP_AD_INPUT:
+	case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+	case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+		if (dirty_list)
+			list_add_tail(&prop_node->dirty_list, &crtc->ad_dirty);
+		else
+			list_add_tail(&prop_node->active_list,
+					&crtc->ad_active);
+		break;
+	default:
+		/* color processing properties handle here */
+		if (dirty_list)
+			list_add_tail(&prop_node->dirty_list,
+					&crtc->dirty_list);
+		else
+			list_add_tail(&prop_node->active_list,
+					&crtc->active_list);
+		break;
+	}
+}
+
+static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc)
+{
+	int i = 0, ret = 0;
+	u32 ad_prop;
+
+	for (i = 0; i < crtc->num_mixers && !ret; i++) {
+		if (!crtc->mixers[i].hw_dspp) {
+			ret = -EINVAL;
+			continue;
+		}
+		switch (prop_node->feature) {
+		case SDE_CP_CRTC_DSPP_AD_MODE:
+			ad_prop = AD_MODE;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INIT:
+			ad_prop = AD_INIT;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_CFG:
+			ad_prop = AD_CFG;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INPUT:
+			ad_prop = AD_INPUT;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+			ad_prop = AD_ASSERTIVE;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+			ad_prop = AD_BACKLIGHT;
+			break;
+		default:
+			/* Not an AD property */
+			return 0;
+		}
+		if (!crtc->mixers[i].hw_dspp->ops.validate_ad)
+			ret = -EINVAL;
+		else
+			ret = crtc->mixers[i].hw_dspp->ops.validate_ad(
+				crtc->mixers[i].hw_dspp, &ad_prop);
+	}
+	return ret;
+}
+
+static void sde_cp_ad_interrupt_cb(void *arg, int irq_idx)
+{
+	struct sde_crtc *crtc = arg;
+
+	sde_crtc_event_queue(&crtc->base, sde_cp_notify_ad_event, NULL);
+}
+
+static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
+{
+	uint32_t bl = 0;
+	struct sde_hw_mixer *hw_lm = NULL;
+	struct sde_hw_dspp *hw_dspp = NULL;
+	u32 num_mixers;
+	struct sde_crtc *crtc;
+	struct drm_event event;
+	int i;
+
+	crtc = to_sde_crtc(crtc_drm);
+	num_mixers = crtc->num_mixers;
+	if (!num_mixers)
+		return;
+
+	for (i = 0; i < num_mixers; i++) {
+		hw_lm = crtc->mixers[i].hw_lm;
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (!hw_lm->cfg.right_mixer)
+			break;
+	}
+
+	if (!hw_dspp)
+		return;
+
+	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
+	event.length = sizeof(u32);
+	event.type = DRM_EVENT_AD_BACKLIGHT;
+	msm_send_crtc_notification(&crtc->base, &event, (u8 *)&bl);
+}
+
+int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
+	struct sde_irq_callback *ad_irq)
+{
+	struct sde_kms *kms = NULL;
+	u32 num_mixers;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_crtc *crtc;
+	int i;
+	int irq_idx, ret;
+	struct sde_cp_node prop_node;
+
+	if (!crtc_drm || !ad_irq) {
+		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, ad_irq);
+		return -EINVAL;
+	}
+
+	crtc = to_sde_crtc(crtc_drm);
+	if (!crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
+		return -EINVAL;
+	}
+
+	mutex_lock(&crtc->crtc_lock);
+	kms = get_kms(crtc_drm);
+	num_mixers = crtc->num_mixers;
+
+	memset(&prop_node, 0, sizeof(prop_node));
+	prop_node.feature = SDE_CP_CRTC_DSPP_AD_BACKLIGHT;
+	ret = sde_cp_ad_validate_prop(&prop_node, crtc);
+	if (ret) {
+		DRM_ERROR("Ad not supported ret %d\n", ret);
+		goto exit;
+	}
+
+	for (i = 0; i < num_mixers; i++) {
+		hw_lm = crtc->mixers[i].hw_lm;
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (!hw_lm->cfg.right_mixer)
+			break;
+	}
+
+	if (!hw_dspp) {
+		DRM_ERROR("invalid dspp\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_AD4_BL_DONE,
+			hw_dspp->idx);
+	if (irq_idx < 0) {
+		DRM_ERROR("failed to get the irq idx ret %d\n", irq_idx);
+		ret = irq_idx;
+		goto exit;
+	}
+
+	if (!en) {
+		sde_core_irq_disable(kms, &irq_idx, 1);
+		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
+		ret = 0;
+		goto exit;
+	}
+
+	INIT_LIST_HEAD(&ad_irq->list);
+	ad_irq->arg = crtc;
+	ad_irq->func = sde_cp_ad_interrupt_cb;
+	ret = sde_core_irq_register_callback(kms, irq_idx, ad_irq);
+	if (ret) {
+		DRM_ERROR("failed to register the callback ret %d\n", ret);
+		goto exit;
+	}
+	ret = sde_core_irq_enable(kms, &irq_idx, 1);
+	if (ret) {
+		DRM_ERROR("failed to enable irq ret %d\n", ret);
+		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
+	}
+exit:
+	mutex_unlock(&crtc->crtc_lock);
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index 9fa63f8..e78f690 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -15,6 +15,8 @@
 #define _SDE_COLOR_PROCESSING_H
 #include <drm/drm_crtc.h>
 
+struct sde_irq_callback;
+
 /*
  * PA MEMORY COLOR types
  * @MEMCOLOR_SKIN          Skin memory color type
@@ -92,4 +94,13 @@
  * @crtc: Pointer to crtc.
  */
 void sde_cp_crtc_resume(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_ad_interrupt - API to enable/disable the AD interrupt
+ * @crtc: Pointer to crtc.
+ * @en: Flag to enable/disable the interrupt.
+ * @irq: Pointer to irq callback
+ */
+int sde_cp_ad_interrupt(struct drm_crtc *crtc, bool en,
+		struct sde_irq_callback *irq);
 #endif /*_SDE_COLOR_PROCESSING_H */
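For reference only (not part of the patch): a minimal sketch of how the new AD interrupt hook is expected to be driven. The caller supplies a persistent sde_irq_callback; once the AD4 backlight-done interrupt fires, the registered callback queues sde_cp_notify_ad_event(), which reads AD4_BACKLIGHT and emits DRM_EVENT_AD_BACKLIGHT. The wrapper name below is hypothetical.

/* Illustration only; example_* names are not part of this patch */
static struct sde_irq_callback example_ad_irq;	/* must outlive the registration */

static int example_toggle_ad_backlight(struct drm_crtc *crtc, bool en)
{
	/* registers (en=true) or tears down (en=false) the AD4_BL_DONE irq */
	return sde_cp_ad_interrupt(crtc, en, &example_ad_irq);
}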
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 92b7e5d..1f39180 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -17,6 +17,7 @@
 #include "sde_connector.h"
 #include <linux/backlight.h>
 #include "dsi_drm.h"
+#include "dsi_display.h"
 
 #define BL_NODE_NAME_SIZE 32
 
@@ -227,6 +228,8 @@
 
 	if (c_conn->blob_caps)
 		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 
 	drm_connector_unregister(connector);
@@ -396,9 +399,12 @@
 		/* convert fb val to drm framebuffer and prepare it */
 		c_state->out_fb =
 			drm_framebuffer_lookup(connector->dev, val);
-		if (!c_state->out_fb) {
+		if (!c_state->out_fb && val) {
 			SDE_ERROR("failed to look up fb %lld\n", val);
 			rc = -EFAULT;
+		} else if (!c_state->out_fb && !val) {
+			SDE_DEBUG("cleared fb_id\n");
+			rc = 0;
 		} else {
 			msm_framebuffer_set_kmap(c_state->out_fb,
 					c_conn->fb_kmap);
@@ -666,6 +672,7 @@
 	struct sde_kms *sde_kms;
 	struct sde_kms_info *info;
 	struct sde_connector *c_conn = NULL;
+	struct dsi_display *dsi_display;
 	int rc;
 
 	if (!dev || !dev->dev_private || !encoder) {
@@ -781,6 +788,23 @@
 		kfree(info);
 	}
 
+	if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+		dsi_display = (struct dsi_display *)(display);
+		if (dsi_display && dsi_display->panel &&
+			dsi_display->panel->hdr_props.hdr_enabled == true) {
+			msm_property_install_blob(&c_conn->property_info,
+				"hdr_properties",
+				DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_HDR_INFO);
+
+			msm_property_set_blob(&c_conn->property_info,
+				&c_conn->blob_hdr,
+				&dsi_display->panel->hdr_props,
+				sizeof(dsi_display->panel->hdr_props),
+				CONNECTOR_PROP_HDR_INFO);
+		}
+	}
+
 	msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
 			0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
 
@@ -810,6 +834,8 @@
 error_destroy_property:
 	if (c_conn->blob_caps)
 		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 error_cleanup_fence:
 	sde_fence_deinit(&c_conn->retire_fence);
@@ -820,3 +846,9 @@
 
 	return ERR_PTR(rc);
 }
+
+int sde_connector_register_custom_event(struct sde_kms *kms,
+		struct drm_connector *conn_drm, u32 event, bool val)
+{
+	return -EINVAL;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 8be359d..9d36851 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -177,6 +177,7 @@
  * @property_info: Private structure for generic property handling
  * @property_data: Array of private data for generic property handling
  * @blob_caps: Pointer to blob structure for 'capabilities' property
+ * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
  * @fb_kmap: true if kernel mapping of framebuffer is requested
  * @event_table: Array of registered events
  * @event_lock: Lock object for event_table
@@ -200,6 +201,7 @@
 	struct msm_property_info property_info;
 	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
 	struct drm_property_blob *blob_caps;
+	struct drm_property_blob *blob_hdr;
 
 	bool fb_kmap;
 	struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
@@ -390,5 +392,16 @@
 void sde_connector_unregister_event(struct drm_connector *connector,
 		uint32_t event_idx);
 
+/**
+ * sde_connector_register_custom_event - register for async events
+ * @kms: Pointer to sde_kms
+ * @conn_drm: Pointer to drm connector object
+ * @event: Event for which request is being sent
+ * @en: Flag to enable/disable the event
+ * Returns: Zero on success
+ */
+int sde_connector_register_custom_event(struct sde_kms *kms,
+		struct drm_connector *conn_drm, u32 event, bool en);
+
 #endif /* _SDE_CONNECTOR_H_ */
 
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 307c617..db2c515 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -18,13 +18,13 @@
 #include <linux/sort.h>
 #include <linux/clk.h>
 #include <linux/bitmap.h>
+#include <linux/sde_rsc.h>
 
 #include "msm_prop.h"
 
 #include "sde_kms.h"
 #include "sde_trace.h"
 #include "sde_crtc.h"
-#include "sde_rsc.h"
 #include "sde_core_perf.h"
 
 static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a037250b..cec8792 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -37,6 +37,24 @@
 #include "sde_power_handle.h"
 #include "sde_core_perf.h"
 
+struct sde_crtc_irq_info {
+	struct sde_irq_callback irq;
+	u32 event;
+	int (*func)(struct drm_crtc *crtc, bool en,
+			struct sde_irq_callback *irq);
+	struct list_head list;
+};
+
+struct sde_crtc_custom_events {
+	u32 event;
+	int (*func)(struct drm_crtc *crtc, bool en,
+			struct sde_irq_callback *irq);
+};
+
+static struct sde_crtc_custom_events custom_events[] = {
+	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt}
+};
+
 /* default input fence timeout, in ms */
 #define SDE_CRTC_INPUT_FENCE_TIMEOUT    2000
 
@@ -51,6 +69,8 @@
 #define LEFT_MIXER 0
 #define RIGHT_MIXER 1
 
+#define MISR_BUFF_SIZE			256
+
 static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -68,6 +88,403 @@
 	return to_sde_kms(priv->kms);
 }
 
+static inline int _sde_crtc_power_enable(struct sde_crtc *sde_crtc, bool enable)
+{
+	struct drm_crtc *crtc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!sde_crtc) {
+		SDE_ERROR("invalid sde crtc\n");
+		return -EINVAL;
+	}
+
+	crtc = &sde_crtc->base;
+	if (!crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid drm device\n");
+		return -EINVAL;
+	}
+
+	priv = crtc->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+									enable);
+}
+
+/**
+ * _sde_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_sde_crtc_rp_to_crtc(struct sde_crtc_respool *rp)
+{
+	if (!rp)
+		return NULL;
+
+	return container_of(rp, struct sde_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _sde_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _sde_crtc_rp_reclaim(struct sde_crtc_respool *rp, bool force)
+{
+	struct sde_crtc_res *res, *next;
+	struct drm_crtc *crtc;
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	SDE_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+			force ? "destroy" : "free_unused");
+
+	list_for_each_entry_safe(res, next, &rp->res_list, list) {
+		if (!force && !(res->flags & SDE_CRTC_RES_FLAG_FREE))
+			continue;
+		SDE_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		list_del(&res->list);
+		if (res->ops.put)
+			res->ops.put(res->val);
+		kfree(res);
+	}
+}
+
+/**
+ * _sde_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _sde_crtc_rp_free_unused(struct sde_crtc_respool *rp)
+{
+	_sde_crtc_rp_reclaim(rp, false);
+}
+
+/**
+ * _sde_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_destroy(struct sde_crtc_respool *rp)
+{
+	_sde_crtc_rp_reclaim(rp, true);
+}
+
+/**
+ * _sde_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_sde_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+	SDE_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+	return sde_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _sde_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _sde_crtc_hw_blk_put(void *val)
+{
+	SDE_DEBUG("res://%pK\n", val);
+	sde_hw_blk_put(val);
+}
+
+/**
+ * _sde_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_duplicate(struct sde_crtc_respool *rp,
+		struct sde_crtc_respool *dup_rp)
+{
+	struct sde_crtc_res *res, *dup_res;
+	struct drm_crtc *crtc;
+
+	if (!rp || !dup_rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	SDE_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+	dup_rp->sequence_id = rp->sequence_id + 1;
+	INIT_LIST_HEAD(&dup_rp->res_list);
+	dup_rp->ops = rp->ops;
+	list_for_each_entry(res, &rp->res_list, list) {
+		dup_res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
+		if (!dup_res)
+			return;
+		INIT_LIST_HEAD(&dup_res->list);
+		atomic_set(&dup_res->refcount, 0);
+		dup_res->type = res->type;
+		dup_res->tag = res->tag;
+		dup_res->val = res->val;
+		dup_res->ops = res->ops;
+		dup_res->flags = SDE_CRTC_RES_FLAG_FREE;
+		SDE_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, dup_rp->sequence_id,
+				dup_res->type, dup_res->tag, dup_res->val,
+				atomic_read(&dup_res->refcount));
+		list_add_tail(&dup_res->list, &dup_rp->res_list);
+		if (dup_res->ops.get)
+			dup_res->ops.get(dup_res->val, 0, -1);
+	}
+}
+
+/**
+ * _sde_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_reset(struct sde_crtc_respool *rp)
+{
+	if (!rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	rp->sequence_id = 0;
+	INIT_LIST_HEAD(&rp->res_list);
+	rp->ops.get = _sde_crtc_hw_blk_get;
+	rp->ops.put = _sde_crtc_hw_blk_put;
+}
+
+/**
+ * _sde_crtc_rp_add - add given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+static int _sde_crtc_rp_add(struct sde_crtc_respool *rp, u32 type, u64 tag,
+		void *val, struct sde_crtc_res_ops *ops)
+{
+	struct sde_crtc_res *res;
+	struct drm_crtc *crtc;
+
+	if (!rp || !ops) {
+		SDE_ERROR("invalid resource pool/ops\n");
+		return -EINVAL;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		SDE_ERROR("crtc%d.%u already exists res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		return -EEXIST;
+	}
+	res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&res->list);
+	atomic_set(&res->refcount, 1);
+	res->type = type;
+	res->tag = tag;
+	res->val = val;
+	res->ops = *ops;
+	list_add_tail(&res->list, &rp->res_list);
+	SDE_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
+			crtc->base.id, rp->sequence_id, type, tag);
+	return 0;
+}
+
+/**
+ * _sde_crtc_rp_get - lookup the resource from given resource pool and obtain
+ *	if available; otherwise, obtain resource from global pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag:  Search tag for given resource
+ * return: Resource handle if success; pointer error or null otherwise
+ */
+static void *_sde_crtc_rp_get(struct sde_crtc_respool *rp, u32 type, u64 tag)
+{
+	struct sde_crtc_res *res;
+	void *val = NULL;
+	int rc;
+	struct drm_crtc *crtc;
+
+	if (!rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return NULL;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return NULL;
+	}
+
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		atomic_inc(&res->refcount);
+		res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
+		return res->val;
+	}
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || !(res->flags & SDE_CRTC_RES_FLAG_FREE))
+			continue;
+		SDE_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		atomic_inc(&res->refcount);
+		res->tag = tag;
+		res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
+		return res->val;
+	}
+	if (rp->ops.get)
+		val = rp->ops.get(NULL, type, -1);
+	if (IS_ERR_OR_NULL(val)) {
+		SDE_ERROR("crtc%d.%u failed to get res:0x%x//\n",
+				crtc->base.id, rp->sequence_id, type);
+		return NULL;
+	}
+	rc = _sde_crtc_rp_add(rp, type, tag, val, &rp->ops);
+	if (rc) {
+		SDE_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
+				crtc->base.id, rp->sequence_id, type, tag);
+		if (rp->ops.put)
+			rp->ops.put(val);
+		val = NULL;
+	}
+	return val;
+}
+
+/**
+ * _sde_crtc_rp_put - return given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+static void _sde_crtc_rp_put(struct sde_crtc_respool *rp, u32 type, u64 tag)
+{
+	struct sde_crtc_res *res, *next;
+	struct drm_crtc *crtc;
+
+	if (!rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	list_for_each_entry_safe(res, next, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		if (res->flags & SDE_CRTC_RES_FLAG_FREE)
+			SDE_ERROR(
+				"crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
+					crtc->base.id, rp->sequence_id,
+					res->type, res->tag, res->val,
+					atomic_read(&res->refcount));
+		else if (atomic_dec_return(&res->refcount) == 0)
+			res->flags |= SDE_CRTC_RES_FLAG_FREE;
+
+		return;
+	}
+	SDE_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
+			crtc->base.id, rp->sequence_id, type, tag);
+}
+
+int sde_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+		void *val, struct sde_crtc_res_ops *ops)
+{
+	struct sde_crtc_respool *rp;
+
+	if (!state) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rp = &to_sde_crtc_state(state)->rp;
+	return _sde_crtc_rp_add(rp, type, tag, val, ops);
+}
+
+void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+	struct sde_crtc_respool *rp;
+	void *val;
+
+	if (!state) {
+		SDE_ERROR("invalid parameters\n");
+		return NULL;
+	}
+
+	rp = &to_sde_crtc_state(state)->rp;
+	val = _sde_crtc_rp_get(rp, type, tag);
+	if (IS_ERR(val)) {
+		SDE_ERROR("failed to get res type:0x%x:0x%llx\n",
+				type, tag);
+		return NULL;
+	}
+
+	return val;
+}
+
+void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+	struct sde_crtc_respool *rp;
+
+	if (!state) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	rp = &to_sde_crtc_state(state)->rp;
+	_sde_crtc_rp_put(rp, type, tag);
+}
+
 static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
 {
 	if (!sde_crtc)
@@ -260,6 +677,8 @@
 	int left_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
 	int right_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
 	int i;
+	bool sbuf_mode = false;
+	u32 prefill = 0;
 
 	if (!sde_crtc || !mixer) {
 		SDE_ERROR("invalid sde_crtc or mixer\n");
@@ -275,8 +694,10 @@
 
 		pstate = to_sde_plane_state(plane->state);
 
-		flush_mask = ctl->ops.get_bitmask_sspp(ctl,
-							sde_plane_pipe(plane));
+		if (sde_plane_is_sbuf_mode(plane, &prefill))
+			sbuf_mode = true;
+
+		sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
 
 		/* always stage plane on either left or right lm */
 		if (plane->state->crtc_x >= crtc_split_width) {
@@ -355,6 +776,21 @@
 			_sde_crtc_setup_dim_layer_cfg(crtc, sde_crtc,
 					mixer, &cstate->dim_layer[i]);
 	}
+
+	if (ctl->ops.setup_sbuf_cfg) {
+		cstate = to_sde_crtc_state(crtc->state);
+		if (!sbuf_mode) {
+			cstate->sbuf_cfg.rot_op_mode =
+					SDE_CTL_ROT_OP_MODE_OFFLINE;
+			cstate->sbuf_prefill_line = 0;
+		} else {
+			cstate->sbuf_cfg.rot_op_mode =
+					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+			cstate->sbuf_prefill_line = prefill;
+		}
+
+		ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
+	}
 }
 
 /**
@@ -448,12 +884,6 @@
 			sde_connector_prepare_fence(conn);
 		}
 
-	if (cstate->num_connectors > 0 && cstate->connectors[0]->encoder)
-		cstate->intf_mode = sde_encoder_get_intf_mode(
-				cstate->connectors[0]->encoder);
-	else
-		cstate->intf_mode = INTF_MODE_NONE;
-
 	/* prepare main output fence */
 	sde_fence_prepare(&sde_crtc->output_fence);
 }
@@ -495,6 +925,22 @@
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+
+	if (!crtc || !crtc->dev) {
+		SDE_ERROR("invalid crtc\n");
+		return INTF_MODE_NONE;
+	}
+
+	drm_for_each_encoder(encoder, crtc->dev)
+		if (encoder->crtc == crtc)
+			return sde_encoder_get_intf_mode(encoder);
+
+	return INTF_MODE_NONE;
+}
+
 static void sde_crtc_vblank_cb(void *data)
 {
 	struct drm_crtc *crtc = (struct drm_crtc *)data;
@@ -993,6 +1439,8 @@
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
+	_sde_crtc_rp_destroy(&cstate->rp);
+
 	__drm_atomic_helper_crtc_destroy_state(state);
 
 	/* destroy value helper */
@@ -1007,6 +1455,7 @@
 	struct sde_crtc *sde_crtc;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct sde_crtc_state *cstate;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -1016,8 +1465,11 @@
 	sde_crtc = to_sde_crtc(crtc);
 	sde_kms = _sde_crtc_get_kms(crtc);
 	priv = sde_kms->dev->dev_private;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct sde_encoder_kickoff_params params = { 0 };
+
 		if (encoder->crtc != crtc)
 			continue;
 
@@ -1025,7 +1477,8 @@
 		 * Encoder will flush/start now, unless it has a tx pending.
 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
 		 */
-		sde_encoder_prepare_for_kickoff(encoder);
+		params.inline_rotate_prefill = cstate->sbuf_prefill_line;
+		sde_encoder_prepare_for_kickoff(encoder, &params);
 	}
 
 	if (atomic_read(&sde_crtc->frame_pending) > 2) {
@@ -1062,8 +1515,6 @@
 	struct drm_device *dev;
 	struct drm_crtc *crtc;
 	struct drm_encoder *enc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
 
 	if (!sde_crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -1072,17 +1523,11 @@
 
 	crtc = &sde_crtc->base;
 	dev = crtc->dev;
-	priv = dev->dev_private;
-
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-	sde_kms = to_sde_kms(priv->kms);
 
 	if (enable) {
-		sde_power_resource_enable(&priv->phandle,
-				sde_kms->core_client, true);
+		if (_sde_crtc_power_enable(sde_crtc, true))
+			return;
+
 		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
 			if (enc->crtc != crtc)
 				continue;
@@ -1101,8 +1546,7 @@
 
 			sde_encoder_register_vblank_callback(enc, NULL, NULL);
 		}
-		sde_power_resource_enable(&priv->phandle,
-				sde_kms->core_client, false);
+		_sde_crtc_power_enable(sde_crtc, false);
 	}
 }
 
@@ -1188,6 +1632,8 @@
 	/* duplicate base helper */
 	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
 
+	_sde_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
 	return &cstate->base;
 }
 
@@ -1230,6 +1676,8 @@
 
 	_sde_crtc_set_input_fence_timeout(cstate);
 
+	_sde_crtc_rp_reset(&cstate->rp);
+
 	cstate->base.crtc = crtc;
 	crtc->state = &cstate->base;
 }
@@ -1239,6 +1687,9 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_encoder *encoder;
+	unsigned long flags;
+	struct sde_crtc_irq_info *node = NULL;
+	int ret;
 
 	if (!crtc || !crtc->dev || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
@@ -1290,6 +1741,18 @@
 
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
+		ret = 0;
+		if (node->func)
+			ret = node->func(crtc, false, &node->irq);
+		if (ret)
+			SDE_ERROR("%s failed to disable event %x\n",
+					sde_crtc->name, node->event);
+	}
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
 	mutex_unlock(&sde_crtc->crtc_lock);
 }
 
@@ -1299,9 +1762,10 @@
 	struct sde_crtc_mixer *mixer;
 	struct sde_hw_mixer *lm;
 	struct drm_display_mode *mode;
-	struct sde_hw_mixer_cfg cfg;
 	struct drm_encoder *encoder;
-	int i;
+	unsigned long flags;
+	struct sde_crtc_irq_info *node = NULL;
+	int i, ret;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -1330,12 +1794,23 @@
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		lm = mixer[i].hw_lm;
-		cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
-		cfg.out_height = mode->vdisplay;
-		cfg.right_mixer = (i == 0) ? false : true;
-		cfg.flags = 0;
-		lm->ops.setup_mixer_out(lm, &cfg);
+		lm->cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
+		lm->cfg.out_height = mode->vdisplay;
+		lm->cfg.right_mixer = (i == 0) ? false : true;
+		lm->cfg.flags = 0;
+		lm->ops.setup_mixer_out(lm, &lm->cfg);
 	}
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
+		ret = 0;
+		if (node->func)
+			ret = node->func(crtc, true, &node->irq);
+		if (ret)
+			SDE_ERROR("%s failed to enable event %x\n",
+				sde_crtc->name, node->event);
+	}
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
 }
 
 struct plane_state {
@@ -1386,14 +1861,15 @@
 		return -EINVAL;
 	}
 
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
 	if (!state->enable || !state->active) {
 		SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
 				crtc->base.id, state->enable, state->active);
-		return 0;
+		goto end;
 	}
 
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
 	mode = &state->adjusted_mode;
 	SDE_DEBUG("%s: check", sde_crtc->name);
 
@@ -1601,6 +2077,7 @@
 
 
 end:
+	_sde_crtc_rp_free_unused(&cstate->rp);
 	return rc;
 }
 
@@ -1693,12 +2170,28 @@
 			CRTC_PROP_CORE_CLK);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ab", 0x0, 0, U64_MAX,
-			SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
 			CRTC_PROP_CORE_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ib", 0x0, 0, U64_MAX,
-			SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
+			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA,
 			CRTC_PROP_CORE_IB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"mem_ab", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+			CRTC_PROP_MEM_AB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"mem_ib", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+			CRTC_PROP_MEM_IB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"rot_prefill_bw", 0, 0, U64_MAX,
+			catalog->perf.max_bw_high * 1000ULL,
+			CRTC_PROP_ROT_PREFILL_BW);
+	msm_property_install_range(&sde_crtc->property_info,
+			"rot_clk", 0, 0, U64_MAX,
+			sde_kms->perf.max_core_clk_rate,
+			CRTC_PROP_ROT_CLK);
 
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
 		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
@@ -1790,6 +2283,9 @@
 		}
 		if (ret)
 			DRM_ERROR("failed to set the property\n");
+
+		SDE_DEBUG("crtc%d %s[%d] <= 0x%llx ret=%d\n", crtc->base.id,
+				property->name, property->base.id, val, ret);
 	}
 
 	return ret;
@@ -1974,7 +2470,108 @@
 	return single_open(file, _sde_debugfs_status_show, inode->i_private);
 }
 
-#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static ssize_t _sde_crtc_misr_setup(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_mixer *m;
+	int i = 0, rc;
+	char buf[MISR_BUFF_SIZE + 1];
+	u32 frame_count, enable;
+	size_t buff_copy;
+
+	if (!file || !file->private_data)
+		return -EINVAL;
+
+	sde_crtc = file->private_data;
+	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+	if (copy_from_user(buf, user_buf, buff_copy)) {
+		SDE_ERROR("buffer copy failed\n");
+		return -EINVAL;
+	}
+
+	buf[buff_copy] = 0; /* end of string */
+
+	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+		return -EINVAL;
+
+	rc = _sde_crtc_power_enable(sde_crtc, true);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	sde_crtc->misr_enable = enable;
+	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+		m = &sde_crtc->mixers[i];
+		if (!m->hw_lm)
+			continue;
+
+		m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+	_sde_crtc_power_enable(sde_crtc, false);
+
+	return count;
+}
+
+static ssize_t _sde_crtc_misr_read(struct file *file,
+		char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_mixer *m;
+	int i = 0, rc;
+	ssize_t len = 0;
+	char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+	if (*ppos)
+		return 0;
+
+	if (!file || !file->private_data)
+		return -EINVAL;
+
+	sde_crtc = file->private_data;
+	rc = _sde_crtc_power_enable(sde_crtc, true);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	if (!sde_crtc->misr_enable) {
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"disabled\n");
+		goto buff_check;
+	}
+
+	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+		m = &sde_crtc->mixers[i];
+		if (!m->hw_lm)
+			continue;
+
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+					m->hw_lm->idx - LM_0);
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+				m->hw_lm->ops.collect_misr(m->hw_lm));
+	}
+
+buff_check:
+	if (count <= len) {
+		len = 0;
+		goto end;
+	}
+
+	if (copy_to_user(user_buff, buf, len)) {
+		len = -EFAULT;
+		goto end;
+	}
+
+	*ppos += len;   /* increase offset */
+
+end:
+	mutex_unlock(&sde_crtc->crtc_lock);
+	_sde_crtc_power_enable(sde_crtc, false);
+	return len;
+}
+
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)                          \
 static int __prefix ## _open(struct inode *inode, struct file *file)	\
 {									\
 	return single_open(file, __prefix ## _show, inode->i_private);	\
@@ -1991,15 +2588,23 @@
 {
 	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
 	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
+	struct sde_crtc_res *res;
 
 	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
-	seq_printf(s, "intf_mode: %d\n", cstate->intf_mode);
+	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
 	seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
 	seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
 	seq_printf(s, "max_per_pipe_ib: %llu\n",
 			cstate->cur_perf.max_per_pipe_ib);
 
+	seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
+	list_for_each_entry(res, &cstate->rp.res_list, list)
+		seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+	seq_puts(s, "\n");
+
 	return 0;
 }
 DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
@@ -2015,6 +2620,11 @@
 		.llseek =	seq_lseek,
 		.release =	single_release,
 	};
+	static const struct file_operations debugfs_misr_fops = {
+		.open =		simple_open,
+		.read =		_sde_crtc_misr_read,
+		.write =	_sde_crtc_misr_setup,
+	};
 
 	if (!crtc)
 		return -EINVAL;
@@ -2037,6 +2647,8 @@
 			sde_crtc->debugfs_root,
 			&sde_crtc->base,
 			&sde_crtc_debugfs_state_fops);
+	debugfs_create_file("misr_data", 0644, sde_crtc->debugfs_root,
+					sde_crtc, &debugfs_misr_fops);
 
 	return 0;
 }
@@ -2058,7 +2670,6 @@
 
 static void _sde_crtc_destroy_debugfs(struct drm_crtc *crtc)
 {
-	return 0;
 }
 #endif /* CONFIG_DEBUG_FS */
 
@@ -2107,21 +2718,22 @@
 	}
 
 	event = container_of(work, struct sde_crtc_event, kt_work);
-	if (event->cb_func)
-		event->cb_func(event->usr);
 
 	/* set sde_crtc to NULL for static work structures */
 	sde_crtc = event->sde_crtc;
 	if (!sde_crtc)
 		return;
 
+	if (event->cb_func)
+		event->cb_func(&sde_crtc->base, event->usr);
+
 	spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
 	list_add_tail(&event->list, &sde_crtc->event_free_list);
 	spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);
 }
 
 int sde_crtc_event_queue(struct drm_crtc *crtc,
-		void (*func)(void *usr), void *usr)
+		void (*func)(struct drm_crtc *crtc, void *usr), void *usr)
 {
 	unsigned long irq_flags;
 	struct sde_crtc *sde_crtc;
@@ -2131,6 +2743,8 @@
 		return -EINVAL;
 	sde_crtc = to_sde_crtc(crtc);
 
+	if (!sde_crtc->event_thread)
+		return -EINVAL;
 	/*
 	 * Obtain an event struct from the private cache. This event
 	 * queue may be called from ISR contexts, so use a private
@@ -2214,6 +2828,7 @@
 	atomic_set(&sde_crtc->frame_pending, 0);
 
 	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
+	INIT_LIST_HEAD(&sde_crtc->user_event_list);
 	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
 		INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
 		list_add(&sde_crtc->frame_events[i].list,
@@ -2257,3 +2872,129 @@
 	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
 	return crtc;
 }
+
+static int _sde_crtc_event_enable(struct sde_kms *kms,
+		struct drm_crtc *crtc_drm, u32 event)
+{
+	struct sde_crtc *crtc = NULL;
+	struct sde_crtc_irq_info *node;
+	struct msm_drm_private *priv;
+	unsigned long flags;
+	bool found = false;
+	int ret, i = 0;
+
+	crtc = to_sde_crtc(crtc_drm);
+	spin_lock_irqsave(&crtc->spin_lock, flags);
+	list_for_each_entry(node, &crtc->user_event_list, list) {
+		if (node->event == event) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&crtc->spin_lock, flags);
+
+	/* event already enabled */
+	if (found)
+		return 0;
+
+	node = NULL;
+	for (i = 0; i < ARRAY_SIZE(custom_events); i++) {
+		if (custom_events[i].event == event &&
+			custom_events[i].func) {
+			node = kzalloc(sizeof(*node), GFP_KERNEL);
+			if (!node)
+				return -ENOMEM;
+			node->event = event;
+			INIT_LIST_HEAD(&node->list);
+			node->func = custom_events[i].func;
+			node->event = event;
+			break;
+		}
+	}
+
+	if (!node) {
+		SDE_ERROR("unsupported event %x\n", event);
+		return -EINVAL;
+	}
+
+	priv = kms->dev->dev_private;
+	ret = 0;
+	if (crtc_drm->enabled) {
+		sde_power_resource_enable(&priv->phandle, kms->core_client,
+				true);
+		ret = node->func(crtc_drm, true, &node->irq);
+		sde_power_resource_enable(&priv->phandle, kms->core_client,
+				false);
+	}
+
+	if (!ret) {
+		spin_lock_irqsave(&crtc->spin_lock, flags);
+		list_add_tail(&node->list, &crtc->user_event_list);
+		spin_unlock_irqrestore(&crtc->spin_lock, flags);
+	} else {
+		kfree(node);
+	}
+
+	return ret;
+}
+
+static int _sde_crtc_event_disable(struct sde_kms *kms,
+		struct drm_crtc *crtc_drm, u32 event)
+{
+	struct sde_crtc *crtc = NULL;
+	struct sde_crtc_irq_info *node = NULL;
+	struct msm_drm_private *priv;
+	unsigned long flags;
+	bool found = false;
+	int ret;
+
+	crtc = to_sde_crtc(crtc_drm);
+	spin_lock_irqsave(&crtc->spin_lock, flags);
+	list_for_each_entry(node, &crtc->user_event_list, list) {
+		if (node->event == event) {
+			list_del(&node->list);
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&crtc->spin_lock, flags);
+
+	/* event already disabled */
+	if (!found)
+		return 0;
+
+	/*
+	 * The crtc is disabled and its interrupts are already cleared;
+	 * just remove the node from the list, no need to disable/de-register.
+	 */
+	if (!crtc_drm->enabled) {
+		kfree(node);
+		return 0;
+	}
+	priv = kms->dev->dev_private;
+	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+	ret = node->func(crtc_drm, false, &node->irq);
+	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
+	return ret;
+}
+
+int sde_crtc_register_custom_event(struct sde_kms *kms,
+		struct drm_crtc *crtc_drm, u32 event, bool en)
+{
+	struct sde_crtc *crtc = NULL;
+	int ret;
+
+	crtc = to_sde_crtc(crtc_drm);
+	if (!crtc || !kms || !kms->dev) {
+		DRM_ERROR("invalid sde_crtc %pK kms %pK dev %pK\n", crtc,
+			kms, ((kms) ? (kms->dev) : NULL));
+		return -EINVAL;
+	}
+
+	if (en)
+		ret = _sde_crtc_event_enable(kms, crtc_drm, event);
+	else
+		ret = _sde_crtc_event_disable(kms, crtc_drm, event);
+
+	return ret;
+}
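Illustration only: the intended caller-side flow for the new registration API. Enabling DRM_EVENT_AD_BACKLIGHT is routed through custom_events[] to sde_cp_ad_interrupt(), and the node is tracked on user_event_list so the event is re-armed in sde_crtc_enable() and torn down in sde_crtc_disable(). The helper name is hypothetical.

/* Illustration only; not part of this patch */
static int example_enable_ad_event(struct sde_kms *kms, struct drm_crtc *crtc)
{
	/* pass true to enable, false to disable the per-crtc AD backlight event */
	return sde_crtc_register_custom_event(kms, crtc,
			DRM_EVENT_AD_BACKLIGHT, true);
}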
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 286d9e6..19ae27f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -25,6 +25,7 @@
 #include "sde_fence.h"
 #include "sde_kms.h"
 #include "sde_core_perf.h"
+#include "sde_hw_blk.h"
 
 #define SDE_CRTC_NAME_SIZE	12
 
@@ -92,7 +93,7 @@
 	struct kthread_work kt_work;
 	void *sde_crtc;
 
-	void (*cb_func)(void *usr);
+	void (*cb_func)(struct drm_crtc *crtc, void *usr);
 	void *usr;
 };
 
@@ -124,6 +125,8 @@
  * @feature_list  : list of color processing features supported on a crtc
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
+ * @ad_dirty      : list containing ad properties that are dirty
+ * @ad_active     : list containing ad properties that are active
  * @crtc_lock     : crtc lock around create, destroy and access.
  * @frame_pending : Whether or not an update is pending
  * @frame_events  : static allocation of in-flight frame events
@@ -134,6 +137,7 @@
  * @event_cache   : Local cache of event worker structures
  * @event_free_list : List of available event structures
  * @event_lock    : Spinlock around event handling code
+ * @misr_enable   : boolean flag indicating misr enable/disable status.
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -165,6 +169,9 @@
 	struct list_head feature_list;
 	struct list_head active_list;
 	struct list_head dirty_list;
+	struct list_head ad_dirty;
+	struct list_head ad_active;
+	struct list_head user_event_list;
 
 	struct mutex crtc_lock;
 
@@ -179,11 +186,62 @@
 	struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
 	struct list_head event_free_list;
 	spinlock_t event_lock;
+	bool misr_enable;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
 
 /**
+ * struct sde_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct sde_crtc_res_ops {
+	void *(*get)(void *val, u32 type, u64 tag);
+	void (*put)(void *val);
+};
+
+/* crtc resource type (0x0-0xffff reserved for hw block type) */
+#define SDE_CRTC_RES_ROT_OUT_FBO	0x10000
+#define SDE_CRTC_RES_ROT_OUT_FB		0x10001
+#define SDE_CRTC_RES_ROT_PLANE		0x10002
+#define SDE_CRTC_RES_ROT_IN_FB		0x10003
+
+#define SDE_CRTC_RES_FLAG_FREE		BIT(0)
+
+/**
+ * struct sde_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct sde_crtc_res {
+	struct list_head list;
+	u32 type;
+	u64 tag;
+	atomic_t refcount;
+	struct sde_crtc_res_ops ops;
+	void *val;
+	u32 flags;
+};
+
+/**
+ * struct sde_crtc_respool - crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resources managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct sde_crtc_respool {
+	u32 sequence_id;
+	struct list_head res_list;
+	struct sde_crtc_res_ops ops;
+};
+
+/**
  * struct sde_crtc_state - sde container for atomic crtc state
  * @base: Base drm crtc state structure
  * @connectors    : Currently associated drm connectors
@@ -197,6 +255,8 @@
  * @dim_layer: Dim layer configs
  * @cur_perf: current performance state
  * @new_perf: new performance state
+ * @sbuf_cfg: stream buffer configuration
+ * @sbuf_prefill_line: number of lines for inline rotator prefetch
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
@@ -215,6 +275,10 @@
 
 	struct sde_core_perf_params cur_perf;
 	struct sde_core_perf_params new_perf;
+	struct sde_ctl_sbuf_cfg sbuf_cfg;
+	u64 sbuf_prefill_line;
+
+	struct sde_crtc_respool rp;
 };
 
 #define to_sde_crtc_state(x) \
@@ -308,16 +372,20 @@
 void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 
 /**
+ * sde_crtc_register_custom_event - API for enabling/disabling a crtc event
+ * @kms: Pointer to sde_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int sde_crtc_register_custom_event(struct sde_kms *kms,
+		struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
  * sde_crtc_get_intf_mode - get interface mode of the given crtc
  * @crtc: Pointert to crtc
  */
-static inline enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
-{
-	struct sde_crtc_state *cstate =
-			crtc ? to_sde_crtc_state(crtc->state) : NULL;
-
-	return cstate ? cstate->intf_mode : INTF_MODE_NONE;
-}
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc);
 
 /**
  * sde_crtc_get_client_type - check the crtc type- rt, nrt, rsc, etc.
@@ -353,6 +421,36 @@
  * Returns: Zero on success
  */
 int sde_crtc_event_queue(struct drm_crtc *crtc,
-		void (*func)(void *usr), void *usr);
+		void (*func)(struct drm_crtc *crtc, void *usr), void *usr);
+
+/**
+ * sde_crtc_res_add - add given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+int sde_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+		void *val, struct sde_crtc_res_ops *ops);
+
+/**
+ * sde_crtc_res_get - get given resource from resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle if success; pointer error or null otherwise
+ */
+void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag);
+
+/**
+ * sde_crtc_res_put - return given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag);
 
 #endif /* _SDE_CRTC_H_ */
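A rough usage sketch of the per-state resource pool added above (assumptions: the helper name and the framebuffer use case are illustrative; the rotator code that will consume this API is not part of this patch).

/* Illustration only */
static int example_use_rot_fb(struct drm_crtc_state *state, u64 tag)
{
	struct drm_framebuffer *fb;

	/* hits rp->res_list first; falls back to rp->ops.get() on a miss */
	fb = sde_crtc_res_get(state, SDE_CRTC_RES_ROT_OUT_FB, tag);
	if (!fb)
		return -ENOMEM;

	/* ... program the inline rotator output using fb ... */

	/* drop the refcount; the entry is marked FREE once it reaches zero */
	sde_crtc_res_put(state, SDE_CRTC_RES_ROT_OUT_FB, tag);
	return 0;
}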
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index c2b3064..7137aaa 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -19,6 +19,7 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/sde_rsc.h>
 
 #include "msm_drv.h"
 #include "sde_kms.h"
@@ -31,8 +32,6 @@
 #include "sde_hw_ctl.h"
 #include "sde_formats.h"
 #include "sde_encoder_phys.h"
-#include "sde_color_processing.h"
-#include "sde_rsc.h"
 #include "sde_power_handle.h"
 #include "sde_hw_dsc.h"
 
@@ -57,6 +56,8 @@
 
 #define MAX_CHANNELS_PER_ENC 2
 
+#define MISR_BUFF_SIZE			256
+
 /**
  * struct sde_encoder_virt - virtual encoder. Container of one or more physical
  *	encoders. Virtual encoder manages one "logical" display. Physical
@@ -90,6 +91,7 @@
  * @crtc_frame_event:		callback event
  * @frame_done_timeout:		frame done timeout in Hz
  * @frame_done_timer:		watchdog timer for frame done event
+ * @misr_enable:		misr enable/disable status
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -120,6 +122,7 @@
 	struct sde_rsc_client *rsc_client;
 	struct msm_display_info disp_info;
 	bool rsc_state_update;
+	bool misr_enable;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -131,6 +134,36 @@
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
 
+static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc,
+								bool enable)
+{
+	struct drm_encoder *drm_enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid sde enc\n");
+		return -EINVAL;
+	}
+
+	drm_enc = &sde_enc->base;
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("drm device invalid\n");
+		return -EINVAL;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+									enable);
+}
+
 void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
 		struct sde_encoder_hw_resources *hw_res,
 		struct drm_connector_state *conn_state)
@@ -706,8 +739,6 @@
 static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
 	int i = 0;
 	int ret = 0;
 
@@ -723,13 +754,13 @@
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));
 
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+	ret = _sde_encoder_power_enable(sde_enc, true);
+	if (ret)
+		return;
 
 	sde_enc->cur_master = NULL;
 
@@ -810,7 +841,7 @@
 
 	sde_rm_release(&sde_kms->rm, drm_enc);
 
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+	_sde_encoder_power_enable(sde_enc, false);
 }
 
 static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
@@ -903,6 +934,7 @@
 	enum sde_rsc_state rsc_state;
 	struct sde_rsc_cmd_config rsc_config;
 	int ret;
+	struct msm_display_info *disp_info;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -910,18 +942,25 @@
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc->disp_info.is_primary)
-		return NULL;
+	disp_info = &sde_enc->disp_info;
 
+	/*
+	 * Only a primary command mode panel can request the CMD state.
+	 * All other panels/displays request the VID state, including a
+	 * secondary command mode panel.
+	 */
 	rsc_state = enable ?
-		(sde_enc->disp_info.capabilities & MSM_DISPLAY_CAP_CMD_MODE ?
-		SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+		(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
+		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
+		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
 
-	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update) {
-		rsc_config.fps = sde_enc->disp_info.frame_rate;
-		rsc_config.vtotal = sde_enc->disp_info.vtotal;
-		rsc_config.prefill_lines = sde_enc->disp_info.prefill_lines;
-		rsc_config.jitter = sde_enc->disp_info.jitter;
+	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update
+					&& disp_info->is_primary) {
+		rsc_config.fps = disp_info->frame_rate;
+		rsc_config.vtotal = disp_info->vtotal;
+		rsc_config.prefill_lines = disp_info->prefill_lines;
+		rsc_config.jitter = disp_info->jitter;
+		/* update it only once */
 		sde_enc->rsc_state_update = true;
 
 		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
@@ -936,7 +975,7 @@
 	if (ret)
 		SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
 
-	return sde_enc->rsc_client;
+	return sde_enc->disp_info.is_primary ? sde_enc->rsc_client : NULL;
 }
 
 void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
@@ -1195,7 +1234,8 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
-void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
@@ -1216,7 +1256,7 @@
 		phys = sde_enc->phys_encs[i];
 		if (phys) {
 			if (phys->ops.prepare_for_kickoff)
-				phys->ops.prepare_for_kickoff(phys);
+				phys->ops.prepare_for_kickoff(phys, params);
 			if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET)
 				needs_hw_reset = true;
 		}
@@ -1328,6 +1368,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_DEBUG_FS
 static int _sde_encoder_status_show(struct seq_file *s, void *data)
 {
 	struct sde_encoder_virt *sde_enc;
@@ -1373,112 +1414,114 @@
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
 static int _sde_encoder_debugfs_status_open(struct inode *inode,
 		struct file *file)
 {
 	return single_open(file, _sde_encoder_status_show, inode->i_private);
 }
 
-static void _sde_set_misr_params(struct sde_encoder_phys *phys, u32 enable,
-					u32 frame_count)
-{
-	int j;
-
-	if (!phys->misr_map)
-		return;
-
-	phys->misr_map->enable = enable;
-
-	if (frame_count <= SDE_CRC_BATCH_SIZE)
-		phys->misr_map->frame_count = frame_count;
-	else if (frame_count <= 0)
-		phys->misr_map->frame_count = 0;
-	else
-		phys->misr_map->frame_count = SDE_CRC_BATCH_SIZE;
-
-	if (!enable) {
-		phys->misr_map->last_idx = 0;
-		phys->misr_map->frame_count = 0;
-		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
-			phys->misr_map->crc_value[j] = 0;
-	}
-}
-
-static ssize_t _sde_encoder_misr_set(struct file *file,
+static ssize_t _sde_encoder_misr_setup(struct file *file,
 		const char __user *user_buf, size_t count, loff_t *ppos)
 {
 	struct sde_encoder_virt *sde_enc;
-	struct drm_encoder *drm_enc;
-	int i = 0;
-	char buf[10];
-	u32 enable, frame_count;
+	int i = 0, rc;
+	char buf[MISR_BUFF_SIZE + 1];
+	size_t buff_copy;
+	u32 frame_count, enable;
 
-	drm_enc = file->private_data;
-	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!file || !file->private_data)
+		return -EINVAL;
 
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
+	sde_enc = file->private_data;
 
-	buf[count] = 0; /* end of string */
+	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+	if (copy_from_user(buf, user_buf, buff_copy))
+		return -EINVAL;
+
+	buf[buff_copy] = 0; /* end of string */
 
 	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
-		return -EFAULT;
+		return -EINVAL;
+
+	rc = _sde_encoder_power_enable(sde_enc, true);
+	if (rc)
+		return rc;
 
 	mutex_lock(&sde_enc->enc_lock);
+	sde_enc->misr_enable = enable;
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (!phys || !phys->misr_map || !phys->ops.setup_misr)
+		if (!phys || !phys->ops.setup_misr)
 			continue;
 
-		_sde_set_misr_params(phys, enable, frame_count);
-		phys->ops.setup_misr(phys, phys->misr_map);
+		phys->ops.setup_misr(phys, enable, frame_count);
 	}
 	mutex_unlock(&sde_enc->enc_lock);
+	_sde_encoder_power_enable(sde_enc, false);
+
 	return count;
 }
 
-static ssize_t _sde_encoder_misr_read(
-		struct file *file,
-		char __user *buff, size_t count, loff_t *ppos)
+static ssize_t _sde_encoder_misr_read(struct file *file,
+		char __user *user_buff, size_t count, loff_t *ppos)
 {
 	struct sde_encoder_virt *sde_enc;
-	struct drm_encoder *drm_enc;
-	int i = 0, j = 0, len = 0;
-	char buf[512] = {'\0'};
+	int i = 0, len = 0;
+	char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+	int rc;
 
 	if (*ppos)
 		return 0;
 
-	drm_enc = file->private_data;
-	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!file || !file->private_data)
+		return -EINVAL;
+
+	sde_enc = file->private_data;
+
+	rc = _sde_encoder_power_enable(sde_enc, true);
+	if (rc)
+		return rc;
 
 	mutex_lock(&sde_enc->enc_lock);
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-		struct sde_misr_params *misr_map;
-
-		if (!phys || !phys->misr_map)
-			continue;
-
-		misr_map = phys->misr_map;
-
-		len += snprintf(buf+len, sizeof(buf), "INTF%d\n", i);
-		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
-			len += snprintf(buf+len, sizeof(buf), "%x\n",
-						misr_map->crc_value[j]);
+	if (!sde_enc->misr_enable) {
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"disabled\n");
+		goto buff_check;
+	} else if (sde_enc->disp_info.capabilities &
+						~MSM_DISPLAY_CAP_VID_MODE) {
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"unsupported\n");
+		goto buff_check;
 	}
 
-	if (len < 0 || len >= sizeof(buf))
-		return 0;
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+		if (!phys || !phys->ops.collect_misr)
+			continue;
 
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-		return -EFAULT;
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"Intf idx:%d\n", phys->intf_idx - INTF_0);
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+					phys->ops.collect_misr(phys));
+	}
+
+buff_check:
+	if (count <= len) {
+		len = 0;
+		goto end;
+	}
+
+	if (copy_to_user(user_buff, buf, len)) {
+		len = -EFAULT;
+		goto end;
+	}
 
 	*ppos += len;   /* increase offset */
-	mutex_unlock(&sde_enc->enc_lock);
 
+end:
+	mutex_unlock(&sde_enc->enc_lock);
+	_sde_encoder_power_enable(sde_enc, false);
 	return len;
 }
 
@@ -1498,7 +1541,7 @@
 	static const struct file_operations debugfs_misr_fops = {
 		.open = simple_open,
 		.read = _sde_encoder_misr_read,
-		.write = _sde_encoder_misr_set,
+		.write = _sde_encoder_misr_setup,
 	};
 
 	char name[SDE_NAME_SIZE];
@@ -1525,7 +1568,7 @@
 		sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
 
 	debugfs_create_file("misr_data", 0644,
-		sde_enc->debugfs_root, drm_enc, &debugfs_misr_fops);
+		sde_enc->debugfs_root, sde_enc, &debugfs_misr_fops);
 
 	return 0;
 }
@@ -1546,7 +1589,7 @@
 	return 0;
 }
 
-static _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+static void _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
 {
 }
 #endif
@@ -1848,7 +1891,7 @@
 	sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
 					disp_info->is_primary);
 	if (IS_ERR_OR_NULL(sde_enc->rsc_client)) {
-		SDE_ERROR("sde rsc client create failed :%ld\n",
+		SDE_DEBUG("sde rsc client create failed :%ld\n",
 						PTR_ERR(sde_enc->rsc_client));
 		sde_enc->rsc_client = NULL;
 	}
@@ -1887,10 +1930,6 @@
 			if (ret)
 				return ret;
 		}
-
-		if (phys && phys->ops.collect_misr)
-			if (phys->misr_map && phys->misr_map->enable)
-				phys->ops.collect_misr(phys, phys->misr_map);
 	}
 
 	return ret;
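For completeness, a hedged userspace sketch of exercising the reworked misr_data debugfs node: writes take "<enable> <frame_count>" and reads return one CRC per interface, as implemented by the handlers above. The debugfs path is an assumption, not defined by this patch.

/* Illustration only; the node path is assumed, not defined here */
#include <stdio.h>

static int example_enable_misr(const char *node, unsigned int frames)
{
	/* e.g. node = "/sys/kernel/debug/dri/0/encoder.../misr_data" (assumed) */
	FILE *f = fopen(node, "w");

	if (!f)
		return -1;
	fprintf(f, "%u %u\n", 1u, frames);	/* enable, frame count */
	return fclose(f);
}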
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index bd7ef69..cdecd08 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -45,6 +45,14 @@
 };
 
 /**
+ * struct sde_encoder_kickoff_params - info the encoder requires at kickoff
+ * @inline_rotate_prefill: number of lines to prefill for inline rotation
+ */
+struct sde_encoder_kickoff_params {
+	u32 inline_rotate_prefill;
+};
+
+/**
  * sde_encoder_get_hw_resources - Populate table of required hardware resources
  * @encoder:	encoder pointer
  * @hw_res:	resource table to populate with encoder required resources
@@ -89,8 +97,10 @@
  *	Immediately: if no previous commit is outstanding.
  *	Delayed: Block until next trigger can be issued.
  * @encoder:	encoder pointer
+ * @params:	kickoff time parameters
  */
-void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+		struct sde_encoder_kickoff_params *params);
 
 /**
  * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
@@ -116,6 +126,24 @@
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
+ * enum sde_encoder_property - property tags for sde encoder
+ * @SDE_ENCODER_PROPERTY_INLINE_ROTATE_PREFILL: number of prefill lines, 0 to disable
+ */
+enum sde_encoder_property {
+	SDE_ENCODER_PROPERTY_INLINE_ROTATE_PREFILL,
+	SDE_ENCODER_PROPERTY_MAX,
+};
+
+/**
+ * sde_encoder_set_property - set the property tag to the given value
+ * @encoder: Pointer to drm encoder object
+ * @tag: property tag
+ * @val: property value
+ * return: 0 if success; errror code otherwise
+ */
+int sde_encoder_set_property(struct drm_encoder *encoder, u32 tag, u64 val);
+
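A hedged usage sketch of this setter, assuming a caller that wants four lines of inline-rotation prefill; drm_enc stands in for the caller's encoder and is an assumption of the example:

	int ret;

	ret = sde_encoder_set_property(drm_enc,
			SDE_ENCODER_PROPERTY_INLINE_ROTATE_PREFILL, 4);
	if (ret)
		SDE_ERROR("failed to set rotate prefill property, ret %d\n", ret);
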
+/**
  * sde_encoder_init - initialize virtual encoder object
  * @dev:        Pointer to drm device structure
  * @disp_info:  Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 6d50c53..da155b0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -16,6 +16,7 @@
 #define __SDE_ENCODER_PHYS_H__
 
 #include <linux/jiffies.h>
+#include <linux/sde_rsc.h>
 
 #include "sde_kms.h"
 #include "sde_hw_intf.h"
@@ -27,8 +28,6 @@
 #include "sde_encoder.h"
 #include "sde_connector.h"
 
-#include "sde_rsc.h"
-
 #define SDE_ENCODER_NAME_MAX	16
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
@@ -141,15 +140,15 @@
 			struct drm_connector_state *conn_state);
 	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
 	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
-	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
+			struct sde_encoder_kickoff_params *params);
 	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
 	void (*trigger_start)(struct sde_encoder_phys *phys_enc);
 	bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
 
 	void (*setup_misr)(struct sde_encoder_phys *phys_encs,
-			struct sde_misr_params *misr_map);
-	void (*collect_misr)(struct sde_encoder_phys *phys_enc,
-			struct sde_misr_params *misr_map);
+				bool enable, u32 frame_count);
+	u32 (*collect_misr)(struct sde_encoder_phys *phys_enc);
 	void (*hw_reset)(struct sde_encoder_phys *phys_enc);
 };
 
@@ -183,7 +182,6 @@
  * @hw_pp:		Hardware interface to the ping pong registers
  * @sde_kms:		Pointer to the sde_kms top level
  * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
- * @misr_map:		Interface for setting and collecting MISR data
  * @enabled:		Whether the encoder has enabled and running a mode
  * @split_role:		Role to play in a split-panel configuration
  * @intf_mode:		Interface mode
@@ -212,7 +210,6 @@
 	struct sde_hw_pingpong *hw_pp;
 	struct sde_kms *sde_kms;
 	struct drm_display_mode cached_mode;
-	struct sde_misr_params *misr_map;
 	enum sde_enc_split_role split_role;
 	enum sde_intf_mode intf_mode;
 	enum sde_intf intf_idx;
@@ -238,12 +235,16 @@
  * @irq_idx:	IRQ interface lookup index
  * @irq_cb:	interrupt callback
  * @hw_intf:	Hardware interface to the intf registers
+ * @timing_params: Current timing parameters
+ * @rot_prefill_line: number of lines to prefill for inline rotation; 0 to disable
  */
 struct sde_encoder_phys_vid {
 	struct sde_encoder_phys base;
 	int irq_idx[INTR_IDX_MAX];
 	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
 	struct sde_hw_intf *hw_intf;
+	struct intf_timing_params timing_params;
+	u64 rot_prefill_line;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 34bf2d2..86e292f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -182,7 +182,7 @@
 				atomic_read(&phys_enc->pending_kickoff_cnt));
 
 		SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
-				"dsi1_phy", "vbif", "vbif_nrt", "dbg_bus",
+				"dsi1_phy", "vbif", "dbg_bus",
 				"vbif_dbg_bus", "panic");
 	}
 
@@ -653,7 +653,8 @@
 }
 
 static void sde_encoder_phys_cmd_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc)
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
@@ -687,7 +688,7 @@
 			to_sde_encoder_phys_cmd(phys_enc);
 
 	if (cmd_enc->serialize_wait4pp)
-		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
+		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
 
 	/*
 	 * following statement is true serialize_wait4pp is false.
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 01dd982..29f00f7 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -211,6 +211,54 @@
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 }
 
+/*
+ * programmable_rot_fetch_config: Programs ROT to prefetch lines by offsetting
+ *	the start of fetch into the vertical front porch for cases where the
+ *	vsync pulse width and vertical back porch time are insufficient
+ *
+ *	Gets the number of lines to pre-fetch, then calculates the VSYNC
+ *	counter value. The HW layer requires the VSYNC counter of the first
+ *	pixel of the target VFP line.
+ * @phys_enc: Pointer to physical encoder
+ * @rot_fetch_lines: number of lines to prefill, or 0 to disable
+ */
+static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
+		u64 rot_fetch_lines)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+		to_sde_encoder_phys_vid(phys_enc);
+	struct intf_prog_fetch f = { 0 };
+	struct intf_timing_params *timing = &vid_enc->timing_params;
+	u32 vfp_fetch_lines = 0;
+	u32 horiz_total = 0;
+	u32 vert_total = 0;
+	u32 rot_fetch_start_vsync_counter = 0;
+	unsigned long lock_flags;
+
+	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_rot_start))
+		return;
+
+	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+	if (vfp_fetch_lines && rot_fetch_lines) {
+		vert_total = get_vertical_total(timing);
+		horiz_total = get_horizontal_total(timing);
+		if (vert_total >= (vfp_fetch_lines + rot_fetch_lines)) {
+			rot_fetch_start_vsync_counter =
+			    (vert_total - vfp_fetch_lines - rot_fetch_lines) *
+			    horiz_total + 1;
+			f.enable = 1;
+			f.fetch_start = rot_fetch_start_vsync_counter;
+		}
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc,
+		"rot_fetch_lines %llu rot_fetch_start_vsync_counter %u\n",
+		rot_fetch_lines, rot_fetch_start_vsync_counter);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
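To make the counter arithmetic above concrete, a small sketch with hypothetical timings; the names and numbers below are illustrative assumptions, not values taken from the driver or catalog:

	/* hypothetical panel: 1080x1920, horiz_total 1120, vert_total 1958 */
	u32 horiz_total = 1120;
	u32 vert_total = 1958;
	u32 vfp_fetch_lines = 6;	/* from programmable_fetch_get_num_lines() */
	u32 rot_fetch_lines = 4;	/* params->inline_rotate_prefill */
	u32 fetch_start;

	/* first pixel of the line rot_fetch_lines above the VFP fetch line */
	fetch_start = (vert_total - vfp_fetch_lines - rot_fetch_lines) *
			horiz_total + 1;	/* (1958 - 6 - 4) * 1120 + 1 */
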
 static bool sde_encoder_phys_vid_mode_fixup(
 		struct sde_encoder_phys *phys_enc,
 		const struct drm_display_mode *mode,
@@ -281,29 +329,48 @@
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
 	programmable_fetch_config(phys_enc, &timing_params);
+
+	vid_enc->timing_params = timing_params;
 }
 
 static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys_vid *vid_enc = arg;
 	struct sde_encoder_phys *phys_enc;
+	struct sde_hw_ctl *hw_ctl;
 	unsigned long lock_flags;
-	int new_cnt;
+	u32 flush_register = 0;
+	int new_cnt = -1, old_cnt = -1;
 
 	if (!vid_enc)
 		return;
 
 	phys_enc = &vid_enc->base;
+	hw_ctl = phys_enc->hw_ctl;
+
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 				phys_enc);
 
+	old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
-			new_cnt);
+	if (hw_ctl && hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	if (flush_register == 0)
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			old_cnt, new_cnt, flush_register);
+
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
 }
@@ -655,14 +722,15 @@
 }
 
 static void sde_encoder_phys_vid_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc)
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_phys_vid *vid_enc;
 	struct sde_hw_ctl *ctl;
 	int rc;
 
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!phys_enc || !params) {
+		SDE_ERROR("invalid encoder/parameters\n");
 		return;
 	}
 	vid_enc = to_sde_encoder_phys_vid(phys_enc);
@@ -681,6 +749,8 @@
 				ctl->idx, rc);
 		SDE_DBG_DUMP("panic");
 	}
+
+	programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill);
 }
 
 static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
@@ -777,23 +847,29 @@
 }
 
 static void sde_encoder_phys_vid_setup_misr(struct sde_encoder_phys *phys_enc,
-			struct sde_misr_params *misr_map)
+						bool enable, u32 frame_count)
 {
-	struct sde_encoder_phys_vid *vid_enc =
-		to_sde_encoder_phys_vid(phys_enc);
+	struct sde_encoder_phys_vid *vid_enc;
 
-	if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
-		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf, misr_map);
+	if (!phys_enc)
+		return;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+	if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+							enable, frame_count);
 }
 
-static void sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc,
-			struct sde_misr_params *misr_map)
+static u32 sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_vid *vid_enc =
-			to_sde_encoder_phys_vid(phys_enc);
+	struct sde_encoder_phys_vid *vid_enc;
 
-	if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr)
-		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf, misr_map);
+	if (!phys_enc)
+		return 0;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+	return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
 }
 
 static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
@@ -866,13 +942,6 @@
 		goto fail;
 	}
 
-	phys_enc->misr_map = kzalloc(sizeof(struct sde_misr_params),
-						GFP_KERNEL);
-	if (!phys_enc->misr_map) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
 	SDE_DEBUG_VIDENC(vid_enc, "\n");
 
 	sde_encoder_phys_vid_init_ops(&phys_enc->ops);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 5187627..28a2b16 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -777,9 +777,11 @@
 /**
  * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
  * @phys_enc:	Pointer to physical encoder
+ * @params:	kickoff parameters
  */
 static void sde_encoder_phys_wb_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc)
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	int ret;
@@ -992,7 +994,7 @@
 		goto exit;
 
 	phys_enc->enable_state = SDE_ENC_DISABLING;
-	sde_encoder_phys_wb_prepare_for_kickoff(phys_enc);
+	sde_encoder_phys_wb_prepare_for_kickoff(phys_enc, NULL);
 	if (phys_enc->hw_ctl->ops.trigger_flush)
 		phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
 	sde_encoder_helper_trigger_start(phys_enc);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index acfcb5e..01d0d20 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
 #include <uapi/drm/drm_fourcc.h>
 #include <uapi/media/msm_media_info.h>
 
@@ -132,217 +134,217 @@
 static const struct sde_format sde_format_map[] = {
 	INTERLEAVED_RGB_FMT(ARGB8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ABGR8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XBGR8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBA8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRA8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRX8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		false, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XRGB8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		false, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBX8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		false, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGB888,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 3, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR888,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 3, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGB565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ARGB1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ABGR1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBA5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRA5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XRGB1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XBGR1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBX5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRX5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ARGB4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ABGR4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBA4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRA4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XRGB4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XBGR4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBX4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRX4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRA1010102,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		true, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBA1010102,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		true, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ABGR2101010,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ARGB2101010,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		true, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XRGB2101010,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		false, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRX1010102,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		false, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XBGR2101010,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		false, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBX1010102,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		false, 4, SDE_FORMAT_FLAG_DX,
 		SDE_FETCH_LINEAR, 1),
 
@@ -408,46 +410,123 @@
 };
 
 /*
+ * A5x tile format tables:
+ * These tables hold the tile formats supported on A5x targets.
+ */
+static const struct sde_format sde_format_map_tile[] = {
+	INTERLEAVED_RGB_FMT(ARGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 1),
+
+	PSEUDO_YUV_FMT(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_UBWC, 2),
+
+	PSEUDO_YUV_FMT(NV21,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_UBWC, 2),
+};
+
+static const struct sde_format sde_format_map_p010_tile[] = {
+	PSEUDO_YUV_FMT_LOOSE(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_UBWC, 2),
+};
+
+static const struct sde_format sde_format_map_tp10_tile[] = {
+	PSEUDO_YUV_FMT(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_UBWC, 2),
+};
+
+/*
  * UBWC formats table:
  * This table holds the UBWC formats supported.
  * If a compression ratio needs to be used for this or any other format,
  * the data will be passed by user-space.
  */
 static const struct sde_format sde_format_map_ubwc[] = {
-	INTERLEAVED_RGB_FMT(RGB565,
+	INTERLEAVED_RGB_FMT(BGR565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
-		false, 2, 0,
+		false, 2, SDE_FORMAT_FLAG_COMPRESSED,
 		SDE_FETCH_UBWC, 2),
 
-	INTERLEAVED_RGB_FMT(RGBA8888,
+	INTERLEAVED_RGB_FMT(ABGR8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, 0,
+		true, 4, SDE_FORMAT_FLAG_COMPRESSED,
 		SDE_FETCH_UBWC, 2),
 
-	INTERLEAVED_RGB_FMT(RGBX8888,
+	INTERLEAVED_RGB_FMT(XBGR8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		false, 4, 0,
+		false, 4, SDE_FORMAT_FLAG_COMPRESSED,
 		SDE_FETCH_UBWC, 2),
 
-	INTERLEAVED_RGB_FMT(RGBA1010102,
+	INTERLEAVED_RGB_FMT(ABGR2101010,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
+		true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
 		SDE_FETCH_UBWC, 2),
 
-	INTERLEAVED_RGB_FMT(RGBX1010102,
+	INTERLEAVED_RGB_FMT(XBGR2101010,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
+		true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
 		SDE_FETCH_UBWC, 2),
 
 	PSEUDO_YUV_FMT(NV12,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV |
+				SDE_FORMAT_FLAG_COMPRESSED,
 		SDE_FETCH_UBWC, 4),
 };
 
@@ -463,7 +542,8 @@
 	PSEUDO_YUV_FMT_LOOSE(NV12,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
+				SDE_FORMAT_FLAG_COMPRESSED),
 		SDE_FETCH_UBWC, 4),
 };
 
@@ -471,7 +551,8 @@
 	PSEUDO_YUV_FMT(NV12,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
+				SDE_FORMAT_FLAG_COMPRESSED),
 		SDE_FETCH_UBWC, 4),
 };
 
@@ -509,11 +590,11 @@
 static int _sde_format_get_media_color_ubwc(const struct sde_format *fmt)
 {
 	static const struct sde_media_color_map sde_media_ubwc_map[] = {
-		{DRM_FORMAT_RGBA8888, COLOR_FMT_RGBA8888_UBWC},
-		{DRM_FORMAT_RGBX8888, COLOR_FMT_RGBA8888_UBWC},
-		{DRM_FORMAT_RGBA1010102, COLOR_FMT_RGBA1010102_UBWC},
-		{DRM_FORMAT_RGBX1010102, COLOR_FMT_RGBA1010102_UBWC},
-		{DRM_FORMAT_RGB565, COLOR_FMT_RGB565_UBWC},
+		{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
 	};
 	int color_fmt = -1;
 	int i;
@@ -545,6 +626,7 @@
 {
 	int i;
 	int color;
+	bool meta = SDE_FORMAT_IS_UBWC(fmt);
 
 	memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
 	layout->format = fmt;
@@ -564,7 +646,7 @@
 		uint32_t y_meta_scanlines = 0;
 		uint32_t uv_meta_scanlines = 0;
 
-		layout->num_planes = 4;
+		layout->num_planes = 2;
 		layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
 		y_sclines = VENUS_Y_SCANLINES(color, height);
 		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
@@ -575,6 +657,10 @@
 		layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
 			uv_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
 
+		if (!meta)
+			goto done;
+
+		layout->num_planes += 2;
 		layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
 		y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
 		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
@@ -588,19 +674,23 @@
 	} else {
 		uint32_t rgb_scanlines, rgb_meta_scanlines;
 
-		layout->num_planes = 3;
+		layout->num_planes = 1;
 
 		layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
 		rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
 		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
 			rgb_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
 
+		if (!meta)
+			goto done;
+		layout->num_planes += 2;
 		layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
 		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
 		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
 			rgb_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
 	}
 
+done:
 	for (i = 0; i < SDE_MAX_PLANES; i++)
 		layout->total_size += layout->plane_size[i];
 
@@ -667,7 +757,7 @@
 	return 0;
 }
 
-static int _sde_format_get_plane_sizes(
+int sde_format_get_plane_sizes(
 		const struct sde_format *fmt,
 		const uint32_t w,
 		const uint32_t h,
@@ -683,12 +773,30 @@
 		return -ERANGE;
 	}
 
-	if (SDE_FORMAT_IS_UBWC(fmt))
+	if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt))
 		return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
 
 	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
 }
 
+int sde_format_get_block_size(const struct sde_format *fmt,
+		uint32_t *w, uint32_t *h)
+{
+	if (!fmt || !w || !h) {
+		DRM_ERROR("invalid pointer\n");
+		return -EINVAL;
+	}
+
+	/* TP10 is 96x96 and all others are 128x128 */
+	if (SDE_FORMAT_IS_YUV(fmt) && SDE_FORMAT_IS_DX(fmt) &&
+			(fmt->num_planes == 2) && fmt->unpack_tight)
+		*w = *h = 96;
+	else
+		*w = *h = 128;
+
+	return 0;
+}
+
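As a usage sketch of the two helpers made public here; the format pointer and the 1920x1080 dimensions are assumptions of the example:

	const struct sde_format *fmt;	/* assumed already resolved by lookup */
	struct sde_hw_fmt_layout layout;
	u32 blk_w, blk_h;

	/* per the rule above, TP10 (YUV + DX, 2-plane, tight) yields 96x96 */
	if (!sde_format_get_block_size(fmt, &blk_w, &blk_h))
		pr_debug("block size %ux%u\n", blk_w, blk_h);

	/* fill per-plane pitch/size for a hypothetical 1920x1080 buffer */
	if (!sde_format_get_plane_sizes(fmt, 1920, 1080, &layout))
		pr_debug("total buffer size %u\n", layout.total_size);
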
 uint32_t sde_format_get_framebuffer_size(
 		const uint32_t format,
 		const uint32_t width,
@@ -703,7 +811,7 @@
 	if (!fmt)
 		return 0;
 
-	if (_sde_format_get_plane_sizes(fmt, width, height, &layout))
+	if (sde_format_get_plane_sizes(fmt, width, height, &layout))
 		layout.total_size = 0;
 
 	return layout.total_size;
@@ -715,6 +823,7 @@
 		struct sde_hw_fmt_layout *layout)
 {
 	uint32_t base_addr;
+	bool meta;
 
 	if (!fb || !layout) {
 		DRM_ERROR("invalid pointers\n");
@@ -727,6 +836,8 @@
 		return -EFAULT;
 	}
 
+	meta = SDE_FORMAT_IS_UBWC(layout->format);
+
 	/* Per-format logic for verifying active planes */
 	if (SDE_FORMAT_IS_YUV(layout->format)) {
 		/************************************************/
@@ -756,6 +867,9 @@
 		layout->plane_addr[1] = base_addr + layout->plane_size[0]
 			+ layout->plane_size[2] + layout->plane_size[3];
 
+		if (!meta)
+			goto done;
+
 		/* configure Y metadata plane */
 		layout->plane_addr[2] = base_addr;
 
@@ -783,10 +897,14 @@
 
 		layout->plane_addr[0] = base_addr + layout->plane_size[2];
 		layout->plane_addr[1] = 0;
+
+		if (!meta)
+			goto done;
+
 		layout->plane_addr[2] = base_addr;
 		layout->plane_addr[3] = 0;
 	}
-
+done:
 	return 0;
 }
 
@@ -840,7 +958,7 @@
 	layout->format = to_sde_format(msm_framebuffer_format(fb));
 
 	/* Populate the plane sizes etc via get_format */
-	ret = _sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
+	ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
 			layout);
 	if (ret)
 		return ret;
@@ -849,7 +967,8 @@
 		plane_addr[i] = layout->plane_addr[i];
 
 	/* Populate the addresses given the fb */
-	if (SDE_FORMAT_IS_UBWC(layout->format))
+	if (SDE_FORMAT_IS_UBWC(layout->format) ||
+			SDE_FORMAT_IS_TILE(layout->format))
 		ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
 	else
 		ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
@@ -943,7 +1062,7 @@
 	fmt = to_sde_format(msm_fmt);
 	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
 
-	ret = _sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+	ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
 			&layout);
 	if (ret)
 		return ret;
@@ -981,14 +1100,14 @@
 	 * All planes used must specify the same modifier.
 	 */
 	if (modifiers_len && !modifiers) {
-		DRM_ERROR("invalid modifiers array\n");
+		SDE_ERROR("invalid modifiers array\n");
 		return NULL;
 	} else if (modifiers && modifiers_len && modifiers[0]) {
 		mod0 = modifiers[0];
-		DBG("plane format modifier 0x%llX", mod0);
+		SDE_DEBUG("plane format modifier 0x%llX\n", mod0);
 		for (i = 1; i < modifiers_len; i++) {
 			if (modifiers[i] != mod0) {
-				DRM_ERROR("bad fmt mod 0x%llX on plane %d\n",
+				SDE_ERROR("bad fmt mod 0x%llX on plane %d\n",
 					modifiers[i], i);
 				return NULL;
 			}
@@ -1001,29 +1120,55 @@
 		map_size = ARRAY_SIZE(sde_format_map);
 		break;
 	case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+	case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
 		map = sde_format_map_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_ubwc);
-		DBG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED", format);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+				format);
 		break;
 	case DRM_FORMAT_MOD_QCOM_DX:
 		map = sde_format_map_p010;
 		map_size = ARRAY_SIZE(sde_format_map_p010);
-		DBG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX", format);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX\n", format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+			DRM_FORMAT_MOD_QCOM_TILE):
 		map = sde_format_map_p010_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_p010_ubwc);
-		DBG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX", format);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
+				format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
 		DRM_FORMAT_MOD_QCOM_TIGHT):
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_TIGHT | DRM_FORMAT_MOD_QCOM_TILE):
 		map = sde_format_map_tp10_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc);
-		DBG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT",
+		SDE_DEBUG(
+			"found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
 			format);
 		break;
+	case DRM_FORMAT_MOD_QCOM_TILE:
+		map = sde_format_map_tile;
+		map_size = ARRAY_SIZE(sde_format_map_tile);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE\n", format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
+		map = sde_format_map_p010_tile;
+		map_size = ARRAY_SIZE(sde_format_map_p010_tile);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX\n",
+				format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
+			DRM_FORMAT_MOD_QCOM_TIGHT):
+		map = sde_format_map_tp10_tile;
+		map_size = ARRAY_SIZE(sde_format_map_tp10_tile);
+		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
+				format);
+		break;
 	default:
-		DRM_ERROR("unsupported format modifier %llX\n", mod0);
+		SDE_ERROR("unsupported format modifier %llX\n", mod0);
 		return NULL;
 	}
 
@@ -1035,10 +1180,10 @@
 	}
 
 	if (fmt == NULL)
-		DRM_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
+		SDE_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
 				format, mod0);
 	else
-		DBG("fmt %s mod 0x%llX ubwc %d yuv %d",
+		SDE_DEBUG("fmt %s mod 0x%llX ubwc %d yuv %d\n",
 				drm_get_format_name(format), mod0,
 				SDE_FORMAT_IS_UBWC(fmt),
 				SDE_FORMAT_IS_YUV(fmt));
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 894dee9..40aab22 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -58,6 +58,33 @@
 		uint32_t pixel_formats_max);
 
 /**
+ * sde_format_get_plane_sizes - calculate the plane sizes and layout of a
+ *	buffer with the given format
+ * @fmt:             pointer to sde_format
+ * @w:               width of the buffer
+ * @h:               height of the buffer
+ * @layout:          layout to populate with plane pitches, sizes and total size
+ *
+ * Return: 0 on success; error code otherwise
+ */
+int sde_format_get_plane_sizes(
+		const struct sde_format *fmt,
+		const uint32_t w,
+		const uint32_t h,
+		struct sde_hw_fmt_layout *layout);
+
+/**
+ * sde_format_get_block_size - get block size of given format when
+ *	operating in block mode
+ * @fmt:             pointer to sde_format
+ * @w:               pointer to width of the block
+ * @h:               pointer to height of the block
+ *
+ * Return: 0 on success; error code otherwise
+ */
+int sde_format_get_block_size(const struct sde_format *fmt,
+		uint32_t *w, uint32_t *h);
+
+/**
  * sde_format_check_modified_format - validate format and buffers for
  *                   sde non-standard, i.e. modified format
  * @kms:             kms driver
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
new file mode 100644
index 0000000..7d2f67d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -0,0 +1,900 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_catalog.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_lm.h"
+#include "sde_ad4.h"
+
+#define IDLE_2_RUN(x) ((x) == (ad4_init | ad4_cfg | ad4_mode | ad4_input))
+#define MERGE_WIDTH_RIGHT 4
+#define MERGE_WIDTH_LEFT 3
+
+enum ad4_ops_bitmask {
+	ad4_init = BIT(AD_INIT),
+	ad4_cfg = BIT(AD_CFG),
+	ad4_mode = BIT(AD_MODE),
+	ad4_input = BIT(AD_INPUT),
+	ad4_ops_max = BIT(31),
+};
+
+enum ad4_state {
+	ad4_state_idle,
+	ad4_state_run,
+	ad4_state_max,
+};
+
+typedef int (*ad4_prop_setup)(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *ad);
+
+static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+
+static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
+	[ad4_state_idle][AD_MODE] = ad4_mode_setup_common,
+	[ad4_state_idle][AD_INIT] = ad4_init_setup_idle,
+	[ad4_state_idle][AD_CFG] = ad4_cfg_setup_idle,
+	[ad4_state_idle][AD_INPUT] = ad4_input_setup_idle,
+	[ad4_state_idle][AD_SUSPEND] = ad4_suspend_setup,
+	[ad4_state_idle][AD_ASSERTIVE] = ad4_assertive_setup,
+	[ad4_state_idle][AD_BACKLIGHT] = ad4_backlight_setup,
+	[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
+	[ad4_state_run][AD_INIT] = ad4_init_setup,
+	[ad4_state_run][AD_CFG] = ad4_cfg_setup,
+	[ad4_state_run][AD_INPUT] = ad4_input_setup,
+	[ad4_state_run][AD_SUSPEND] = ad4_suspend_setup,
+	[ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup,
+	[ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup,
+};
+
+struct ad4_info {
+	enum ad4_state state;
+	u32 completed_ops_mask;
+	bool ad4_support;
+	enum ad4_modes cached_mode;
+	u32 cached_als;
+};
+
+static struct ad4_info info[DSPP_MAX] = {
+	[DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF},
+	[DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF},
+	[DSPP_2] = {ad4_state_max, 0, false, AD4_OFF},
+	[DSPP_3] = {ad4_state_max, 0, false, AD4_OFF},
+};
+
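A sketch of how the IDLE_2_RUN gate above is intended to be used, assuming the idle-state handlers accumulate their op bit in completed_ops_mask before checking the gate (the exact transition code lives in the handlers below and is an assumption here):

	info[dspp->idx].completed_ops_mask |= ad4_mode;
	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
		/* init, cfg, mode and input have all been programmed once */
		info[dspp->idx].state = ad4_state_run;
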
+void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *ad_cfg)
+{
+	int ret = 0;
+	struct sde_ad_hw_cfg *cfg = ad_cfg;
+
+	ret = ad4_params_check(dspp, ad_cfg);
+	if (ret)
+		return;
+
+	ret = prop_set_func[info[dspp->idx].state][cfg->prop](dspp, ad_cfg);
+	if (ret)
+		DRM_ERROR("op failed %d ret %d\n", cfg->prop, ret);
+}
+
+int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop)
+{
+	if (!dspp || !prop) {
+		DRM_ERROR("invalid params dspp %pK prop %pK\n", dspp, prop);
+		return -EINVAL;
+	}
+
+	if (*prop >= AD_PROPMAX) {
+		DRM_ERROR("invalid prop set %d\n", *prop);
+		return -EINVAL;
+	}
+
+	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
+		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	struct sde_hw_mixer *hw_lm;
+
+	if (!dspp || !cfg || !cfg->hw_cfg) {
+		DRM_ERROR("invalid dspp %pK cfg %pk hw_cfg %pK\n",
+			dspp, cfg, ((cfg) ? (cfg->hw_cfg) : NULL));
+		return -EINVAL;
+	}
+
+	if (!cfg->hw_cfg->mixer_info) {
+		DRM_ERROR("invalid mixed info\n");
+		return -EINVAL;
+	}
+
+	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
+		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	if (cfg->prop >= AD_PROPMAX) {
+		DRM_ERROR("invalid prop set %d\n", cfg->prop);
+		return -EINVAL;
+	}
+
+	if (info[dspp->idx].state >= ad4_state_max) {
+		DRM_ERROR("in max state for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	if (!prop_set_func[info[dspp->idx].state][cfg->prop]) {
+		DRM_ERROR("prop set not implemented for state %d prop %d\n",
+				info[dspp->idx].state, cfg->prop);
+		return -EINVAL;
+	}
+
+	if (!cfg->hw_cfg->num_of_mixers ||
+	    cfg->hw_cfg->num_of_mixers > CRTC_DUAL_MIXERS) {
+		DRM_ERROR("invalid mixer cnt %d\n",
+				cfg->hw_cfg->num_of_mixers);
+		return -EINVAL;
+	}
+	hw_lm = cfg->hw_cfg->mixer_info;
+
+	if (cfg->hw_cfg->num_of_mixers == 1 &&
+	    hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
+	    hw_lm->cfg.out_width != cfg->hw_cfg->displayh) {
+		DRM_ERROR("single_lm lmh %d lmw %d displayh %d displayw %d\n",
+			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
+			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
+		return -EINVAL;
+	} else if (hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
+		    hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 2)) {
+		DRM_ERROR("dual_lm lmh %d lmw %d displayh %d displayw %d\n",
+			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
+			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode)
+{
+	u32 blk_offset;
+
+	blk_offset = 0x04;
+	if (mode == AD4_OFF) {
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				0x101);
+		info[dspp->idx].state = ad4_state_idle;
+		info[dspp->idx].completed_ops_mask = 0;
+	} else {
+		info[dspp->idx].state = ad4_state_run;
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				0);
+	}
+
+	return 0;
+}
+
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+	u32 frame_start, frame_end, proc_start, proc_end;
+	struct sde_hw_mixer *hw_lm;
+	u32 blk_offset, tile_ctl, val, i;
+	u32 off1, off2, off3, off4, off5, off6;
+	struct drm_msm_ad4_init *init;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_init;
+		return 0;
+	}
+
+	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
+			cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	hw_lm = cfg->hw_cfg->mixer_info;
+	if (cfg->hw_cfg->num_of_mixers == 1) {
+		frame_start = 0;
+		frame_end = 0xffff;
+		proc_start = 0;
+		proc_end = 0xffff;
+		tile_ctl = 0;
+	} else {
+		tile_ctl = 0x5;
+		if (hw_lm->cfg.right_mixer) {
+			frame_start = (cfg->hw_cfg->displayh >> 1) -
+				MERGE_WIDTH_RIGHT;
+			frame_end = cfg->hw_cfg->displayh - 1;
+			proc_start = (cfg->hw_cfg->displayh >> 1);
+			proc_end = frame_end;
+			tile_ctl |= 0x10;
+		} else {
+			frame_start = 0;
+			frame_end = (cfg->hw_cfg->displayh >> 1) +
+				MERGE_WIDTH_LEFT;
+			proc_start = 0;
+			proc_end = (cfg->hw_cfg->displayh >> 1) - 1;
+		}
+	}
+
+	init = cfg->hw_cfg->payload;
+	blk_offset = 8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    init->init_param_009);
+
+	blk_offset = 0xc;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    init->init_param_010);
+
+	init->init_param_012 = cfg->hw_cfg->displayv & (BIT(17) - 1);
+	init->init_param_011 = cfg->hw_cfg->displayh & (BIT(17) - 1);
+	blk_offset = 0x10;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    ((init->init_param_011 << 16) | init->init_param_012));
+
+	blk_offset = 0x14;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			tile_ctl);
+
+	blk_offset = 0x44;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		((((init->init_param_013) & (BIT(17) - 1)) << 16) |
+		 (init->init_param_014 & (BIT(17) - 1))));
+
+	blk_offset = 0x5c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_015 & (BIT(16) - 1)));
+	blk_offset = 0x60;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_016 & (BIT(8) - 1)));
+	blk_offset = 0x64;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_017 & (BIT(12) - 1)));
+	blk_offset = 0x68;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_018 & (BIT(12) - 1)));
+	blk_offset = 0x6c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_019 & (BIT(12) - 1)));
+	blk_offset = 0x70;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_020 & (BIT(16) - 1)));
+	blk_offset = 0x74;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_021 & (BIT(8) - 1)));
+	blk_offset = 0x78;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_022 & (BIT(8) - 1)));
+	blk_offset = 0x7c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_023 & (BIT(16) - 1)));
+	blk_offset = 0x80;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_024 & (BIT(16) - 1)) << 16) |
+		((init->init_param_025 & (BIT(16) - 1)))));
+	blk_offset = 0x84;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_026 & (BIT(16) - 1)) << 16) |
+		((init->init_param_027 & (BIT(16) - 1)))));
+
+	blk_offset = 0x90;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_028 & (BIT(16) - 1)));
+	blk_offset = 0x94;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_029 & (BIT(16) - 1)));
+
+	blk_offset = 0x98;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_035 & (BIT(16) - 1)) << 16) |
+		((init->init_param_030 & (BIT(16) - 1)))));
+
+	blk_offset = 0x9c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_032 & (BIT(16) - 1)) << 16) |
+		((init->init_param_031 & (BIT(16) - 1)))));
+	blk_offset = 0xa0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_034 & (BIT(16) - 1)) << 16) |
+		((init->init_param_033 & (BIT(16) - 1)))));
+
+	blk_offset = 0xb4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_036 & (BIT(8) - 1)));
+	blk_offset = 0xcc;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_037 & (BIT(8) - 1)));
+	blk_offset = 0xc0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_038 & (BIT(8) - 1)));
+	blk_offset = 0xd8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_039 & (BIT(8) - 1)));
+
+	blk_offset = 0xe8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_040 & (BIT(16) - 1)));
+
+	blk_offset = 0xf4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_041 & (BIT(8) - 1)));
+
+	blk_offset = 0x100;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_042 & (BIT(16) - 1)));
+
+	blk_offset = 0x10c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_043 & (BIT(8) - 1)));
+
+	blk_offset = 0x120;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_044 & (BIT(16) - 1)));
+	blk_offset = 0x124;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_045 & (BIT(16) - 1)));
+
+	blk_offset = 0x128;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_046 & (BIT(1) - 1)));
+	blk_offset = 0x12c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_047 & (BIT(8) - 1)));
+
+	blk_offset = 0x13c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_048 & (BIT(5) - 1)));
+	blk_offset = 0x140;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_049 & (BIT(8) - 1)));
+
+	blk_offset = 0x144;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_050 & (BIT(8) - 1)));
+	blk_offset = 0x148;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_051 & (BIT(8) - 1)) << 8) |
+		((init->init_param_052 & (BIT(8) - 1)))));
+
+	blk_offset = 0x14c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_053 & (BIT(10) - 1)));
+	blk_offset = 0x150;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_054 & (BIT(10) - 1)));
+	blk_offset = 0x154;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_055 & (BIT(8) - 1)));
+
+	blk_offset = 0x158;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_056 & (BIT(8) - 1)));
+	blk_offset = 0x164;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_057 & (BIT(8) - 1)));
+	blk_offset = 0x168;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_058 & (BIT(4) - 1)));
+
+	blk_offset = 0x17c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(frame_start & (BIT(16) - 1)));
+	blk_offset = 0x180;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(frame_end & (BIT(16) - 1)));
+	blk_offset = 0x184;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(proc_start & (BIT(16) - 1)));
+	blk_offset = 0x188;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(proc_end & (BIT(16) - 1)));
+
+	blk_offset = 0x18c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_059 & (BIT(4) - 1)));
+
+	blk_offset = 0x190;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_061 & (BIT(8) - 1)) << 8) |
+		((init->init_param_060 & (BIT(8) - 1)))));
+
+	blk_offset = 0x194;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_062 & (BIT(10) - 1)));
+
+	blk_offset = 0x1a0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_063 & (BIT(10) - 1)));
+	blk_offset = 0x1a4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_064 & (BIT(10) - 1)));
+	blk_offset = 0x1a8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_065 & (BIT(10) - 1)));
+	blk_offset = 0x1ac;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_066 & (BIT(8) - 1)));
+	blk_offset = 0x1b0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_067 & (BIT(8) - 1)));
+	blk_offset = 0x1b4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_068 & (BIT(6) - 1)));
+
+	blk_offset = 0x460;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_069 & (BIT(16) - 1)));
+	blk_offset = 0x464;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_070 & (BIT(10) - 1)));
+	blk_offset = 0x468;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_071 & (BIT(10) - 1)));
+	blk_offset = 0x46c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_072 & (BIT(10) - 1)));
+	blk_offset = 0x470;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_073 & (BIT(8) - 1)));
+	blk_offset = 0x474;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_074 & (BIT(10) - 1)));
+	blk_offset = 0x478;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_075 & (BIT(10) - 1)));
+
+	off1 = 0x1c0;
+	off2 = 0x210;
+	off3 = 0x260;
+	off4 = 0x2b0;
+	off5 = 0x380;
+	off6 = 0x3d0;
+	for (i = 0; i < AD4_LUT_GRP0_SIZE - 1; i = i + 2) {
+		val = (init->init_param_001[i] & (BIT(16) - 1));
+		val |= ((init->init_param_001[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+		off1 += 4;
+
+		val = (init->init_param_002[i] & (BIT(16) - 1));
+		val |= ((init->init_param_002[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+		off2 += 4;
+
+		val = (init->init_param_003[i] & (BIT(16) - 1));
+		val |= ((init->init_param_003[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
+		off3 += 4;
+
+		val = (init->init_param_004[i] & (BIT(16) - 1));
+		val |= ((init->init_param_004[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
+		off4 += 4;
+
+		val = (init->init_param_007[i] & (BIT(16) - 1));
+		val |= ((init->init_param_007[i + 1] &
+				(BIT(16) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
+		off5 += 4;
+
+		val = (init->init_param_008[i] & (BIT(12) - 1));
+		val |= ((init->init_param_008[i + 1] &
+				(BIT(12) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
+		off6 += 4;
+	}
+	/* write last index data */
+	i = AD4_LUT_GRP0_SIZE - 1;
+	val = ((init->init_param_001[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+	val = ((init->init_param_002[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+	val = ((init->init_param_003[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
+	val = ((init->init_param_004[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
+	val = ((init->init_param_007[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
+	val = ((init->init_param_008[i] & (BIT(12) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
+
+	off1 = 0x300;
+	off2 = 0x340;
+	for (i = 0; i < AD4_LUT_GRP1_SIZE; i = i + 2) {
+		val = (init->init_param_005[i] & (BIT(16) - 1));
+		val |= ((init->init_param_005[i + 1] &
+				(BIT(16) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+		off1 += 4;
+
+		val = (init->init_param_006[i] & (BIT(16) - 1));
+		val |= ((init->init_param_006[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+		off2 += 4;
+	}
+
+	return 0;
+}
+
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+	u32 blk_offset, val;
+	struct drm_msm_ad4_cfg *ad_cfg;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+		return 0;
+	}
+
+	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_cfg)) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(struct drm_msm_ad4_cfg), cfg->hw_cfg->len,
+			cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+	ad_cfg = cfg->hw_cfg->payload;
+
+	blk_offset = 0x18;
+	val = (ad_cfg->cfg_param_002 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_001 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_004 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_003 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_005 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_006 & (BIT(7) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x30;
+	val = (ad_cfg->cfg_param_007 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_009 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_010 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = ((ad_cfg->cfg_param_011 & (BIT(16) - 1)) << 16);
+	val |= (ad_cfg->cfg_param_012 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x88;
+	val = (ad_cfg->cfg_param_013 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_014 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xa4;
+	val = (ad_cfg->cfg_param_015 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_016 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_017 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_018 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xc4;
+	val = (ad_cfg->cfg_param_019 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_020 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xb8;
+	val = (ad_cfg->cfg_param_021 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_022 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xd0;
+	val = (ad_cfg->cfg_param_023 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_024 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xdc;
+	val = (ad_cfg->cfg_param_025 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_026 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_027 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_028 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_029 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xec;
+	val = (ad_cfg->cfg_param_030 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_031 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xf8;
+	val = (ad_cfg->cfg_param_032 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_033 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x104;
+	val = (ad_cfg->cfg_param_034 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_035 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x110;
+	val = (ad_cfg->cfg_param_036 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_037 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_038 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_039 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x134;
+	val = (ad_cfg->cfg_param_040 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x15c;
+	val = (ad_cfg->cfg_param_042 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_043 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x16c;
+	val = (ad_cfg->cfg_param_044 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_045 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_046 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	return 0;
+}
+
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, als;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x28;
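+	/* a missing payload resets the cached ALS input back to zero */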
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		als = 0;
+		val = &als;
+	}
+	info[dspp->idx].cached_als = *val;
+	info[dspp->idx].completed_ops_mask |= ad4_input;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(16) - 1)));
+
+	return 0;
+}
+
+static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	info[dspp->idx].state = ad4_state_idle;
+	info[dspp->idx].completed_ops_mask = 0;
+	return 0;
+}
+
+static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	if (cfg->hw_cfg->len != sizeof(u64) || !cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	info[dspp->idx].cached_mode = *((enum ad4_modes *)
+					(cfg->hw_cfg->payload));
+	info[dspp->idx].completed_ops_mask |= ad4_mode;
+
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_init;
+		return 0;
+	}
+
+	ret = ad4_init_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_init;
+
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+		return 0;
+	}
+
+	ret = ad4_cfg_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_cfg;
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	return 0;
+}
+
+static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	ret = ad4_input_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_input;
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, assertive;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x30;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		assertive = 0;
+		val = &assertive;
+	}
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(8) - 1)));
+	return 0;
+}
+
+static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, bl;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x2c;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		bl = 0;
+		val = &bl;
+	}
+
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(16) - 1)));
+	return 0;
+}
+
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp)
+{
+	if (!dspp || !resp) {
+		DRM_ERROR("invalid params dspp %pK resp %pK\n", dspp, resp);
+		return;
+	}
+
+	switch (event) {
+	case AD4_BACKLIGHT:
+		*resp = SDE_REG_READ(&dspp->hw,
+				dspp->cap->sblk->ad.base + 0x48);
+		break;
+	default:
+		break;
+	}
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.c b/drivers/gpu/drm/msm/sde/sde_hw_blk.c
new file mode 100644
index 0000000..f59864d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_blk.h"
+
+/* Serialization lock for sde_hw_blk_list */
+static DEFINE_MUTEX(sde_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(sde_hw_blk_list);
+
+/**
+ * sde_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object
+ * @type: hw block type - enum sde_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
+		struct sde_hw_blk_ops *ops)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&hw_blk->list);
+	hw_blk->type = type;
+	hw_blk->id = id;
+	atomic_set(&hw_blk->refcount, 0);
+
+	if (ops)
+		hw_blk->ops = *ops;
+
+	mutex_lock(&sde_hw_blk_lock);
+	list_add(&hw_blk->list, &sde_hw_blk_list);
+	mutex_unlock(&sde_hw_blk_lock);
+
+	return 0;
+}
+
+/**
+ * sde_hw_blk_destroy - destroy hw block object.
+ * @hw_blk:  pointer to hw block object
+ * return: none
+ */
+void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	if (atomic_read(&hw_blk->refcount))
+		pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+				hw_blk->id);
+
+	mutex_lock(&sde_hw_blk_lock);
+	list_del(&hw_blk->list);
+	mutex_unlock(&sde_hw_blk_lock);
+}
+
+/**
+ * sde_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id)
+{
+	struct sde_hw_blk *curr;
+	int rc, refcount;
+
+	if (!hw_blk) {
+		mutex_lock(&sde_hw_blk_lock);
+		list_for_each_entry(curr, &sde_hw_blk_list, list) {
+			if ((curr->type != type) ||
+					(id >= 0 && curr->id != id) ||
+					(id < 0 &&
+						atomic_read(&curr->refcount)))
+				continue;
+
+			hw_blk = curr;
+			break;
+		}
+		mutex_unlock(&sde_hw_blk_lock);
+	}
+
+	if (!hw_blk) {
+		pr_debug("no hw_blk:%d\n", type);
+		return NULL;
+	}
+
+	refcount = atomic_inc_return(&hw_blk->refcount);
+
+	if (refcount == 1 && hw_blk->ops.start) {
+		rc = hw_blk->ops.start(hw_blk);
+		if (rc) {
+			pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
+			goto error_start;
+		}
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+			hw_blk->id, refcount);
+	return hw_blk;
+
+error_start:
+	sde_hw_blk_put(hw_blk);
+	return ERR_PTR(rc);
+}
+
+/**
+ * sde_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ * return: none
+ */
+void sde_hw_blk_put(struct sde_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+			atomic_read(&hw_blk->refcount));
+
+	if (!atomic_read(&hw_blk->refcount)) {
+		pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+		return;
+	}
+
+	if (atomic_dec_return(&hw_blk->refcount))
+		return;
+
+	if (hw_blk->ops.stop)
+		hw_blk->ops.stop(hw_blk);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.h b/drivers/gpu/drm/msm/sde/sde_hw_blk.h
new file mode 100644
index 0000000..d979091
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_BLK_H
+#define _SDE_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct sde_hw_blk;
+
+/**
+ * struct sde_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct sde_hw_blk_ops {
+	int (*start)(struct sde_hw_blk *);
+	void (*stop)(struct sde_hw_blk *);
+};
+
+/**
+ * struct sde_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct sde_hw_blk {
+	struct list_head list;
+	u32 type;
+	int id;
+	atomic_t refcount;
+	struct sde_hw_blk_ops ops;
+};
+
+int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
+		struct sde_hw_blk_ops *ops);
+void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk);
+
+struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id);
+void sde_hw_blk_put(struct sde_hw_blk *hw_blk);
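+
+/*
+ * Illustrative usage sketch only (not part of this driver): a hardware
+ * block wrapper embeds struct sde_hw_blk and registers itself on init.
+ * Names such as my_wrapper/my_blk_start are placeholders.
+ *
+ *	static int my_blk_start(struct sde_hw_blk *blk) { return 0; }
+ *	static void my_blk_stop(struct sde_hw_blk *blk) { }
+ *	static struct sde_hw_blk_ops my_blk_ops = {
+ *		.start = my_blk_start,
+ *		.stop = my_blk_stop,
+ *	};
+ *
+ *	sde_hw_blk_init(&my_wrapper->base, SDE_HW_BLK_ROT, ROT_0, &my_blk_ops);
+ *	blk = sde_hw_blk_get(NULL, SDE_HW_BLK_ROT, -1);	start() on first get
+ *	sde_hw_blk_put(blk);				stop() on last put
+ *	sde_hw_blk_destroy(&my_wrapper->base);
+ */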
+#endif /*_SDE_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 369d5d1..9285487 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -13,6 +13,8 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/slab.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
 
 #include "sde_hw_mdss.h"
 #include "sde_hw_catalog.h"
@@ -89,6 +91,8 @@
  */
 #define PROP_BITVALUE_ACCESS(p, i, j, k)	((p + i)->bit_value[j][k])
 
+#define DEFAULT_SBUF_HEADROOM		(20)
+
 /*************************************************************
  *  DTSI PROPERTY INDEX
  *************************************************************/
@@ -561,6 +565,7 @@
 				rc = -EINVAL;
 			}
 			*off_count = 0;
+			memset(prop_count, 0, sizeof(int *) * prop_size);
 			return rc;
 		}
 	}
@@ -629,7 +634,7 @@
 			rc = 0;
 			prop_count[i] = 0;
 		}
-		if (!off_count && prop_count[i] < 0) {
+		if (prop_count[i] < 0) {
 			prop_count[i] = 0;
 			if (sde_prop[i].is_mandatory) {
 				SDE_ERROR("prop:%s count:%d is negative\n",
@@ -776,6 +781,9 @@
 			"sspp_scaler%u", sspp->id);
 	}
 
+	if (sde_cfg->has_sbuf)
+		set_bit(SDE_SSPP_SBUF, &sspp->features);
+
 	sblk->csc_blk.id = SDE_SSPP_CSC;
 	snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN,
 			"sspp_csc%u", sspp->id);
@@ -1140,6 +1148,8 @@
 			set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
 		if (i < MAX_PP_SPLIT_DISPLAY_CTL)
 			set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);
+		if (sde_cfg->has_sbuf)
+			set_bit(SDE_CTL_SBUF, &ctl->features);
 	}
 
 end:
@@ -1368,6 +1378,9 @@
 			intf->controller_id = none_count;
 			none_count++;
 		}
+
+		if (sde_cfg->has_sbuf)
+			set_bit(SDE_INTF_ROT_START, &intf->features);
 	}
 
 end:
@@ -1571,6 +1584,73 @@
 	}
 }
 
+static int sde_rot_parse_dt(struct device_node *np,
+		struct sde_mdss_cfg *sde_cfg)
+{
+	struct sde_rot_cfg *rot;
+	struct platform_device *pdev;
+	struct of_phandle_args phargs;
+	struct llcc_slice_desc *slice;
+	int rc = 0, i;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < ROT_MAX; i++) {
+		rot = sde_cfg->rot + sde_cfg->rot_count;
+		rot->base = 0;
+		rot->len = 0;
+
+		rc = of_parse_phandle_with_args(np,
+				"qcom,sde-inline-rotator", "#list-cells",
+				i, &phargs);
+		if (rc) {
+			rc = 0;
+			break;
+		} else if (!phargs.np || !phargs.args_count) {
+			rc = -EINVAL;
+			break;
+		}
+
+		rot->id = ROT_0 + phargs.args[0];
+
+		pdev = of_find_device_by_node(phargs.np);
+		if (pdev) {
+			slice = llcc_slice_getd(&pdev->dev, "rotator");
+			if (IS_ERR_OR_NULL(slice)) {
+				rot->pdev = NULL;
+				SDE_ERROR("failed to get system cache %ld\n",
+						PTR_ERR(slice));
+			} else {
+				rot->scid = llcc_get_slice_id(slice);
+				rot->slice_size = llcc_get_slice_size(slice);
+				rot->pdev = pdev;
+				llcc_slice_putd(slice);
+				sde_cfg->rot_count++;
+				SDE_DEBUG("rot:%d scid:%d slice_size:%zukb\n",
+						rot->id, rot->scid,
+						rot->slice_size);
+			}
+		} else {
+			rot->pdev = NULL;
+			SDE_ERROR("invalid sde rotator node\n");
+		}
+
+		of_node_put(phargs.np);
+	}
+
+	if (sde_cfg->rot_count) {
+		sde_cfg->has_sbuf = true;
+		sde_cfg->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
+	}
+
+end:
+	return rc;
+}
+
 static int sde_dspp_parse_dt(struct device_node *np,
 						struct sde_mdss_cfg *sde_cfg)
 {
@@ -1668,6 +1748,7 @@
 					blocks_prop_exists, blocks_prop_value);
 
 		sblk->ad.id = SDE_DSPP_AD;
+		sde_cfg->ad_count = ad_off_count;
 		if (ad_prop_value && (i < ad_off_count) &&
 		    ad_prop_exists[AD_OFF]) {
 			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
@@ -2291,6 +2372,8 @@
 	if (!sde_cfg)
 		return -EINVAL;
 
+	rc = sde_hardware_format_caps(sde_cfg, hw_rev);
+
 	switch (hw_rev) {
 	case SDE_HW_VER_170:
 	case SDE_HW_VER_171:
@@ -2301,9 +2384,10 @@
 	case SDE_HW_VER_301:
 	case SDE_HW_VER_400:
 		/* update msm8998 and sdm845 target here */
-		rc = sde_hardware_format_caps(sde_cfg, hw_rev);
 		sde_cfg->has_wb_ubwc = true;
 		break;
+	default:
+		break;
 	}
 
 	return rc;
@@ -2367,6 +2451,10 @@
 	if (rc)
 		goto end;
 
+	rc = sde_rot_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
 	rc = sde_ctl_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 2b34016..97da08f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -110,6 +110,7 @@
  * @SDE_SSPP_EXCL_RECT,      SSPP supports exclusion rect
  * @SDE_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
  * @SDE_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
+ * @SDE_SSPP_SBUF,           SSPP supports inline stream buffer
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
@@ -128,6 +129,7 @@
 	SDE_SSPP_EXCL_RECT,
 	SDE_SSPP_SMART_DMA_V1,
 	SDE_SSPP_SMART_DMA_V2,
+	SDE_SSPP_SBUF,
 	SDE_SSPP_MAX
 };
 
@@ -199,15 +201,27 @@
  * CTL sub-blocks
  * @SDE_CTL_SPLIT_DISPLAY       CTL supports video mode split display
  * @SDE_CTL_PINGPONG_SPLIT      CTL supports pingpong split
+ * @SDE_CTL_SBUF                CTL supports inline stream buffer
  * @SDE_CTL_MAX
  */
 enum {
 	SDE_CTL_SPLIT_DISPLAY = 0x1,
 	SDE_CTL_PINGPONG_SPLIT,
+	SDE_CTL_SBUF,
 	SDE_CTL_MAX
 };
 
 /**
+ * INTF sub-blocks
+ * @SDE_INTF_ROT_START          INTF supports rotator start trigger
+ * @SDE_INTF_MAX
+ */
+enum {
+	SDE_INTF_ROT_START = 0x1,
+	SDE_INTF_MAX
+};
+
+/**
  * WB sub-blocks and features
  * @SDE_WB_LINE_MODE        Writeback module supports line/linear mode
  * @SDE_WB_BLOCK_MODE       Writeback module supports block mode read
@@ -599,6 +613,23 @@
 };
 
 /**
+ * struct sde_rot_cfg - information of rotator blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @len                length of hardware block
+ * @features           bit mask identifying sub-blocks/features
+ * @pdev               private device handle
+ * @scid               subcache identifier
+ * @slice_size         subcache slice size
+ */
+struct sde_rot_cfg {
+	SDE_HW_BLK_INFO;
+	void *pdev;
+	int scid;
+	size_t slice_size;
+};
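+
+/*
+ * Illustrative devicetree wiring consumed by sde_rot_parse_dt() (node
+ * and cell values are placeholders, not a binding definition):
+ *
+ *	qcom,sde-inline-rotator = <&mdss_rotator 0>;
+ *
+ * The referenced rotator node provides "#list-cells", and the argument
+ * cell selects the instance id relative to ROT_0.
+ */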
+
+/**
  * struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
  * @pps                pixel per seconds
  * @ot_limit           OT limit to use up to specified pixel per second
@@ -680,6 +711,8 @@
  * @has_cdp            Client driver prefetch feature status
  * @has_wb_ubwc        UBWC feature supported on WB
  * @ubwc_version       UBWC feature version (0x0 for not supported)
+ * @has_sbuf           indicate if stream buffer is available
+ * @sbuf_headroom      stream buffer headroom in lines
  * @dma_formats        Supported formats for dma pipe
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
@@ -700,6 +733,8 @@
 	bool has_dim_layer;
 	bool has_wb_ubwc;
 	u32 ubwc_version;
+	bool has_sbuf;
+	u32 sbuf_headroom;
 
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
@@ -734,11 +769,17 @@
 	u32 wb_count;
 	struct sde_wb_cfg wb[MAX_BLOCKS];
 
+	u32 rot_count;
+	struct sde_rot_cfg rot[MAX_BLOCKS];
+
 	u32 vbif_count;
 	struct sde_vbif_cfg vbif[MAX_BLOCKS];
 
 	u32 reg_dma_count;
 	struct sde_reg_dma_cfg dma_cfg;
+
+	u32 ad_count;
+
 	/* Add additional block data structures here */
 
 	struct sde_perf_cfg perf;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
index 354b892..cdb3450 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
@@ -16,17 +16,17 @@
 	{DRM_FORMAT_ARGB8888, 0},
 	{DRM_FORMAT_ABGR8888, 0},
 	{DRM_FORMAT_RGBA8888, 0},
-	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_BGRA8888, 0},
 	{DRM_FORMAT_XRGB8888, 0},
 	{DRM_FORMAT_RGBX8888, 0},
 	{DRM_FORMAT_BGRX8888, 0},
 	{DRM_FORMAT_XBGR8888, 0},
-	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_RGB888, 0},
 	{DRM_FORMAT_BGR888, 0},
 	{DRM_FORMAT_RGB565, 0},
-	{DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_BGR565, 0},
 	{DRM_FORMAT_ARGB1555, 0},
 	{DRM_FORMAT_ABGR1555, 0},
@@ -52,16 +52,16 @@
 	{DRM_FORMAT_ABGR8888, 0},
 	{DRM_FORMAT_RGBA8888, 0},
 	{DRM_FORMAT_BGRX8888, 0},
-	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_BGRA8888, 0},
 	{DRM_FORMAT_XRGB8888, 0},
 	{DRM_FORMAT_XBGR8888, 0},
 	{DRM_FORMAT_RGBX8888, 0},
-	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_RGB888, 0},
 	{DRM_FORMAT_BGR888, 0},
 	{DRM_FORMAT_RGB565, 0},
-	{DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_BGR565, 0},
 	{DRM_FORMAT_ARGB1555, 0},
 	{DRM_FORMAT_ABGR1555, 0},
@@ -113,14 +113,14 @@
 
 static const struct sde_format_extended wb2_formats[] = {
 	{DRM_FORMAT_RGB565, 0},
-	{DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_RGB888, 0},
 	{DRM_FORMAT_ARGB8888, 0},
 	{DRM_FORMAT_RGBA8888, 0},
-	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_XRGB8888, 0},
 	{DRM_FORMAT_RGBX8888, 0},
-	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_ARGB1555, 0},
 	{DRM_FORMAT_RGBA5551, 0},
 	{DRM_FORMAT_XRGB1555, 0},
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index e6b2fd5..82f1c09 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -28,6 +28,9 @@
 #define   CTL_START                     0x01C
 #define   CTL_SW_RESET                  0x030
 #define   CTL_LAYER_EXTN_OFFSET         0x40
+#define   CTL_ROT_TOP                   0x0C0
+#define   CTL_ROT_FLUSH                 0x0C4
+#define   CTL_ROT_START                 0x0CC
 
 #define CTL_MIXER_BORDER_OUT            BIT(24)
 #define CTL_FLUSH_MASK_CTL              BIT(17)
@@ -75,6 +78,11 @@
 	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 }
 
+static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
+}
+
 static inline void sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
 {
 	ctx->pending_flush_mask = 0x0;
@@ -99,6 +107,12 @@
 	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
 }
 
+static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	return SDE_REG_READ(c, CTL_FLUSH);
+}
 
 static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
 	enum sde_sspp sspp)
@@ -241,6 +255,19 @@
 	return 0;
 }
 
+static inline int sde_hw_ctl_get_bitmask_rot(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_rot rot)
+{
+	switch (rot) {
+	case ROT_0:
+		*flushbits |= BIT(27);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
 		u32 *flushbits, enum sde_cdm cdm)
 {
@@ -490,6 +517,17 @@
 	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
 }
 
+static void sde_hw_ctl_setup_sbuf_cfg(struct sde_hw_ctl *ctx,
+	struct sde_ctl_sbuf_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 val;
+
+	val = cfg->rot_op_mode & 0x3;
+
+	SDE_REG_WRITE(c, CTL_ROT_TOP, val);
+}
+
 static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
 		unsigned long cap)
 {
@@ -497,6 +535,7 @@
 	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
 	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
 	ops->trigger_flush = sde_hw_ctl_trigger_flush;
+	ops->get_flush_register = sde_hw_ctl_get_flush_register;
 	ops->trigger_start = sde_hw_ctl_trigger_start;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
@@ -509,6 +548,11 @@
 	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
 	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
 	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
+	if (cap & BIT(SDE_CTL_SBUF)) {
+		ops->get_bitmask_rot = sde_hw_ctl_get_bitmask_rot;
+		ops->setup_sbuf_cfg = sde_hw_ctl_setup_sbuf_cfg;
+		ops->trigger_rot_start = sde_hw_ctl_trigger_rot_start;
+	}
 };
 
 struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 4d1170e..7ae43b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -28,6 +28,20 @@
 	SDE_CTL_MODE_SEL_CMD
 };
 
+/**
+ * sde_ctl_rot_op_mode - inline rotation mode
+ * SDE_CTL_ROT_OP_MODE_OFFLINE: offline rotation
+ * SDE_CTL_ROT_OP_MODE_RESERVED: reserved
+ * SDE_CTL_ROT_OP_MODE_INLINE_SYNC: inline rotation synchronous mode
+ * SDE_CTL_ROT_OP_MODE_INLINE_ASYNC: inline rotation asynchronous mode
+ */
+enum sde_ctl_rot_op_mode {
+	SDE_CTL_ROT_OP_MODE_OFFLINE,
+	SDE_CTL_ROT_OP_MODE_RESERVED,
+	SDE_CTL_ROT_OP_MODE_INLINE_SYNC,
+	SDE_CTL_ROT_OP_MODE_INLINE_ASYNC,
+};
+
 struct sde_hw_ctl;
 /**
  * struct sde_hw_stage_cfg - blending stage cfg
@@ -57,6 +71,14 @@
 };
 
 /**
+ * struct sde_ctl_sbuf_cfg - control for stream buffer configuration
+ * @rot_op_mode: rotator operation mode
+ */
+struct sde_ctl_sbuf_cfg {
+	enum sde_ctl_rot_op_mode rot_op_mode;
+};
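+
+/*
+ * Illustrative call sequence (hypothetical caller; ctl and flush_mask are
+ * placeholders): the ops below are only populated when the CTL advertises
+ * SDE_CTL_SBUF.
+ *
+ *	struct sde_ctl_sbuf_cfg sbuf = {
+ *		.rot_op_mode = SDE_CTL_ROT_OP_MODE_INLINE_SYNC,
+ *	};
+ *
+ *	if (ctl->ops.setup_sbuf_cfg)
+ *		ctl->ops.setup_sbuf_cfg(ctl, &sbuf);
+ *	if (ctl->ops.get_bitmask_rot)
+ *		ctl->ops.get_bitmask_rot(ctl, &flush_mask, ROT_0);
+ *	if (ctl->ops.trigger_rot_start)
+ *		ctl->ops.trigger_rot_start(ctl);
+ */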
+
+/**
  * struct sde_hw_ctl_ops - Interface to the wb Hw driver functions
  * Assumption is these functions will be called after clocks are enabled
  */
@@ -69,6 +91,13 @@
 	void (*trigger_start)(struct sde_hw_ctl *ctx);
 
 	/**
+	 * Kick off rotator operation for SW controlled interfaces
+	 * DSI cmd mode and WB interface are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_rot_start)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * Clear the value of the cached pending_flush_mask
 	 * No effect on hardware
 	 * @ctx       : ctl path ctx pointer
@@ -98,6 +127,13 @@
 	void (*trigger_flush)(struct sde_hw_ctl *ctx);
 
 	/**
+	 * Read the value of the flush register
+	 * @ctx       : ctl path ctx pointer
+	 * @Return: value of the ctl flush register.
+	 */
+	u32 (*get_flush_register)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * Setup ctl_path interface config
 	 * @ctx
 	 * @cfg    : interface config structure pointer
@@ -140,6 +176,10 @@
 		u32 *flushbits,
 		enum sde_wb blk);
 
+	int (*get_bitmask_rot)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_rot blk);
+
 	/**
 	 * Set all blend stages to disabled
 	 * @ctx       : ctl path ctx pointer
@@ -154,6 +194,9 @@
 	 */
 	void (*setup_blendstage)(struct sde_hw_ctl *ctx,
 		enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index);
+
+	void (*setup_sbuf_cfg)(struct sde_hw_ctl *ctx,
+		struct sde_ctl_sbuf_cfg *cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 6110a07..f1b9c32 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -16,6 +16,7 @@
 #include "sde_hw_dspp.h"
 #include "sde_hw_color_processing.h"
 #include "sde_dbg.h"
+#include "sde_ad4.h"
 
 static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
 		struct sde_mdss_cfg *m,
@@ -96,6 +97,15 @@
 						sde_setup_dspp_gc_v1_7;
 			}
 			break;
+		case SDE_DSPP_AD:
+			if (c->cap->sblk->ad.version ==
+			    SDE_COLOR_PROCESS_VER(4, 0)) {
+				c->ops.setup_ad = sde_setup_dspp_ad4;
+				c->ops.ad_read_intr_resp =
+					sde_read_intr_resp_ad4;
+				c->ops.validate_ad = sde_validate_dspp_ad4;
+			}
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 25e1f3b..6020476 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -139,6 +139,29 @@
 	 * @cfg: Pointer to configuration
 	 */
 	void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * validate_ad - check if ad property can be set
+	 * @ctx: Pointer to dspp context
+	 * @prop: Pointer to ad property being validated
+	 */
+	int (*validate_ad)(struct sde_hw_dspp *ctx, u32 *prop);
+
+	/**
+	 * setup_ad - update the ad property
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to ad configuration
+	 */
+	void (*setup_ad)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * ad_read_intr_resp - function to get interrupt response for ad
+	 * @event: Event for which response needs to be read
+	 * @resp: Pointer to u32 where response value is dumped.
+	 */
+	void (*ad_read_intr_resp)(struct sde_hw_dspp *ctx, u32 event,
+			u32 *resp);
+
 };
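+
+/*
+ * Illustrative AD4 sequence (hypothetical caller; rc, prop, cfg and bl
+ * are placeholders): these ops are only populated for DSPPs whose AD
+ * sub-block reports version 4.0.
+ *
+ *	if (dspp->ops.validate_ad)
+ *		rc = dspp->ops.validate_ad(dspp, &prop);
+ *	if (!rc && dspp->ops.setup_ad)
+ *		dspp->ops.setup_ad(dspp, cfg);
+ *	if (dspp->ops.ad_read_intr_resp)
+ *		dspp->ops.ad_read_intr_resp(dspp, AD4_BACKLIGHT, &bl);
+ */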
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index e68e3c9..47fb07f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -29,6 +29,11 @@
 #define MDP_INTF_2_OFF			0x6C000
 #define MDP_INTF_3_OFF			0x6C800
 #define MDP_INTF_4_OFF			0x6D000
+#define MDP_AD4_0_OFF			0x7D000
+#define MDP_AD4_1_OFF			0x7E000
+#define MDP_AD4_INTR_EN_OFF		0x41c
+#define MDP_AD4_INTR_CLEAR_OFF		0x424
+#define MDP_AD4_INTR_STATUS_OFF		0x420
 
 /**
  * WB interrupt status bit definitions
@@ -155,6 +160,14 @@
 #define SDE_INTR_PROG_LINE BIT(8)
 
 /**
+ * AD4 interrupt status bit definitions
+ */
+#define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
+#define SDE_INTR_DARKENH_UPDATED BIT(3)
+#define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define SDE_INTR_STREN_INROI_UPDATED BIT(1)
+#define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
+
+/**
  * struct sde_intr_reg - array of SDE register sets
  * @clr_off:	offset to CLEAR reg
  * @en_off:	offset to ENABLE reg
@@ -223,6 +236,16 @@
 		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
 		MDP_INTF_4_OFF+INTF_INTR_EN,
 		MDP_INTF_4_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+	},
+	{
+		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
 	}
 };
 
@@ -648,6 +671,10 @@
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+	/* irq_idx: 256-257 */
+	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, 8},
+	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, 9}
 };
 
 static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index 261ef64..7805df1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,6 +54,7 @@
  * @SDE_IRQ_TYPE_SFI_CMD_2_IN:		DSI CMD2 static frame INTR into static
  * @SDE_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
  * @SDE_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
+ * @SDE_IRQ_TYPE_AD4_BL_DONE:		AD4 backlight update done
  * @SDE_IRQ_TYPE_RESERVED:		Reserved for expansion
  */
 enum sde_intr_type {
@@ -82,6 +83,7 @@
 	SDE_IRQ_TYPE_SFI_CMD_2_IN,
 	SDE_IRQ_TYPE_SFI_CMD_2_OUT,
 	SDE_IRQ_TYPE_PROG_LINE,
+	SDE_IRQ_TYPE_AD4_BL_DONE,
 	SDE_IRQ_TYPE_RESERVED,
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index c17844d..1f17378 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -58,6 +58,7 @@
 #define   INTF_TPG_BLK_WHITE_PATTERN_FRAMES   0x118
 #define   INTF_TPG_RGB_MAPPING          0x11C
 #define   INTF_PROG_FETCH_START         0x170
+#define   INTF_PROG_ROT_START           0x174
 
 #define   INTF_FRAME_LINE_COUNT_EN      0x0A8
 #define   INTF_FRAME_COUNT              0x0AC
@@ -66,12 +67,6 @@
 #define INTF_MISR_CTRL			0x180
 #define INTF_MISR_SIGNATURE		0x184
 
-#define MISR_FRAME_COUNT_MASK		0xFF
-#define MISR_CTRL_ENABLE		BIT(8)
-#define MISR_CTRL_STATUS		BIT(9)
-#define MISR_CTRL_STATUS_CLEAR		BIT(10)
-#define INTF_MISR_CTRL_FREE_RUN_MASK	BIT(31)
-
 static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -234,6 +229,25 @@
 	SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
 }
 
+static void sde_hw_intf_setup_rot_start(
+		struct sde_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	int fetch_enable;
+
+	fetch_enable = SDE_REG_READ(c, INTF_CONFIG);
+	if (fetch->enable) {
+		fetch_enable |= BIT(19);
+		SDE_REG_WRITE(c, INTF_PROG_ROT_START,
+				fetch->fetch_start);
+	} else {
+		fetch_enable &= ~BIT(19);
+	}
+
+	SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
 static void sde_hw_intf_get_status(
 		struct sde_hw_intf *intf,
 		struct intf_status *s)
@@ -250,48 +264,28 @@
 	}
 }
 
-static void sde_hw_intf_set_misr(struct sde_hw_intf *intf,
-		struct sde_misr_params *misr_map)
+static void sde_hw_intf_setup_misr(struct sde_hw_intf *intf,
+						bool enable, u32 frame_count)
 {
 	struct sde_hw_blk_reg_map *c = &intf->hw;
 	u32 config = 0;
 
-	if (!misr_map)
-		return;
-
 	SDE_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
-	/* Clear data */
+	/* clear misr data */
 	wmb();
 
-	if (misr_map->enable) {
-		config = (MISR_FRAME_COUNT_MASK & 1) |
-			(MISR_CTRL_ENABLE);
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
 
-		SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
-	} else {
-		SDE_REG_WRITE(c, INTF_MISR_CTRL, 0);
-	}
+	SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
 }
 
-static void sde_hw_intf_collect_misr(struct sde_hw_intf *intf,
-		struct sde_misr_params *misr_map)
+static u32 sde_hw_intf_collect_misr(struct sde_hw_intf *intf)
 {
 	struct sde_hw_blk_reg_map *c = &intf->hw;
 
-	if (!misr_map)
-		return;
-
-	if (misr_map->enable) {
-		if (misr_map->last_idx < misr_map->frame_count &&
-			misr_map->last_idx < SDE_CRC_BATCH_SIZE)
-			misr_map->crc_value[misr_map->last_idx] =
-				SDE_REG_READ(c, INTF_MISR_SIGNATURE);
-	}
-
-	misr_map->enable =
-		misr_map->enable & (misr_map->last_idx <= SDE_CRC_BATCH_SIZE);
-
-	misr_map->last_idx++;
+	return SDE_REG_READ(c, INTF_MISR_SIGNATURE);
 }
 
 static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
@@ -301,8 +295,10 @@
 	ops->setup_prg_fetch  = sde_hw_intf_setup_prg_fetch;
 	ops->get_status = sde_hw_intf_get_status;
 	ops->enable_timing = sde_hw_intf_enable_timing_engine;
-	ops->setup_misr = sde_hw_intf_set_misr;
+	ops->setup_misr = sde_hw_intf_setup_misr;
 	ops->collect_misr = sde_hw_intf_collect_misr;
+	if (cap & BIT(SDE_INTF_ROT_START))
+		ops->setup_rot_start = sde_hw_intf_setup_rot_start;
 }
 
 struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
index f4a01cb..d24e83a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,24 +19,6 @@
 
 struct sde_hw_intf;
 
-/* Batch size of frames for collecting MISR data */
-#define SDE_CRC_BATCH_SIZE 16
-
-/**
- * struct sde_misr_params : Interface for getting and setting MISR data
- *  Assumption is these functions will be called after clocks are enabled
- * @ enable : enables/disables MISR
- * @ frame_count : represents number of frames for which MISR is enabled
- * @ last_idx: number of frames for which MISR data is collected
- * @ crc_value: stores the collected MISR data
- */
-struct sde_misr_params {
-	bool enable;
-	u32 frame_count;
-	u32 last_idx;
-	u32 crc_value[SDE_CRC_BATCH_SIZE];
-};
-
 /* intf timing settings */
 struct intf_timing_params {
 	u32 width;		/* active width */
@@ -74,6 +56,7 @@
  *  Assumption is these functions will be called after clocks are enabled
  * @ setup_timing_gen : programs the timing engine
  * @ setup_prog_fetch : enables/disables the programmable fetch logic
+ * @ setup_rot_start  : enables/disables the rotator start trigger
  * @ enable_timing: enable/disable timing engine
  * @ get_status: returns if timing engine is enabled or not
  * @ setup_misr: enables/disables MISR in HW register
@@ -87,6 +70,9 @@
 	void (*setup_prg_fetch)(struct sde_hw_intf *intf,
 			const struct intf_prog_fetch *fetch);
 
+	void (*setup_rot_start)(struct sde_hw_intf *intf,
+			const struct intf_prog_fetch *fetch);
+
 	void (*enable_timing)(struct sde_hw_intf *intf,
 			u8 enable);
 
@@ -94,10 +80,9 @@
 			struct intf_status *status);
 
 	void (*setup_misr)(struct sde_hw_intf *intf,
-			struct sde_misr_params *misr_map);
+			bool enable, u32 frame_count);
 
-	void (*collect_misr)(struct sde_hw_intf *intf,
-			struct sde_misr_params *misr_map);
+	u32 (*collect_misr)(struct sde_hw_intf *intf);
 };
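+
+/*
+ * Illustrative MISR usage with the reworked signatures (hypothetical
+ * caller; crc is a placeholder):
+ *
+ *	intf->ops.setup_misr(intf, true, 1);	enable, 1-frame window
+ *	... wait for the frame to be pushed out ...
+ *	crc = intf->ops.collect_misr(intf);	read back the signature
+ */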
 
 struct sde_hw_intf {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 520c7b1..7780c5b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -33,6 +33,9 @@
 #define LM_BLEND0_FG_ALPHA               0x04
 #define LM_BLEND0_BG_ALPHA               0x08
 
+#define LM_MISR_CTRL			0x310
+#define LM_MISR_SIGNATURE		0x314
+
 static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -224,6 +227,30 @@
 	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
 }
 
+static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
+				bool enable, u32 frame_count)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 config = 0;
+
+	SDE_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* clear misr data */
+	wmb();
+
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+	SDE_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	return SDE_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
 static void _setup_mixer_ops(struct sde_mdss_cfg *m,
 		struct sde_hw_lm_ops *ops,
 		unsigned long features)
@@ -236,6 +263,8 @@
 	ops->setup_alpha_out = sde_hw_lm_setup_color3;
 	ops->setup_border_color = sde_hw_lm_setup_border_color;
 	ops->setup_gc = sde_hw_lm_gc;
+	ops->setup_misr = sde_hw_lm_setup_misr;
+	ops->collect_misr = sde_hw_lm_collect_misr;
 
 	if (test_bit(SDE_DIM_LAYER, &features)) {
 		ops->setup_dim_layer = sde_hw_lm_setup_dim_layer;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 5af260a..45c0fc9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -79,6 +79,13 @@
 	 * @ctx: Pointer to layer mixer context
 	 */
 	void (*clear_dim_layer)(struct sde_hw_mixer *ctx);
+
+	/* setup_misr: enables/disables MISR in HW register */
+	void (*setup_misr)(struct sde_hw_mixer *ctx,
+			bool enable, u32 frame_count);
+
+	/* collect_misr: reads and stores MISR data from HW register */
+	u32 (*collect_misr)(struct sde_hw_mixer *ctx);
 };
 
 struct sde_hw_mixer {
@@ -93,6 +100,9 @@
 
 	/* ops */
 	struct sde_hw_lm_ops ops;
+
+	/* store mixer info specific to display */
+	struct sde_hw_mixer_cfg cfg;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index c4917d3..31aa031 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -46,17 +46,24 @@
 enum sde_format_flags {
 	SDE_FORMAT_FLAG_YUV_BIT,
 	SDE_FORMAT_FLAG_DX_BIT,
+	SDE_FORMAT_FLAG_COMPRESSED_BIT,
 	SDE_FORMAT_FLAG_BIT_MAX,
 };
 
 #define SDE_FORMAT_FLAG_YUV		BIT(SDE_FORMAT_FLAG_YUV_BIT)
 #define SDE_FORMAT_FLAG_DX		BIT(SDE_FORMAT_FLAG_DX_BIT)
+#define SDE_FORMAT_FLAG_COMPRESSED	BIT(SDE_FORMAT_FLAG_COMPRESSED_BIT)
 #define SDE_FORMAT_IS_YUV(X)		\
 	(test_bit(SDE_FORMAT_FLAG_YUV_BIT, (X)->flag))
 #define SDE_FORMAT_IS_DX(X)		\
 	(test_bit(SDE_FORMAT_FLAG_DX_BIT, (X)->flag))
 #define SDE_FORMAT_IS_LINEAR(X)		((X)->fetch_mode == SDE_FETCH_LINEAR)
-#define SDE_FORMAT_IS_UBWC(X)		((X)->fetch_mode == SDE_FETCH_UBWC)
+#define SDE_FORMAT_IS_TILE(X) \
+	(((X)->fetch_mode == SDE_FETCH_UBWC) && \
+			!test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define SDE_FORMAT_IS_UBWC(X) \
+	(((X)->fetch_mode == SDE_FETCH_UBWC) && \
+			test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
 
 #define SDE_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
 #define SDE_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
@@ -86,6 +93,7 @@
 	SDE_HW_BLK_INTF,
 	SDE_HW_BLK_WB,
 	SDE_HW_BLK_DSC,
+	SDE_HW_BLK_ROT,
 	SDE_HW_BLK_MAX,
 };
 
@@ -270,6 +278,11 @@
 	SDE_IOMMU_DOMAIN_MAX
 };
 
+enum sde_rot {
+	ROT_0 = 1,
+	ROT_MAX
+};
+
 /**
  * SDE HW,Component order color map
  */
@@ -455,6 +468,7 @@
 #define SDE_DBG_MASK_TOP      (1 << 9)
 #define SDE_DBG_MASK_VBIF     (1 << 10)
 #define SDE_DBG_MASK_DSC      (1 << 11)
+#define SDE_DBG_MASK_ROT      (1 << 12)
 
 /**
  * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
@@ -462,12 +476,20 @@
  * @len: Length of the payload.
  * @ctl: control pointer associated with dspp/lm.
  * @last_feature: last feature that will be set.
+ * @num_of_mixers: number of layer mixers for the display.
+ * @mixer_info: mixer info pointer associated with lm.
+ * @displayv: height of the display.
+ * @displayh: width of the display.
  */
 struct sde_hw_cp_cfg {
 	void *payload;
 	u32 len;
 	void *ctl;
 	u32 last_feature;
+	u32 num_of_mixers;
+	void *mixer_info;
+	u32 displayv;
+	u32 displayh;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
new file mode 100644
index 0000000..01fe3c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -0,0 +1,933 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_rot.h"
+#include "sde_formats.h"
+#include "sde_rotator_inline.h"
+
+#define SDE_MODIFLER(_modifier_) ((_modifier_) & 0x00ffffffffffffffULL)
+#define SDE_MODIFIER_IS_TILE(_modifier_) \
+	SDE_MODIFLER((_modifier_) & DRM_FORMAT_MOD_QCOM_TILE)
+#define SDE_MODIFIER_IS_UBWC(_modifier_) \
+	SDE_MODIFLER((_modifier_) & DRM_FORMAT_MOD_QCOM_COMPRESSED)
+#define SDE_MODIFIER_IS_10B(_modifier_) \
+	SDE_MODIFLER((_modifier_) & DRM_FORMAT_MOD_QCOM_DX)
+#define SDE_MODIFIER_IS_TIGHT(_modifier_) \
+	SDE_MODIFLER((_modifier_) & DRM_FORMAT_MOD_QCOM_TIGHT)
+
+/**
+ * _rot_offset - update register map of the given rotator instance
+ * @rot: rotator identifier
+ * @m: Pointer to mdss catalog
+ * @addr: i/o address mapping
+ * @b: Pointer to register block mapping structure
+ * return: Pointer to rotator configuration of the given instance
+ */
+static struct sde_rot_cfg *_rot_offset(enum sde_rot rot,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->rot_count; i++) {
+		if (rot == m->rot[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->rot[i].base;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_ROT;
+			return &m->rot[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * sde_hw_rot_start - start rotator before any commit
+ * @hw: Pointer to rotator hardware driver
+ * return: 0 if success; error code otherwise
+ */
+static int sde_hw_rot_start(struct sde_hw_rot *hw)
+{
+	struct platform_device *pdev;
+	int rc;
+
+	if (!hw || !hw->caps || !hw->caps->pdev) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	pdev = hw->caps->pdev;
+
+	hw->rot_ctx = sde_rotator_inline_open(pdev);
+	if (IS_ERR_OR_NULL(hw->rot_ctx)) {
+		rc = PTR_ERR(hw->rot_ctx);
+		hw->rot_ctx = NULL;
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * sde_hw_rot_stop - stop rotator after final commit
+ * @hw: Pointer to rotator hardware driver
+ * return: none
+ */
+static void sde_hw_rot_stop(struct sde_hw_rot *hw)
+{
+	if (!hw) {
+		SDE_ERROR("invalid parameter\n");
+		return;
+	}
+
+	sde_rotator_inline_release(hw->rot_ctx);
+	hw->rot_ctx = NULL;
+}
+
+/**
+ * sde_hw_rot_to_v4l2_pixfmt - convert drm pixel format to v4l2 pixel format
+ * @drm_pixfmt: drm fourcc pixel format
+ * @drm_modifier: drm pixel format modifier
+ * @pixfmt: Pointer to v4l2 fourcc pixel format (output)
+ * return: 0 if success; error code otherwise
+ */
+static int sde_hw_rot_to_v4l2_pixfmt(u32 drm_pixfmt, u64 drm_modifier,
+		u32 *pixfmt)
+{
+	int rc = 0;
+
+	if (!pixfmt)
+		return -EINVAL;
+
+	switch (drm_pixfmt) {
+	case DRM_FORMAT_BGR565:
+		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGB_565_UBWC;
+		else
+			*pixfmt = SDE_PIX_FMT_RGB_565;
+		break;
+	case DRM_FORMAT_BGRA8888:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_ARGB_8888;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_XRGB_8888;
+		break;
+	case DRM_FORMAT_RGBA8888:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_ABGR_8888;
+		break;
+	case DRM_FORMAT_RGBX8888:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_XBGR_8888;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBA_8888_UBWC;
+		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBA_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_RGBA_8888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBX_8888_UBWC;
+		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBX_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_RGBX_8888;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_BGRA_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_BGRA_8888;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_BGRX_8888_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_BGRX_8888;
+		break;
+	case DRM_FORMAT_NV12:
+		if (SDE_MODIFIER_IS_UBWC(drm_modifier)) {
+			if (SDE_MODIFIER_IS_10B(drm_modifier)) {
+				if (SDE_MODIFIER_IS_TIGHT(drm_modifier))
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC;
+				else
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC;
+			} else {
+				*pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_UBWC;
+			}
+		} else if (SDE_MODIFIER_IS_TILE(drm_modifier)) {
+			if (SDE_MODIFIER_IS_10B(drm_modifier)) {
+				if (SDE_MODIFIER_IS_TIGHT(drm_modifier))
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
+				else
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE;
+			} else {
+				*pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TILE;
+			}
+		} else {
+			if (SDE_MODIFIER_IS_10B(drm_modifier)) {
+				if (SDE_MODIFIER_IS_TIGHT(drm_modifier))
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
+				else
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_P010;
+			} else {
+				*pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2;
+			}
+		}
+		break;
+	case DRM_FORMAT_NV21:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2;
+		break;
+	case DRM_FORMAT_BGRA1010102:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_ARGB_2101010;
+		break;
+	case DRM_FORMAT_BGRX1010102:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_XRGB_2101010;
+		break;
+	case DRM_FORMAT_RGBA1010102:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_ABGR_2101010;
+		break;
+	case DRM_FORMAT_RGBX1010102:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_XBGR_2101010;
+		break;
+	case DRM_FORMAT_ARGB2101010:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_BGRA_1010102;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+		if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_BGRX_1010102;
+		break;
+	case DRM_FORMAT_ABGR2101010:
+		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBA_1010102_UBWC;
+		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBA_1010102_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_RGBA_1010102;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBX_1010102_UBWC;
+		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
+			*pixfmt = SDE_PIX_FMT_RGBX_1010102_TILE;
+		else
+			*pixfmt = SDE_PIX_FMT_RGBX_1010102;
+		break;
+	default:
+		SDE_ERROR("invalid drm pixel format %c%c%c%c/%llx\n",
+				drm_pixfmt >> 0, drm_pixfmt >> 8,
+				drm_pixfmt >> 16, drm_pixfmt >> 24,
+				drm_modifier);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * sde_hw_rot_to_drm_pixfmt - convert v4l2 pixel format to drm pixel format
+ * @pixfmt: v4l2 fourcc pixel format
+ * @drm_pixfmt: Pointer to drm fourcc pixel format (output)
+ * @drm_modifier: Pointer to drm pixel format modifier (output)
+ * return: 0 if success; error code otherwise
+ */
+static int sde_hw_rot_to_drm_pixfmt(u32 pixfmt, u32 *drm_pixfmt,
+		u64 *drm_modifier)
+{
+	int rc = 0;
+
+	switch (pixfmt) {
+	case SDE_PIX_FMT_RGB_565:
+		*drm_pixfmt = DRM_FORMAT_BGR565;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_RGB_565_UBWC:
+		*drm_pixfmt = DRM_FORMAT_BGR565;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_RGBA_8888:
+		*drm_pixfmt = DRM_FORMAT_ABGR8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_RGBX_8888:
+		*drm_pixfmt = DRM_FORMAT_XBGR8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_BGRA_8888:
+		*drm_pixfmt = DRM_FORMAT_ARGB8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_BGRX_8888:
+		*drm_pixfmt = DRM_FORMAT_XRGB8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CRCB_H2V2:
+		*drm_pixfmt = DRM_FORMAT_NV21;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_RGBA_8888_UBWC:
+		*drm_pixfmt = DRM_FORMAT_ABGR8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_RGBX_8888_UBWC:
+		*drm_pixfmt = DRM_FORMAT_XBGR8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_ARGB_8888:
+		*drm_pixfmt = DRM_FORMAT_BGRA8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_XRGB_8888:
+		*drm_pixfmt = DRM_FORMAT_BGRX8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_ABGR_8888:
+		*drm_pixfmt = DRM_FORMAT_RGBA8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_XBGR_8888:
+		*drm_pixfmt = DRM_FORMAT_RGBX8888;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_ARGB_2101010:
+		*drm_pixfmt = DRM_FORMAT_BGRA1010102;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_XRGB_2101010:
+		*drm_pixfmt = DRM_FORMAT_BGRX1010102;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_ABGR_2101010:
+		*drm_pixfmt = DRM_FORMAT_RGBA1010102;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_XBGR_2101010:
+		*drm_pixfmt = DRM_FORMAT_RGBX1010102;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_BGRA_1010102:
+		*drm_pixfmt = DRM_FORMAT_ARGB2101010;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_BGRX_1010102:
+		*drm_pixfmt = DRM_FORMAT_XRGB2101010;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_RGBA_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_ABGR8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_RGBX_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_XBGR8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_BGRA_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_ARGB8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_BGRX_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_XRGB8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
+		*drm_pixfmt = DRM_FORMAT_NV21;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TILE:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_BGRA8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_BGRX8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_RGBA8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_8888_TILE:
+		*drm_pixfmt = DRM_FORMAT_RGBX8888;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_2101010_TILE:
+		*drm_pixfmt = DRM_FORMAT_BGRA1010102;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_2101010_TILE:
+		*drm_pixfmt = DRM_FORMAT_BGRX1010102;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_2101010_TILE:
+		*drm_pixfmt = DRM_FORMAT_RGBA1010102;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_2101010_TILE:
+		*drm_pixfmt = DRM_FORMAT_RGBX1010102;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_BGRA_1010102_TILE:
+		*drm_pixfmt = DRM_FORMAT_ARGB2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_BGRX_1010102_TILE:
+		*drm_pixfmt = DRM_FORMAT_XRGB2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_RGBA_1010102_UBWC:
+		*drm_pixfmt = DRM_FORMAT_ABGR2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_RGBX_1010102_UBWC:
+		*drm_pixfmt = DRM_FORMAT_XBGR2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_DX;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE |
+				DRM_FORMAT_MOD_QCOM_DX;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE |
+				DRM_FORMAT_MOD_QCOM_DX;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE |
+				DRM_FORMAT_MOD_QCOM_DX |
+				DRM_FORMAT_MOD_QCOM_TIGHT;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE |
+				DRM_FORMAT_MOD_QCOM_DX |
+				DRM_FORMAT_MOD_QCOM_TIGHT;
+		break;
+	default:
+		SDE_DEBUG("invalid v4l2 pixel format %c%c%c%c\n",
+				pixfmt >> 0, pixfmt >> 8,
+				pixfmt >> 16, pixfmt >> 24);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * sde_hw_rot_to_v4l2_buffer - convert drm buffer to v4l2 buffer
+ * @drm_pixfmt: pixel format in drm fourcc
+ * @drm_modifier: pixel format modifier
+ * @drm_addr: drm buffer address per plane
+ * @drm_len: drm buffer length per plane
+ * @drm_planes: drm buffer number of planes
+ * @v4l_addr: v4l2 buffer address per plane
+ * @v4l_len: v4l2 buffer length per plane
+ * @v4l_planes: v4l2 buffer number of planes
+ */
+static void sde_hw_rot_to_v4l2_buffer(u32 drm_pixfmt, u64 drm_modifier,
+		dma_addr_t *drm_addr, u32 *drm_len, u32 *drm_planes,
+		dma_addr_t *v4l_addr, u32 *v4l_len, u32 *v4l_planes)
+{
+	int i, total_size = 0;
+
+	for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
+		v4l_addr[i] = drm_addr[i];
+		v4l_len[i] = drm_len[i];
+		total_size += drm_len[i];
+		SDE_DEBUG("drm[%d]:%pad/%x\n", i, &drm_addr[i], drm_len[i]);
+	}
+
+	if (SDE_MODIFIER_IS_UBWC(drm_modifier)) {
+		/* v4l2 driver uses plane[0] as single ubwc buffer plane */
+		v4l_addr[0] = drm_addr[2];
+		v4l_len[0] = total_size;
+		*v4l_planes = 1;
+		SDE_DEBUG("v4l2[0]:%pad/%x/%d\n", &v4l_addr[0], v4l_len[0],
+				*v4l_planes);
+	} else {
+		*v4l_planes = *drm_planes;
+	}
+}
+
+/**
+ * sde_hw_rot_commit - commit/execute given rotator command
+ * @hw: Pointer to rotator hardware driver
+ * @data: Pointer to command descriptor
+ * @hw_cmd: type of command to be executed
+ * return: 0 if success; error code otherwise
+ */
+static int sde_hw_rot_commit(struct sde_hw_rot *hw, struct sde_hw_rot_cmd *data,
+		enum sde_hw_rot_cmd_type hw_cmd)
+{
+	struct sde_rotator_inline_cmd rot_cmd;
+	enum sde_rotator_inline_cmd_type cmd_type;
+	void *priv_handle = NULL;
+	int rc;
+
+	if (!hw || !data) {
+		SDE_ERROR("invalid parameter\n");
+		return -EINVAL;
+	}
+
+	memset(&rot_cmd, 0, sizeof(struct sde_rotator_inline_cmd));
+
+	switch (hw_cmd) {
+	case SDE_HW_ROT_CMD_VALIDATE:
+		cmd_type = SDE_ROTATOR_INLINE_CMD_VALIDATE;
+		break;
+	case SDE_HW_ROT_CMD_COMMIT:
+		cmd_type = SDE_ROTATOR_INLINE_CMD_COMMIT;
+		break;
+	case SDE_HW_ROT_CMD_CLEANUP:
+		cmd_type = SDE_ROTATOR_INLINE_CMD_CLEANUP;
+		priv_handle = data->priv_handle;
+		break;
+	default:
+		SDE_ERROR("invalid hw rotator command %d\n", hw_cmd);
+		return -EINVAL;
+	}
+
+	rot_cmd.video_mode = data->video_mode;
+	rot_cmd.fps = data->fps;
+	rot_cmd.rot90 = data->rot90;
+	rot_cmd.hflip = data->hflip;
+	rot_cmd.vflip = data->vflip;
+	rot_cmd.secure = data->secure;
+	rot_cmd.clkrate = data->clkrate;
+	rot_cmd.data_bw = 0;
+	rot_cmd.prefill_bw = data->prefill_bw;
+	rot_cmd.src_width = data->src_width;
+	rot_cmd.src_height = data->src_height;
+	rot_cmd.src_rect_x = data->src_rect_x;
+	rot_cmd.src_rect_y = data->src_rect_y;
+	rot_cmd.src_rect_w = data->src_rect_w;
+	rot_cmd.src_rect_h = data->src_rect_h;
+	rot_cmd.dst_writeback = data->dst_writeback;
+	rot_cmd.dst_rect_x = data->dst_rect_x;
+	rot_cmd.dst_rect_y = data->dst_rect_y;
+	rot_cmd.dst_rect_w = data->dst_rect_w;
+	rot_cmd.dst_rect_h = data->dst_rect_h;
+	rot_cmd.priv_handle = priv_handle;
+
+	rc = sde_hw_rot_to_v4l2_pixfmt(data->src_pixel_format,
+			data->src_modifier, &rot_cmd.src_pixfmt);
+	if (rc) {
+		SDE_ERROR("invalid src format %d\n", rc);
+		return rc;
+	}
+
+	/* calculate preferred output format during validation */
+	if (hw_cmd == SDE_HW_ROT_CMD_VALIDATE) {
+		rc = sde_rotator_inline_get_dst_pixfmt(hw->caps->pdev,
+				rot_cmd.src_pixfmt, &rot_cmd.dst_pixfmt);
+		if (rc) {
+			SDE_ERROR("invalid src format %d\n", rc);
+			return rc;
+		}
+
+		rc = sde_hw_rot_to_drm_pixfmt(rot_cmd.dst_pixfmt,
+				&data->dst_pixel_format, &data->dst_modifier);
+		if (rc) {
+			SDE_ERROR("invalid dst format %c%c%c%c\n",
+					rot_cmd.dst_pixfmt >> 0,
+					rot_cmd.dst_pixfmt >> 8,
+					rot_cmd.dst_pixfmt >> 16,
+					rot_cmd.dst_pixfmt >> 24);
+			return rc;
+		}
+
+		data->dst_format = sde_get_sde_format_ext(
+				data->dst_pixel_format, &data->dst_modifier, 1);
+		if (!data->dst_format) {
+			SDE_ERROR("failed to get dst format\n");
+			return -EINVAL;
+		}
+	} else if (hw_cmd == SDE_HW_ROT_CMD_COMMIT) {
+		rc = sde_hw_rot_to_v4l2_pixfmt(data->dst_pixel_format,
+				data->dst_modifier, &rot_cmd.dst_pixfmt);
+		if (rc) {
+			SDE_ERROR("invalid dst format %d\n", rc);
+			return rc;
+		}
+
+		sde_hw_rot_to_v4l2_buffer(data->src_pixel_format,
+				data->src_modifier,
+				data->src_iova, data->src_len,
+				&data->src_planes,
+				rot_cmd.src_addr, rot_cmd.src_len,
+				&rot_cmd.src_planes);
+
+		sde_hw_rot_to_v4l2_buffer(data->dst_pixel_format,
+				data->dst_modifier,
+				data->dst_iova, data->dst_len,
+				&data->dst_planes,
+				rot_cmd.dst_addr, rot_cmd.dst_len,
+				&rot_cmd.dst_planes);
+	}
+
+	/* only process any command if client is master or for validation */
+	if (data->master || hw_cmd == SDE_HW_ROT_CMD_VALIDATE) {
+		SDE_DEBUG("dispatch seq:%d cmd:%d\n", data->sequence_id,
+				hw_cmd);
+
+		rc = sde_rotator_inline_commit(hw->rot_ctx, &rot_cmd, cmd_type);
+		if (rc) {
+			SDE_ERROR("failed to commit inline rotation %d\n", rc);
+			return rc;
+		}
+
+		/* return to caller */
+		data->priv_handle = rot_cmd.priv_handle;
+	} else {
+		SDE_DEBUG("bypass seq:%d cmd:%d\n", data->sequence_id, hw_cmd);
+	}
+
+	return 0;
+}
+
+/**
+ * sde_hw_rot_get_format_caps - get pixel format capability
+ * @hw: Pointer to rotator hardware driver
+ * return: Pointer to pixel format capability array; NULL otherwise
+ */
+static const struct sde_format_extended *sde_hw_rot_get_format_caps(
+		struct sde_hw_rot *hw)
+{
+	int rc, i, j, len;
+	u32 *v4l_pixfmts;
+	struct sde_format_extended *drm_pixfmts;
+	struct platform_device *pdev;
+
+	if (!hw || !hw->caps || !hw->caps->pdev) {
+		SDE_ERROR("invalid rotator hw\n");
+		return NULL;
+	}
+
+	pdev = hw->caps->pdev;
+
+	if (hw->format_caps)
+		return hw->format_caps;
+
+	len = sde_rotator_inline_get_pixfmt_caps(pdev, true, NULL, 0);
+	if (len < 0) {
+		SDE_ERROR("invalid pixfmt caps %d\n", len);
+		return NULL;
+	}
+
+	v4l_pixfmts = kcalloc(len, sizeof(u32), GFP_KERNEL);
+	if (!v4l_pixfmts)
+		goto done;
+
+	sde_rotator_inline_get_pixfmt_caps(pdev, true, v4l_pixfmts, len);
+
+	/* allocate one more to indicate termination */
+	drm_pixfmts = kzalloc((len + 1) * sizeof(struct sde_format_extended),
+			GFP_KERNEL);
+	if (!drm_pixfmts)
+		goto done;
+
+	for (i = 0, j = 0; i < len; i++) {
+		rc = sde_hw_rot_to_drm_pixfmt(v4l_pixfmts[i],
+				&drm_pixfmts[j].fourcc_format,
+				&drm_pixfmts[j].modifier);
+		if (!rc) {
+			SDE_DEBUG("%d: vl42:%c%c%c%c => drm:%c%c%c%c/0x%llx\n",
+				i, v4l_pixfmts[i] >> 0, v4l_pixfmts[i] >> 8,
+				v4l_pixfmts[i] >> 16, v4l_pixfmts[i] >> 24,
+				drm_pixfmts[j].fourcc_format >> 0,
+				drm_pixfmts[j].fourcc_format >> 8,
+				drm_pixfmts[j].fourcc_format >> 16,
+				drm_pixfmts[j].fourcc_format >> 24,
+				drm_pixfmts[j].modifier);
+			j++;
+		} else {
+			SDE_DEBUG("%d: vl42:%c%c%c%c not mapped\n",
+				i, v4l_pixfmts[i] >> 0, v4l_pixfmts[i] >> 8,
+				v4l_pixfmts[i] >> 16, v4l_pixfmts[i] >> 24);
+		}
+	}
+
+	hw->format_caps = drm_pixfmts;
+done:
+	kfree(v4l_pixfmts);
+
+	return hw->format_caps;
+}
+
+/**
+ * sde_hw_rot_get_downscale_caps - get scaling capability string
+ * @hw: Pointer to rotator hardware driver
+ * return: Pointer to capability string; NULL otherwise
+ */
+static const char *sde_hw_rot_get_downscale_caps(struct sde_hw_rot *hw)
+{
+	int len;
+	struct platform_device *pdev;
+
+	if (!hw || !hw->caps || !hw->caps->pdev) {
+		SDE_ERROR("invalid rotator hw\n");
+		return NULL;
+	}
+
+	pdev = hw->caps->pdev;
+
+	if (hw->downscale_caps)
+		return hw->downscale_caps;
+
+	len = sde_rotator_inline_get_downscale_caps(pdev, NULL, 0);
+	if (len < 0) {
+		SDE_ERROR("invalid scaling caps %d\n", len);
+		return NULL;
+	}
+
+	/* add one for ending zero */
+	len += 1;
+	hw->downscale_caps = kzalloc(len, GFP_KERNEL);
+	if (!hw->downscale_caps)
+		return NULL;
+
+	sde_rotator_inline_get_downscale_caps(pdev, hw->downscale_caps, len);
+
+	return hw->downscale_caps;
+}
+
+/**
+ * sde_hw_rot_get_cache_size - get cache size
+ * @hw: Pointer to rotator hardware driver
+ * return: size of cache
+ */
+static size_t sde_hw_rot_get_cache_size(struct sde_hw_rot *hw)
+{
+	if (!hw || !hw->caps) {
+		SDE_ERROR("invalid rotator hw\n");
+		return 0;
+	}
+
+	return hw->caps->slice_size;
+}
+
+/**
+ * sde_hw_rot_get_maxlinewidth - get maximum line width of rotator
+ * @hw: Pointer to rotator hardware driver
+ * return: maximum line width
+ */
+static int sde_hw_rot_get_maxlinewidth(struct sde_hw_rot *hw)
+{
+	struct platform_device *pdev;
+
+	if (!hw || !hw->caps || !hw->caps->pdev) {
+		SDE_ERROR("invalid rotator hw\n");
+		return 0;
+	}
+
+	pdev = hw->caps->pdev;
+
+	return sde_rotator_inline_get_maxlinewidth(pdev);
+}
+
+/**
+ * _setup_rot_ops - setup rotator operations
+ * @ops: Pointer to operation table
+ * @features: available feature bitmask
+ * return: none
+ */
+static void _setup_rot_ops(struct sde_hw_rot_ops *ops, unsigned long features)
+{
+	ops->commit = sde_hw_rot_commit;
+	ops->get_format_caps = sde_hw_rot_get_format_caps;
+	ops->get_downscale_caps = sde_hw_rot_get_downscale_caps;
+	ops->get_cache_size = sde_hw_rot_get_cache_size;
+	ops->get_maxlinewidth = sde_hw_rot_get_maxlinewidth;
+}
+
+/**
+ * sde_hw_rot_blk_stop - stop rotator block
+ * @hw_blk: Pointer to base hardware block
+ * return: none
+ */
+static void sde_hw_rot_blk_stop(struct sde_hw_blk *hw_blk)
+{
+	struct sde_hw_rot *hw_rot = to_sde_hw_rot(hw_blk);
+
+	SDE_DEBUG("type:%d id:%d\n", hw_blk->type, hw_blk->id);
+
+	sde_hw_rot_stop(hw_rot);
+}
+
+/**
+ * sde_hw_rot_blk_start - start rotator block
+ * @hw_blk: Pointer to base hardware block
+ * return: 0 if success; error code otherwise
+ */
+static int sde_hw_rot_blk_start(struct sde_hw_blk *hw_blk)
+{
+	struct sde_hw_rot *hw_rot = to_sde_hw_rot(hw_blk);
+	int rc = 0;
+
+	SDE_DEBUG("type:%d id:%d\n", hw_blk->type, hw_blk->id);
+
+	rc = sde_hw_rot_start(hw_rot);
+
+	return rc;
+}
+
+static struct sde_hw_blk_ops sde_hw_rot_ops = {
+	.start = sde_hw_rot_blk_start,
+	.stop = sde_hw_rot_blk_stop,
+};
+
+/**
+ * sde_hw_rot_init - create/initialize given rotator instance
+ * @idx: index of given rotator
+ * @addr: i/o address mapping
+ * @m: Pointer to mdss catalog
+ * return: Pointer to hardware rotator driver of the given instance
+ */
+struct sde_hw_rot *sde_hw_rot_init(enum sde_rot idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_rot *c;
+	struct sde_rot_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _rot_offset(idx, m, addr, &c->hw);
+	if (IS_ERR(cfg)) {
+		WARN(1, "Unable to find rot idx=%d\n", idx);
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_rot_ops(&c->ops, c->caps->features);
+
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_ROT, idx,
+			&sde_hw_rot_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+/**
+ * sde_hw_rot_destroy - destroy given hardware rotator driver
+ * @hw_rot: Pointer to hardware rotator driver
+ * return: none
+ */
+void sde_hw_rot_destroy(struct sde_hw_rot *hw_rot)
+{
+	sde_hw_blk_destroy(&hw_rot->base);
+	kfree(hw_rot->downscale_caps);
+	kfree(hw_rot->format_caps);
+	kfree(hw_rot);
+}
+
+struct sde_hw_rot *sde_hw_rot_get(struct sde_hw_rot *hw_rot)
+{
+	struct sde_hw_blk *hw_blk = sde_hw_blk_get(hw_rot ? &hw_rot->base :
+			NULL, SDE_HW_BLK_ROT, -1);
+
+	return IS_ERR_OR_NULL(hw_blk) ? NULL : to_sde_hw_rot(hw_blk);
+}
+
+void sde_hw_rot_put(struct sde_hw_rot *hw_rot)
+{
+	struct sde_hw_blk *hw_blk = hw_rot ? &hw_rot->base : NULL;
+
+	sde_hw_blk_put(hw_blk);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
new file mode 100644
index 0000000..a4f5b49
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_ROT_H
+#define _SDE_HW_ROT_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_blk.h"
+
+struct sde_hw_rot;
+
+/**
+ * enum sde_hw_rot_cmd_type - type of rotator hardware command
+ * @SDE_HW_ROT_CMD_VALIDATE: validate rotator command; do not commit
+ * @SDE_HW_ROT_CMD_COMMIT: commit/execute rotator command
+ * @SDE_HW_ROT_CMD_CLEANUP: cleanup rotator command after it is done
+ */
+enum sde_hw_rot_cmd_type {
+	SDE_HW_ROT_CMD_VALIDATE,
+	SDE_HW_ROT_CMD_COMMIT,
+	SDE_HW_ROT_CMD_CLEANUP,
+};
+
+/**
+ * struct sde_hw_rot_cmd - definition of hardware rotation command
+ * @master: true if client is the master in source split inline rotation
+ * @sequence_id: command sequence identifier
+ * @fps: frame rate of the stream in frames per second
+ * @rot90: true if 90 degree counter-clockwise rotation is required
+ * @hflip: true if horizontal flip is required prior to rotation
+ * @vflip: true if vertical flip is required prior to rotation
+ * @secure: true if image content is in secure domain
+ * @video_mode: true if rotator is feeding into video interface
+ * @clkrate: clock rate in Hz
+ * @prefill_bw: prefill bandwidth in Bps (video mode only)
+ * @src_iova: source i/o virtual address
+ * @src_len: source i/o buffer length
+ * @src_planes: source plane number
+ * @src_format: pointer to source sde pixel format
+ * @src_pixel_format: source pixel format in drm fourcc
+ * @src_modifier: source pixel format modifier
+ * @src_width: source width in pixel
+ * @src_height: source height in pixel
+ * @src_stride: source buffer stride in bytes
+ * @src_rect_x: source rectangle x coordinate
+ * @src_rect_y: source rectangle y coordinate
+ * @src_rect_w: source rectangle width
+ * @src_rect_h: source rectangle height
+ * @dst_writeback: true if writeback of rotated output is required
+ * @dst_iova: destination i/o virtual address
+ * @dst_len: destination i/o buffer length
+ * @dst_planes: destination plane number
+ * @dst_format: pointer to destination sde pixel format (input/output)
+ * @dst_pixel_format: destination pixel format in drm fourcc (input/output)
+ * @dst_modifier: destination pixel format modifier (input/output)
+ * @dst_rect_x: destination rectangle x coordinate
+ * @dst_rect_y: destination rectangle y coordinate
+ * @dst_rect_w: destination rectangle width
+ * @dst_rect_h: destination rectangle height
+ * @priv_handle: private handle of rotator driver (output)
+ */
+struct sde_hw_rot_cmd {
+	bool master;
+	u32 sequence_id;
+	u32 fps;
+	bool rot90;
+	bool hflip;
+	bool vflip;
+	bool secure;
+	bool video_mode;
+	u64 clkrate;
+	u64 prefill_bw;
+	dma_addr_t src_iova[4];
+	u32 src_len[4];
+	u32 src_planes;
+	const struct sde_format *src_format;
+	u32 src_pixel_format;
+	u64 src_modifier;
+	u32 src_width;
+	u32 src_height;
+	u32 src_stride;
+	u32 src_rect_x;
+	u32 src_rect_y;
+	u32 src_rect_w;
+	u32 src_rect_h;
+	bool dst_writeback;
+	dma_addr_t dst_iova[4];
+	u32 dst_len[4];
+	u32 dst_planes;
+	const struct sde_format *dst_format;
+	u32 dst_pixel_format;
+	u64 dst_modifier;
+	u32 dst_rect_x;
+	u32 dst_rect_y;
+	u32 dst_rect_w;
+	u32 dst_rect_h;
+	void *priv_handle;
+};
+
+/**
+ * struct sde_hw_rot_ops - interface to the rotator hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_rot_ops {
+	int (*commit)(struct sde_hw_rot *hw, struct sde_hw_rot_cmd *data,
+			enum sde_hw_rot_cmd_type cmd);
+	const struct sde_format_extended *(*get_format_caps)(
+			struct sde_hw_rot *hw);
+	const char *(*get_downscale_caps)(struct sde_hw_rot *hw);
+	size_t (*get_cache_size)(struct sde_hw_rot *hw);
+	int (*get_maxlinewidth)(struct sde_hw_rot *hw);
+};
+
+/**
+ * struct sde_hw_rot : ROT driver object
+ * @base: hw block base object
+ * @hw: hardware address map
+ * @idx: instance index
+ * @caps: Pointer to rotator catalog configuration
+ * @ops: operation table
+ * @rot_ctx: pointer to private rotator context
+ * @format_caps: pointer to pixel format capability array
+ * @downscale_caps: pointer to scaling capability string
+ */
+struct sde_hw_rot {
+	struct sde_hw_blk base;
+	struct sde_hw_blk_reg_map hw;
+	int idx;
+	const struct sde_rot_cfg *caps;
+	struct sde_hw_rot_ops ops;
+	void *rot_ctx;
+	struct sde_format_extended *format_caps;
+	char *downscale_caps;
+};
+
+/**
+ * sde_hw_rot_init - initialize and return rotator hw driver object.
+ * @idx:  rotator index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct sde_hw_rot *sde_hw_rot_init(enum sde_rot idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_rot_destroy - destroy rotator hw driver object.
+ * @hw_rot:  Pointer to rotator hw driver object
+ */
+void sde_hw_rot_destroy(struct sde_hw_rot *hw_rot);
+
+/**
+ * to_sde_hw_rot - convert base object sde_hw_blk to rotator object
+ * @hw: Pointer to base hardware block
+ * return: Pointer to rotator hardware block
+ */
+static inline struct sde_hw_rot *to_sde_hw_rot(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_rot, base);
+}
+
+/**
+ * sde_hw_rot_get - get next available hardware rotator, or increment reference
+ *	count if hardware rotator provided
+ * @hw_rot: Pointer to hardware rotator
+ * return: Pointer to rotator hardware block if success; NULL otherwise
+ */
+struct sde_hw_rot *sde_hw_rot_get(struct sde_hw_rot *hw_rot);
+
+/**
+ * sde_hw_rot_put - put the given hardware rotator
+ * @hw_rot: Pointer to hardware rotator
+ * return: none
+ */
+void sde_hw_rot_put(struct sde_hw_rot *hw_rot);
+
+#endif /* _SDE_HW_ROT_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 71c3855..37fb81d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -93,6 +93,11 @@
 #define SSPP_QOS_CTRL_CREQ_VBLANK_MASK     0x3
 #define SSPP_QOS_CTRL_CREQ_VBLANK_OFF      20
 
+#define SSPP_SYS_CACHE_MODE                0x1BC
+#define SSPP_SBUF_STATUS_PLANE0            0x1C0
+#define SSPP_SBUF_STATUS_PLANE1            0x1C4
+#define SSPP_SBUF_STATUS_PLANE_EMPTY       BIT(16)
+
 /* SDE_SSPP_SCALER_QSEED2 */
 #define SCALE_CONFIG                       0x04
 #define COMP0_3_PHASE_STEP_X               0x10
@@ -994,6 +999,45 @@
 	SDE_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
 }
 
+static void sde_hw_sspp_setup_sys_cache(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_sc_cfg *cfg)
+{
+	u32 idx, val;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	if (!cfg)
+		return;
+
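+	/*
+	 * Pack the system cache attributes into SSPP_SYS_CACHE_MODE:
+	 * op_mode in bits [19:18], read enable in bit 15, read SCID in
+	 * bits [12:8], no-allocate in bit 4 and read op type in bits [3:0].
+	 */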
+	val = ((cfg->op_mode & 0x3) << 18) |
+			((cfg->rd_en & 0x1) << 15) |
+			((cfg->rd_scid & 0x1f) << 8) |
+			((cfg->rd_noallocate & 0x1) << 4) |
+			((cfg->rd_op_type & 0xf) << 0);
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_SYS_CACHE_MODE + idx, val);
+}
+
+static void sde_hw_sspp_get_sbuf_status(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_sbuf_status *status)
+{
+	u32 idx, val;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	if (!status)
+		return;
+
+	val = SDE_REG_READ(&ctx->hw, SSPP_SBUF_STATUS_PLANE0 + idx);
+	status->empty[0] = val & SSPP_SBUF_STATUS_PLANE_EMPTY ? true : false;
+	status->rd_ptr[0] = val & 0xffff;
+	val = SDE_REG_READ(&ctx->hw, SSPP_SBUF_STATUS_PLANE1 + idx);
+	status->empty[1] = val & SSPP_SBUF_STATUS_PLANE_EMPTY ? true : false;
+	status->rd_ptr[1] = val & 0xffff;
+}
+
 static void _setup_layer_ops(struct sde_hw_pipe *c,
 		unsigned long features)
 {
@@ -1047,6 +1091,11 @@
 			c->ops.setup_pa_memcolor =
 				sde_setup_pipe_pa_memcol_v1_7;
 	}
+
+	if (test_bit(SDE_SSPP_SBUF, &features)) {
+		c->ops.setup_sys_cache = sde_hw_sspp_setup_sys_cache;
+		c->ops.get_sbuf_status = sde_hw_sspp_get_sbuf_status;
+	}
 }
 
 static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 2fa01e4..1b81e54 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -308,6 +308,56 @@
 };
 
 /**
+ * enum system cache rotation operation mode
+ */
+enum {
+	SDE_PIPE_SC_OP_MODE_OFFLINE,
+	SDE_PIPE_SC_OP_MODE_INLINE_SINGLE,
+	SDE_PIPE_SC_OP_MODE_INLINE_LEFT,
+	SDE_PIPE_SC_OP_MODE_INLINE_RIGHT,
+};
+
+/**
+ * enum system cache read operation type
+ */
+enum {
+	SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE,
+	SDE_PIPE_SC_RD_OP_TYPE_INVALIDATE,
+	SDE_PIPE_SC_RD_OP_TYPE_EVICTION,
+};
+
+/**
+ * struct sde_hw_pipe_sc_cfg - system cache configuration
+ * @op_mode: rotation operating mode
+ * @rd_en: system cache read enable
+ * @rd_scid: system cache read block id
+ * @rd_noallocate: system cache read no allocate attribute
+ * @rd_op_type: system cache read operation type
+ */
+struct sde_hw_pipe_sc_cfg {
+	u32 op_mode;
+	bool rd_en;
+	u32 rd_scid;
+	bool rd_noallocate;
+	u32 rd_op_type;
+};
+
+/**
+ * Maximum number of stream buffer planes
+ */
+#define SDE_PIPE_SBUF_PLANE_NUM	2
+
+/**
+ * struct sde_hw_pipe_sbuf_status - stream buffer status
+ * @empty: indicate if stream buffer is empty or not
+ * @rd_ptr: current read pointer of stream buffer
+ */
+struct sde_hw_pipe_sbuf_status {
+	bool empty[SDE_PIPE_SBUF_PLANE_NUM];
+	u32 rd_ptr[SDE_PIPE_SBUF_PLANE_NUM];
+};
+
+/**
  * struct sde_hw_sspp_ops - interface to the SSPP Hw driver functions
  * Caller must call the init function to get the pipe context for each pipe
  * Assumption is these functions will be called after clocks are enabled
@@ -488,6 +538,22 @@
 		struct sde_hw_pipe_cfg *pipe_cfg,
 		struct sde_hw_pixel_ext *pe_cfg,
 		void *scaler_cfg);
+
+	/**
+	 * setup_sys_cache - setup system cache configuration
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to system cache configuration
+	 */
+	void (*setup_sys_cache)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_sc_cfg *cfg);
+
+	/**
+	 * get_sbuf_status - get stream buffer status
+	 * @ctx: Pointer to pipe context
+	 * @status: Pointer to stream buffer status
+	 */
+	void (*get_sbuf_status)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_sbuf_status *status);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
index 008b657..c1bfb79 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -47,6 +47,12 @@
 #define SDE_REG_WRITE(c, off, val) sde_reg_write(c, off, val, #off)
 #define SDE_REG_READ(c, off) sde_reg_read(c, off)
 
+#define MISR_FRAME_COUNT_MASK		0xFF
+#define MISR_CTRL_ENABLE		BIT(8)
+#define MISR_CTRL_STATUS		BIT(9)
+#define MISR_CTRL_STATUS_CLEAR		BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK	BIT(31)
+
 void *sde_hw_util_get_dir(void);
 
 void sde_hw_csc_setup(struct sde_hw_blk_reg_map  *c,
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index c7cb190..8bc6a2b 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -21,6 +21,7 @@
 #include <drm/drm_crtc.h>
 #include <linux/debugfs.h>
 #include <linux/of_irq.h>
+#include <linux/dma-buf.h>
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
@@ -80,7 +81,8 @@
 
 static int sde_kms_hw_init(struct msm_kms *kms);
 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
-
+static int _sde_kms_register_events(struct msm_kms *kms,
+		struct drm_mode_object *obj, u32 event, bool en);
 bool sde_is_custom_client(void)
 {
 	return sdecustom;
@@ -322,18 +324,6 @@
 
 static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
 {
-	return 0;
-}
-
-static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms,
-		struct dentry *parent)
-{
-}
-
-static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
-		struct dentry *parent)
-{
-	return 0;
 }
 #endif
 
@@ -808,6 +798,284 @@
 	return ret;
 }
 
+/**
+ * struct sde_kms_fbo_fb - framebuffer creation list
+ * @list: list of framebuffer attached to framebuffer object
+ * @fb: Pointer to framebuffer attached to framebuffer object
+ */
+struct sde_kms_fbo_fb {
+	struct list_head list;
+	struct drm_framebuffer *fb;
+};
+
+struct drm_framebuffer *sde_kms_fbo_create_fb(struct drm_device *dev,
+		struct sde_kms_fbo *fbo)
+{
+	struct drm_framebuffer *fb = NULL;
+	struct sde_kms_fbo_fb *fbo_fb;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	u32 base_offset = 0;
+	int i, ret;
+
+	if (!dev) {
+		SDE_ERROR("invalid drm device node\n");
+		return NULL;
+	}
+
+	fbo_fb = kzalloc(sizeof(struct sde_kms_fbo_fb), GFP_KERNEL);
+	if (!fbo_fb)
+		return NULL;
+
+	mode_cmd.pixel_format = fbo->pixel_format;
+	mode_cmd.width = fbo->width;
+	mode_cmd.height = fbo->height;
+	mode_cmd.flags = fbo->flags;
+
+	for (i = 0; i < fbo->nplane; i++) {
+		mode_cmd.offsets[i] = base_offset;
+		mode_cmd.pitches[i] = fbo->layout.plane_pitch[i];
+		mode_cmd.modifier[i] = fbo->modifier[i];
+		base_offset += fbo->layout.plane_size[i];
+		SDE_DEBUG("offset[%d]:%x\n", i, mode_cmd.offsets[i]);
+	}
+
+	fb = msm_framebuffer_init(dev, &mode_cmd, fbo->bo);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
+		fb = NULL;
+		SDE_ERROR("failed to allocate fb %d\n", ret);
+		goto fail;
+	}
+
+	/* need to take one reference for gem object */
+	for (i = 0; i < fbo->nplane; i++)
+		drm_gem_object_reference(fbo->bo[i]);
+
+	SDE_DEBUG("register private fb:%d\n", fb->base.id);
+
+	INIT_LIST_HEAD(&fbo_fb->list);
+	fbo_fb->fb = fb;
+	drm_framebuffer_reference(fbo_fb->fb);
+	list_add_tail(&fbo_fb->list, &fbo->fb_list);
+
+	return fb;
+
+fail:
+	kfree(fbo_fb);
+	return NULL;
+}
+
+static void sde_kms_fbo_destroy(struct sde_kms_fbo *fbo)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct sde_kms_fbo_fb *curr, *next;
+	int i;
+
+	if (!fbo) {
+		SDE_ERROR("invalid drm device node\n");
+		return;
+	}
+	dev = fbo->dev;
+
+	if (!dev || !dev->dev_private) {
+		SDE_ERROR("invalid drm device node\n");
+		return;
+	}
+	priv = dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", fbo->width, fbo->height,
+			fbo->pixel_format >> 0, fbo->pixel_format >> 8,
+			fbo->pixel_format >> 16, fbo->pixel_format >> 24,
+			fbo->modifier[0], fbo->flags);
+
+	list_for_each_entry_safe(curr, next, &fbo->fb_list, list) {
+		SDE_DEBUG("unregister private fb:%d\n", curr->fb->base.id);
+		drm_framebuffer_unregister_private(curr->fb);
+		drm_framebuffer_unreference(curr->fb);
+		list_del(&curr->list);
+		kfree(curr);
+	}
+
+	for (i = 0; i < fbo->layout.num_planes; i++) {
+		if (fbo->bo[i]) {
+			mutex_lock(&dev->struct_mutex);
+			drm_gem_object_unreference(fbo->bo[i]);
+			mutex_unlock(&dev->struct_mutex);
+			fbo->bo[i] = NULL;
+		}
+	}
+
+	if (fbo->dma_buf) {
+		dma_buf_put(fbo->dma_buf);
+		fbo->dma_buf = NULL;
+	}
+
+	if (sde_kms->iclient && fbo->ihandle) {
+		ion_free(sde_kms->iclient, fbo->ihandle);
+		fbo->ihandle = NULL;
+	}
+}
+
+struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
+		u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct sde_kms_fbo *fbo;
+	int i, ret;
+
+	if (!dev || !dev->dev_private) {
+		SDE_ERROR("invalid drm device node\n");
+		return NULL;
+	}
+	priv = dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms handle\n");
+		return NULL;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG("%dx%d@%c%c%c%c/%llx/%x\n", width, height,
+			pixel_format >> 0, pixel_format >> 8,
+			pixel_format >> 16, pixel_format >> 24,
+			modifier[0], flags);
+
+	fbo = kzalloc(sizeof(struct sde_kms_fbo), GFP_KERNEL);
+	if (!fbo)
+		return NULL;
+
+	atomic_set(&fbo->refcount, 0);
+	INIT_LIST_HEAD(&fbo->fb_list);
+	fbo->dev = dev;
+	fbo->width = width;
+	fbo->height = height;
+	fbo->pixel_format = pixel_format;
+	fbo->flags = flags;
+	for (i = 0; i < ARRAY_SIZE(fbo->modifier); i++)
+		fbo->modifier[i] = modifier[i];
+	fbo->nplane = drm_format_num_planes(fbo->pixel_format);
+	fbo->fmt = sde_get_sde_format_ext(fbo->pixel_format, fbo->modifier,
+			fbo->nplane);
+	if (!fbo->fmt) {
+		ret = -EINVAL;
+		SDE_ERROR("failed to find pixel format\n");
+		goto done;
+	}
+
+	ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
+			&fbo->layout);
+	if (ret) {
+		SDE_ERROR("failed to get plane sizes\n");
+		goto done;
+	}
+
+	/* allocate backing buffer object */
+	if (sde_kms->iclient) {
+		u32 heap_id = fbo->flags & DRM_MODE_FB_SECURE ?
+				ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID) :
+				ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+		fbo->ihandle = ion_alloc(sde_kms->iclient,
+				fbo->layout.total_size, SZ_4K, heap_id, 0);
+		if (IS_ERR_OR_NULL(fbo->ihandle)) {
+			SDE_ERROR("failed to alloc ion memory\n");
+			ret = fbo->ihandle ? PTR_ERR(fbo->ihandle) : -ENOMEM;
+			fbo->ihandle = NULL;
+			goto done;
+		}
+
+		fbo->dma_buf = ion_share_dma_buf(sde_kms->iclient,
+				fbo->ihandle);
+		if (IS_ERR(fbo->dma_buf)) {
+			SDE_ERROR("failed to share ion memory\n");
+			ret = -ENOMEM;
+			fbo->dma_buf = NULL;
+			goto done;
+		}
+
+		fbo->bo[0] = dev->driver->gem_prime_import(dev,
+				fbo->dma_buf);
+		if (IS_ERR(fbo->bo[0])) {
+			SDE_ERROR("failed to import ion memory\n");
+			ret = PTR_ERR(fbo->bo[0]);
+			fbo->bo[0] = NULL;
+			goto done;
+		}
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
+				MSM_BO_SCANOUT | MSM_BO_WC);
+		if (IS_ERR(fbo->bo[0])) {
+			mutex_unlock(&dev->struct_mutex);
+			SDE_ERROR("failed to new gem buffer\n");
+			ret = PTR_ERR(fbo->bo[0]);
+			fbo->bo[0] = NULL;
+			goto done;
+		}
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	for (i = 1; i < fbo->layout.num_planes; i++) {
+		fbo->bo[i] = fbo->bo[0];
+		drm_gem_object_reference(fbo->bo[i]);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+done:
+	if (ret) {
+		sde_kms_fbo_destroy(fbo);
+		kfree(fbo);
+		fbo = NULL;
+	} else {
+		sde_kms_fbo_reference(fbo);
+	}
+
+	return fbo;
+}
+
+int sde_kms_fbo_reference(struct sde_kms_fbo *fbo)
+{
+	if (!fbo) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
+			atomic_read(&fbo->refcount));
+
+	atomic_inc(&fbo->refcount);
+
+	return 0;
+}
+
+void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo)
+{
+	if (!fbo) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	SDE_DEBUG("%pS refcount:%d\n", __builtin_return_address(0),
+			atomic_read(&fbo->refcount));
+
+	if (!atomic_read(&fbo->refcount)) {
+		SDE_ERROR("invalid refcount\n");
+		return;
+	} else if (atomic_dec_return(&fbo->refcount) == 0) {
+		sde_kms_fbo_destroy(fbo);
+	}
+}
+
 static int sde_kms_postinit(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
@@ -861,7 +1129,11 @@
 	/* safe to call these more than once during shutdown */
 	_sde_debugfs_destroy(sde_kms);
 	_sde_kms_mmu_destroy(sde_kms);
-	sde_core_perf_destroy(&sde_kms->perf);
+
+	if (sde_kms->iclient) {
+		ion_client_destroy(sde_kms->iclient);
+		sde_kms->iclient = NULL;
+	}
 
 	if (sde_kms->catalog) {
 		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
@@ -948,6 +1220,7 @@
 	.get_format      = sde_get_msm_format,
 	.round_pixclk    = sde_kms_round_pixclk,
 	.destroy         = sde_kms_destroy,
+	.register_events = _sde_kms_register_events,
 };
 
 /* the caller api needs to turn on clock before calling it */
@@ -1143,6 +1416,8 @@
 	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
 	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
 		rc = PTR_ERR(sde_kms->core_client);
+		if (!sde_kms->core_client)
+			rc = -EINVAL;
 		SDE_ERROR("sde power client create failed: %d\n", rc);
 		sde_kms->core_client = NULL;
 		goto error;
@@ -1162,6 +1437,8 @@
 	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
 	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
 		rc = PTR_ERR(sde_kms->catalog);
+		if (!sde_kms->catalog)
+			rc = -EINVAL;
 		SDE_ERROR("catalog init failed: %d\n", rc);
 		sde_kms->catalog = NULL;
 		goto power_error;
@@ -1189,6 +1466,8 @@
 	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
 	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
 		rc = PTR_ERR(sde_kms->hw_mdp);
+		if (!sde_kms->hw_mdp)
+			rc = -EINVAL;
 		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
 		sde_kms->hw_mdp = NULL;
 		goto power_error;
@@ -1201,12 +1480,21 @@
 				sde_kms->vbif[vbif_idx], sde_kms->catalog);
 		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
 			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
+			if (!sde_kms->hw_vbif[vbif_idx])
+				rc = -EINVAL;
 			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
 			sde_kms->hw_vbif[vbif_idx] = NULL;
 			goto power_error;
 		}
 	}
 
+	sde_kms->iclient = msm_ion_client_create(dev->unique);
+	if (IS_ERR(sde_kms->iclient)) {
+		rc = PTR_ERR(sde_kms->iclient);
+		SDE_DEBUG("msm_ion_client not available: %d\n", rc);
+		sde_kms->iclient = NULL;
+	}
+
 	/*
 	 * Now we need to read the HW catalog and initialize resources such as
 	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
@@ -1252,6 +1540,8 @@
 	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
 	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
 		rc = PTR_ERR(sde_kms->hw_intr);
+		if (!sde_kms->hw_intr)
+			rc = -EINVAL;
 		SDE_ERROR("hw_intr init failed: %d\n", rc);
 		sde_kms->hw_intr = NULL;
 		goto hw_intr_init_err;
@@ -1296,3 +1586,32 @@
 
 	return &sde_kms->base;
 }
+
+static int _sde_kms_register_events(struct msm_kms *kms,
+		struct drm_mode_object *obj, u32 event, bool en)
+{
+	int ret = 0;
+	struct drm_crtc *crtc = NULL;
+	struct drm_connector *conn = NULL;
+	struct sde_kms *sde_kms = NULL;
+
+	if (!kms || !obj) {
+		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	switch (obj->type) {
+	case DRM_MODE_OBJECT_CRTC:
+		crtc = obj_to_crtc(obj);
+		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
+		break;
+	case DRM_MODE_OBJECT_CONNECTOR:
+		conn = obj_to_connector(obj);
+		ret = sde_connector_register_custom_event(sde_kms, conn, event,
+				en);
+		break;
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 1acdf00..ebc277e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -19,6 +19,8 @@
 #ifndef __SDE_KMS_H__
 #define __SDE_KMS_H__
 
+#include <linux/msm_ion.h>
+
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
@@ -115,6 +117,37 @@
 	struct dentry *debugfs_file;
 };
 
+/**
+ * struct sde_kms_fbo - framebuffer memory object
+ * @refcount: reference/usage count of this object
+ * @dev: Pointer to containing drm device
+ * @width: width of the framebuffer
+ * @height: height of the framebuffer
+ * @pixel_format: pixel format of the framebuffer in drm fourcc
+ * @flags: drm framebuffer flags
+ * @modifier: pixel format modifier of the framebuffer
+ * @nplane: number of planes of the framebuffer
+ * @fmt: Pointer to sde format descriptor
+ * @layout: sde format layout descriptor
+ * @ihandle: framebuffer object ion handle
+ * @dma_buf: framebuffer object dma buffer
+ * @bo: per plane buffer object
+ * @fb_list: list of framebuffers created from this buffer object
+ */
+struct sde_kms_fbo {
+	atomic_t refcount;
+	struct drm_device *dev;
+	u32 width, height;
+	u32 pixel_format;
+	u32 flags;
+	u64 modifier[4];
+	int nplane;
+	const struct sde_format *fmt;
+	struct sde_hw_fmt_layout layout;
+	struct ion_handle *ihandle;
+	struct dma_buf *dma_buf;
+	struct drm_gem_object *bo[4];
+	struct list_head fb_list;
+};
+
 struct sde_kms {
 	struct msm_kms base;
 	struct drm_device *dev;
@@ -125,6 +158,8 @@
 	int mmu_id[MSM_SMMU_DOMAIN_MAX];
 	struct sde_power_client *core_client;
 
+	struct ion_client *iclient;
+
 	/* directory entry for debugfs */
 	struct dentry *debugfs_danger;
 	struct dentry *debugfs_vbif;
@@ -376,4 +411,42 @@
 int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
+/**
+ * sde_kms_fbo_create_fb - create framebuffer from given framebuffer object
+ * @dev: Pointer to drm device
+ * @fbo: Pointer to framebuffer object
+ * return: Pointer to drm framebuffer on success; NULL on error
+ */
+struct drm_framebuffer *sde_kms_fbo_create_fb(struct drm_device *dev,
+		struct sde_kms_fbo *fbo);
+
+/**
+ * sde_kms_fbo_alloc - create framebuffer object with given format parameters
+ * @dev: pointer to drm device
+ * @width: width of framebuffer
+ * @height: height of framebuffer
+ * @pixel_format: pixel format of framebuffer
+ * @modifier: pixel format modifier
+ * @flags: DRM_MODE_FB flags
+ * return: Pointer to framebuffer memory object on success; NULL on error
+ */
+struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev,
+		u32 width, u32 height, u32 pixel_format,
+		u64 modifiers[4], u32 flags);
+
+/**
+ * sde_kms_fbo_reference - increment reference count of given framebuffer object
+ * @fbo: Pointer to framebuffer memory object
+ * return: 0 on success; error code otherwise
+ */
+int sde_kms_fbo_reference(struct sde_kms_fbo *fbo);
+
+/**
+ * sde_kms_fbo_unreference - decrement reference count of given framebuffer
+ *	object
+ * @fbo: Pointer to framebuffer memory object
+ * return: none
+ */
+void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo);
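+
+/*
+ * Illustrative usage sketch (not from an actual caller; error handling
+ * omitted): allocate a private NV12 buffer object, wrap it in a drm
+ * framebuffer, then drop both references when done.
+ *
+ *	u64 modifiers[4] = {0};
+ *	struct sde_kms_fbo *fbo;
+ *	struct drm_framebuffer *fb;
+ *
+ *	fbo = sde_kms_fbo_alloc(dev, 1920, 1080, DRM_FORMAT_NV12,
+ *			modifiers, 0);
+ *	fb = sde_kms_fbo_create_fb(dev, fbo);
+ *	...
+ *	drm_framebuffer_unreference(fb);
+ *	sde_kms_fbo_unreference(fbo);
+ */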
+
 #endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 78c596d..6e1fe33 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -19,6 +19,7 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 
 #include <linux/debugfs.h>
+#include <linux/dma-buf.h>
 #include <uapi/drm/sde_drm.h>
 #include <uapi/drm/msm_drm_pp.h>
 
@@ -35,6 +36,15 @@
 #include "sde_vbif.h"
 #include "sde_plane.h"
 #include "sde_color_processing.h"
+#include "sde_hw_rot.h"
+
+static bool suspend_blank = true;
+module_param(suspend_blank, bool, 0400);
+MODULE_PARM_DESC(suspend_blank,
+		"If set, active planes will force their outputs to black,\n"
+		"by temporarily enabling the color fill, when recovering\n"
+		"from a system resume instead of attempting to display the\n"
+		"last provided frame buffer.");
 
 #define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
 		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
@@ -74,6 +84,8 @@
 #define SDE_QSEED3_DEFAULT_PRELOAD_H 0x4
 #define SDE_QSEED3_DEFAULT_PRELOAD_V 0x3
 
+#define DEFAULT_REFRESH_RATE	60
+
 /**
  * enum sde_plane_qos - Different qos configurations for each pipe
  *
@@ -93,6 +105,10 @@
  * @csc_cfg: Decoded user configuration for csc
  * @csc_usr_ptr: Points to csc_cfg if valid user config available
  * @csc_ptr: Points to sde_csc_cfg structure to use for current
+ * @catalog: Points to sde catalog structure
+ * @sbuf_mode: force stream buffer mode if set
+ * @sbuf_writeback: force stream buffer writeback if set
+ * @blob_rot_caps: Pointer to rotator capability blob
  */
 struct sde_plane {
 	struct drm_plane base;
@@ -115,6 +131,9 @@
 	bool is_error;
 	bool is_rt_pipe;
 	bool is_virtual;
+	struct sde_mdss_cfg *catalog;
+	u32 sbuf_mode;
+	u32 sbuf_writeback;
 
 	struct sde_hw_pixel_ext pixel_ext;
 	bool pixel_ext_usr;
@@ -130,6 +149,7 @@
 	struct msm_property_info property_info;
 	struct msm_property_data property_data[PLANE_PROP_COUNT];
 	struct drm_property_blob *blob_info;
+	struct drm_property_blob *blob_rot_caps;
 
 	/* debugfs related stuff */
 	struct dentry *debugfs_root;
@@ -141,11 +161,61 @@
 
 #define to_sde_plane(x) container_of(x, struct sde_plane, base)
 
+static struct sde_kms *_sde_plane_get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv;
+
+	if (!plane || !plane->dev)
+		return NULL;
+	priv = plane->dev->dev_private;
+	if (!priv)
+		return NULL;
+	return to_sde_kms(priv->kms);
+}
+
+/**
+ * _sde_plane_get_crtc_state - obtain crtc state attached to given plane state
+ * @pstate: Pointer to drm plane state
+ * return: Pointer to crtc state on success; NULL or error pointer otherwise
+ */
+static struct drm_crtc_state *_sde_plane_get_crtc_state(
+		struct drm_plane_state *pstate)
+{
+	struct drm_crtc_state *cstate;
+
+	if (!pstate || !pstate->crtc)
+		return NULL;
+
+	if (pstate->state)
+		cstate = drm_atomic_get_crtc_state(pstate->state, pstate->crtc);
+	else
+		cstate = pstate->crtc->state;
+
+	return cstate;
+}
+
 static bool sde_plane_enabled(struct drm_plane_state *state)
 {
 	return state && state->fb && state->crtc;
 }
 
+static bool sde_plane_sspp_enabled(struct drm_plane_state *state)
+{
+	return state && to_sde_plane_state(state)->rot.out_fb && state->crtc;
+}
+
+/**
+ * sde_plane_crtc_enabled - determine if crtc of given plane state is enabled
+ * @state: Pointer to drm plane state
+ * return: true if plane and the associated crtc are both enabled
+ */
+static bool sde_plane_crtc_enabled(struct drm_plane_state *state)
+{
+	return sde_plane_enabled(state) && state->crtc->state &&
+			state->crtc->state->active &&
+			state->crtc->state->enable;
+}
+
 /**
  * _sde_plane_calc_fill_level - calculate fill level of the given source format
  * @plane:		Pointer to drm plane
@@ -699,8 +769,6 @@
 				psde->pipe_cfg.horz_decimation);
 		scale_cfg->src_height[i] = DECIMATED_DIMENSION(src_h,
 				psde->pipe_cfg.vert_decimation);
-		if (SDE_FORMAT_IS_YUV(fmt))
-			scale_cfg->src_width[i] &= ~0x1;
 		if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2) {
 			scale_cfg->src_width[i] /= chroma_subsmpl_h;
 			scale_cfg->src_height[i] /= chroma_subsmpl_v;
@@ -1131,6 +1199,7 @@
 		psde->pipe_cfg.src_rect.y = 0;
 		psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w;
 		psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h;
+		_sde_plane_setup_scaler(psde, fmt, 0);
 
 		if (psde->pipe_hw->ops.setup_format)
 			psde->pipe_hw->ops.setup_format(psde->pipe_hw,
@@ -1142,7 +1211,6 @@
 					&psde->pipe_cfg,
 					pstate->multirect_index);
 
-		_sde_plane_setup_scaler(psde, fmt, 0);
 		if (psde->pipe_hw->ops.setup_pe)
 			psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
 					&psde->pixel_ext);
@@ -1151,12 +1219,992 @@
 	return 0;
 }
 
-static int _sde_plane_mode_set(struct drm_plane *plane,
+/**
+ * _sde_plane_fb_get/put - framebuffer callback for crtc res ops
+ */
+static void *_sde_plane_fb_get(void *fb, u32 type, u64 tag)
+{
+	drm_framebuffer_reference(fb);
+	return fb;
+}
+static void _sde_plane_fb_put(void *fb)
+{
+	drm_framebuffer_unreference(fb);
+}
+static struct sde_crtc_res_ops fb_res_ops = {
+	.put = _sde_plane_fb_put,
+	.get = _sde_plane_fb_get,
+};
+
+/**
+ * _sde_plane_fbo_get/put - framebuffer object callback for crtc res ops
+ */
+static void *_sde_plane_fbo_get(void *fbo, u32 type, u64 tag)
+{
+	sde_kms_fbo_reference(fbo);
+	return fbo;
+}
+static void _sde_plane_fbo_put(void *fbo)
+{
+	sde_kms_fbo_unreference(fbo);
+}
+static struct sde_crtc_res_ops fbo_res_ops = {
+	.put = _sde_plane_fbo_put,
+	.get = _sde_plane_fbo_get,
+};
+
+/**
+ * sde_plane_rot_calc_prefill - calculate rotator start prefill
+ * @plane: Pointer to drm plane
+ * return: prefill time in line
+ */
+static u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
+{
+	struct drm_plane_state *state;
+	struct drm_crtc_state *cstate;
+	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
+	struct sde_kms *sde_kms;
+	u32 blocksize = 128;
+	u32 prefill_line = 0;
+
+	if (!plane || !plane->state || !plane->state->fb ||
+			!plane->state->crtc || !plane->state->crtc->state) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	sde_kms = _sde_plane_get_kms(plane);
+	state = plane->state;
+	cstate = state->crtc->state;
+	pstate = to_sde_plane_state(state);
+	rstate = &pstate->rot;
+
+	if (!rstate->rot_hw || !rstate->rot_hw->caps || !rstate->out_src_h ||
+			!sde_kms || !sde_kms->catalog) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
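+	/*
+	 * The inline prefill is one output block of lines plus the stream
+	 * buffer headroom provided by the catalog.
+	 */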
+	sde_format_get_block_size(rstate->out_fb_format, &blocksize,
+			&blocksize);
+	prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
+
+	SDE_DEBUG("plane%d prefill:%u\n", plane->base.id, prefill_line);
+
+	return prefill_line;
+}
+
+/**
+ * sde_plane_is_sbuf_mode - check if sspp of given plane is in streaming
+ *	buffer mode
+ * @plane: Pointer to drm plane
+ * @prefill: Pointer to prefill line count
+ * return: true if sspp is in stream buffer mode
+ */
+bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill)
+{
+	struct sde_plane_state *pstate = plane && plane->state ?
+			to_sde_plane_state(plane->state) : NULL;
+	struct sde_plane_rot_state *rstate = pstate ? &pstate->rot : NULL;
+	bool sbuf_mode = rstate ? rstate->out_sbuf : false;
+
+	if (prefill && sbuf_mode)
+		*prefill = sde_plane_rot_calc_prefill(plane);
+
+	return sbuf_mode;
+}
+
+/**
+ * sde_plane_rot_calc_cfg - calculate rotator/sspp configuration by
+ *	enumerating over all planes attached to the same rotator
+ * @plane: Pointer to drm plane
+ * @state: Pointer to drm state to be updated
+ * return: none
+ */
+static void sde_plane_rot_calc_cfg(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
+	struct sde_hw_blk *hw_blk;
+	struct drm_crtc_state *cstate;
+	struct drm_rect *in_rot, *out_rot;
+	struct drm_plane *attached_plane;
+	u32 dst_x, dst_y, dst_w, dst_h;
+	int found = 0;
+	int xpos = 0;
+	int ret;
+
+	if (!plane || !state || !state->state) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	cstate = _sde_plane_get_crtc_state(state);
+	if (IS_ERR_OR_NULL(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return;
+	}
+
+	pstate = to_sde_plane_state(state);
+	rstate = &pstate->rot;
+
+	if (!rstate->rot_hw) {
+		SDE_ERROR("invalid rotator hw\n");
+		return;
+	}
+
+	in_rot = &rstate->in_rot_rect;
+	in_rot->x1 = state->src_x;
+	in_rot->y1 = state->src_y;
+	in_rot->x2 = state->src_x + state->src_w;
+	in_rot->y2 = state->src_y + state->src_h;
+
+	out_rot = &rstate->out_rot_rect;
+	dst_x = sde_plane_get_property(pstate, PLANE_PROP_ROT_DST_X);
+	dst_y = sde_plane_get_property(pstate, PLANE_PROP_ROT_DST_Y);
+	dst_w = sde_plane_get_property(pstate, PLANE_PROP_ROT_DST_W);
+	dst_h = sde_plane_get_property(pstate, PLANE_PROP_ROT_DST_H);
+
+	if (!dst_w && !dst_h) {
+		rstate->out_rot_rect = rstate->in_rot_rect;
+		drm_rect_rotate(&rstate->out_rot_rect, state->fb->width << 16,
+				state->fb->height << 16, rstate->in_rotation);
+	} else {
+		out_rot->x1 = dst_x;
+		out_rot->y1 = dst_y;
+		out_rot->x2 = dst_x + dst_w;
+		out_rot->y2 = dst_y + dst_h;
+	}
+
+	rstate->out_src_rect = rstate->out_rot_rect;
+
+	hw_blk = &rstate->rot_hw->base;
+
+	/* enumerating over all planes attached to the same rotator */
+	drm_atomic_crtc_state_for_each_plane(attached_plane, cstate) {
+		struct drm_plane_state *attached_state;
+		struct sde_plane_state *attached_pstate;
+		struct sde_plane_rot_state *attached_rstate;
+		struct drm_rect attached_out_rect;
+
+		attached_state = drm_atomic_get_existing_plane_state(
+				state->state, attached_plane);
+
+		if (!attached_state)
+			continue;
+
+		attached_pstate = to_sde_plane_state(attached_state);
+		attached_rstate = &attached_pstate->rot;
+
+		if (attached_rstate->rot_hw != rstate->rot_hw)
+			continue;
+
+		found++;
+
+		/* skip itself */
+		if (attached_plane == plane)
+			continue;
+
+		/* find bounding rotator source roi */
+		if (attached_state->src_x < in_rot->x1)
+			in_rot->x1 = attached_state->src_x;
+
+		if (attached_state->src_y < in_rot->y1)
+			in_rot->y1 = attached_state->src_y;
+
+		if (attached_state->src_x + attached_state->src_w > in_rot->x2)
+			in_rot->x2 = attached_state->src_x +
+				attached_state->src_w;
+
+		if (attached_state->src_y + attached_state->src_h > in_rot->y2)
+			in_rot->y2 = attached_state->src_y +
+				attached_state->src_h;
+
+		/* find bounding rotator destination roi */
+		dst_x = sde_plane_get_property(attached_pstate,
+				PLANE_PROP_ROT_DST_X);
+		dst_y = sde_plane_get_property(attached_pstate,
+				PLANE_PROP_ROT_DST_Y);
+		dst_w = sde_plane_get_property(attached_pstate,
+				PLANE_PROP_ROT_DST_W);
+		dst_h = sde_plane_get_property(attached_pstate,
+				PLANE_PROP_ROT_DST_H);
+		if (!dst_w && !dst_h) {
+			attached_out_rect.x1 = attached_state->src_x;
+			attached_out_rect.y1 = attached_state->src_y;
+			attached_out_rect.x2 = attached_out_rect.x1 +
+					attached_state->src_w;
+			attached_out_rect.y2 = attached_out_rect.y1 +
+					attached_state->src_h;
+			drm_rect_rotate(&attached_out_rect,
+					state->fb->width << 16,
+					state->fb->height << 16,
+					rstate->in_rotation);
+		} else {
+			attached_out_rect.x1 = dst_x;
+			attached_out_rect.y1 = dst_y;
+			attached_out_rect.x2 = dst_x + dst_w;
+			attached_out_rect.y2 = dst_y + dst_h;
+		}
+
+		/* find relative sspp position */
+		if (attached_out_rect.x1 < rstate->out_src_rect.x1)
+			xpos++;
+
+		if (attached_out_rect.x1 < out_rot->x1)
+			out_rot->x1 = attached_out_rect.x1;
+
+		if (attached_out_rect.y1 < out_rot->y1)
+			out_rot->y1 = attached_out_rect.y1;
+
+		if (attached_out_rect.x2 > out_rot->x2)
+			out_rot->x2 = attached_out_rect.x2;
+
+		if (attached_out_rect.y2 > out_rot->y2)
+			out_rot->y2 = attached_out_rect.y2;
+
+		SDE_DEBUG("plane%d.%u src_x:%d sspp:%dx%d+%d+%d/%dx%d+%d+%d\n",
+			attached_plane->base.id,
+			attached_rstate->sequence_id,
+			attached_rstate->out_src_rect.x1 >> 16,
+			attached_state->src_w >> 16,
+			attached_state->src_h >> 16,
+			attached_state->src_x >> 16,
+			attached_state->src_y >> 16,
+			drm_rect_width(&attached_rstate->out_src_rect) >> 16,
+			drm_rect_height(&attached_rstate->out_src_rect) >> 16,
+			attached_rstate->out_src_rect.x1 >> 16,
+			attached_rstate->out_src_rect.y1 >> 16);
+	}
+
+	rstate->out_xpos = xpos;
+	rstate->nplane = found;
+
+	SDE_DEBUG("plane%d.%u xpos:%d/%d rot:%dx%d+%d+%d/%dx%d+%d+%d\n",
+			plane->base.id, rstate->sequence_id,
+			rstate->out_xpos, rstate->nplane,
+			drm_rect_width(in_rot) >> 16,
+			drm_rect_height(in_rot) >> 16,
+			in_rot->x1 >> 16, in_rot->y1 >> 16,
+			drm_rect_width(&rstate->out_rot_rect) >> 16,
+			drm_rect_height(&rstate->out_rot_rect) >> 16,
+			rstate->out_rot_rect.x1 >> 16,
+			rstate->out_rot_rect.y1 >> 16);
+}
+
+/**
+ * sde_plane_rot_submit_command - commit given state for the rotator stage
+ * @plane: Pointer to drm plane
+ * @state: Pointer to the state to be committed
+ * @hw_cmd: rotator command type
+ * return: 0 if success; error code otherwise
+ */
+static int sde_plane_rot_submit_command(struct drm_plane *plane,
+		struct drm_plane_state *state, enum sde_hw_rot_cmd_type hw_cmd)
+{
+	struct sde_plane *psde = to_sde_plane(plane);
+	struct sde_plane_state *pstate = to_sde_plane_state(state);
+	struct sde_plane_rot_state *rstate = &pstate->rot;
+	struct sde_hw_rot_cmd *rot_cmd;
+	struct drm_crtc_state *cstate;
+	struct sde_crtc_state *sde_cstate;
+	int ret, i;
+
+	if (!plane || !state || !state->fb || !rstate->rot_hw) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	cstate = _sde_plane_get_crtc_state(state);
+	if (IS_ERR_OR_NULL(cstate)) {
+		SDE_ERROR("invalid crtc state %ld\n", PTR_ERR(cstate));
+		return -EINVAL;
+	}
+	sde_cstate = to_sde_crtc_state(cstate);
+
+	rot_cmd = &rstate->rot_cmd;
+
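+	/* left-most plane (xpos 0) acts as master of the shared rotator */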
+	rot_cmd->master = (rstate->out_xpos == 0);
+	rot_cmd->sequence_id = rstate->sequence_id;
+	rot_cmd->fps = pstate->base.crtc && pstate->base.crtc->state ?
+		drm_mode_vrefresh(&pstate->base.crtc->state->adjusted_mode) :
+		DEFAULT_REFRESH_RATE;
+	rot_cmd->rot90 = rstate->rot90;
+	rot_cmd->hflip = rstate->hflip;
+	rot_cmd->vflip = rstate->vflip;
+	rot_cmd->secure = state->fb->flags & DRM_MODE_FB_SECURE ? true : false;
+	rot_cmd->prefill_bw = sde_crtc_get_property(sde_cstate,
+			CRTC_PROP_ROT_PREFILL_BW);
+	rot_cmd->clkrate = sde_crtc_get_property(sde_cstate,
+			CRTC_PROP_ROT_CLK);
+	rot_cmd->dst_writeback = psde->sbuf_writeback;
+
+	if (sde_crtc_get_intf_mode(state->crtc) == INTF_MODE_VIDEO)
+		rot_cmd->video_mode = true;
+	else
+		rot_cmd->video_mode = false;
+
+	rot_cmd->src_pixel_format = state->fb->pixel_format;
+	rot_cmd->src_modifier = state->fb->modifier[0];
+	rot_cmd->src_stride = state->fb->pitches[0];
+
+	rot_cmd->src_format = to_sde_format(msm_framebuffer_format(state->fb));
+	if (!rot_cmd->src_format) {
+		SDE_ERROR("failed to get src format\n");
+		return -EINVAL;
+	}
+
+	rot_cmd->src_width = state->fb->width;
+	rot_cmd->src_height = state->fb->height;
+	rot_cmd->src_rect_x = rstate->in_rot_rect.x1 >> 16;
+	rot_cmd->src_rect_y = rstate->in_rot_rect.y1 >> 16;
+	rot_cmd->src_rect_w = drm_rect_width(&rstate->in_rot_rect) >> 16;
+	rot_cmd->src_rect_h = drm_rect_height(&rstate->in_rot_rect) >> 16;
+	rot_cmd->dst_rect_x = rstate->out_rot_rect.x1 >> 16;
+	rot_cmd->dst_rect_y = rstate->out_rot_rect.y1 >> 16;
+	rot_cmd->dst_rect_w = drm_rect_width(&rstate->out_rot_rect) >> 16;
+	rot_cmd->dst_rect_h = drm_rect_height(&rstate->out_rot_rect) >> 16;
+
+	if (hw_cmd == SDE_HW_ROT_CMD_COMMIT) {
+		struct sde_hw_fmt_layout layout;
+
+		memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
+		sde_format_populate_layout(rstate->mmu_id, state->fb,
+				&layout);
+		for (i = 0; i < ARRAY_SIZE(rot_cmd->src_iova); i++) {
+			rot_cmd->src_iova[i] = layout.plane_addr[i];
+			rot_cmd->src_len[i] = layout.plane_size[i];
+		}
+		rot_cmd->src_planes = layout.num_planes;
+
+		memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
+		sde_format_populate_layout(rstate->mmu_id, rstate->out_fb,
+				&layout);
+		for (i = 0; i < ARRAY_SIZE(rot_cmd->dst_iova); i++) {
+			rot_cmd->dst_iova[i] = layout.plane_addr[i];
+			rot_cmd->dst_len[i] = layout.plane_size[i];
+		}
+		rot_cmd->dst_planes = layout.num_planes;
+	}
+
+	ret = rstate->rot_hw->ops.commit(rstate->rot_hw, rot_cmd, hw_cmd);
+	if (ret) {
+		SDE_ERROR("failed to commit rotator %d\n", ret);
+		return ret;
+	}
+
+	rstate->out_rotation = rstate->in_rotation;
+	rstate->out_fb_flags = rot_cmd->dst_modifier ?
+			DRM_MODE_FB_MODIFIERS : 0;
+	rstate->out_fb_flags |= rot_cmd->secure ? DRM_MODE_FB_SECURE : 0;
+	rstate->out_fb_format = rot_cmd->dst_format;
+	rstate->out_fb_pixel_format = rot_cmd->dst_pixel_format;
+
+	for (i = 0; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
+		rstate->out_fb_modifier[i] = rot_cmd->dst_modifier;
+
+	rstate->out_fb_width = drm_rect_width(&rstate->out_rot_rect) >> 16;
+	rstate->out_fb_height = drm_rect_height(&rstate->out_rot_rect) >> 16;
+	rstate->out_src_x = rstate->out_src_rect.x1 - rstate->out_rot_rect.x1;
+	rstate->out_src_y = rstate->out_src_rect.y1 - rstate->out_rot_rect.y1;
+	rstate->out_src_w = drm_rect_width(&rstate->out_src_rect);
+	rstate->out_src_h = drm_rect_height(&rstate->out_src_rect);
+
+	if (rot_cmd->rot90)
+		rstate->out_rotation &= ~DRM_ROTATE_90;
+
+	if (rot_cmd->hflip)
+		rstate->out_rotation &= ~DRM_REFLECT_X;
+
+	if (rot_cmd->vflip)
+		rstate->out_rotation &= ~DRM_REFLECT_Y;
+
+	SDE_DEBUG(
+		"plane%d.%d rot:%d/%c%c%c%c/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+			plane->base.id, rstate->sequence_id, hw_cmd,
+			rot_cmd->rot90 ? 'r' : '_',
+			rot_cmd->hflip ? 'h' : '_',
+			rot_cmd->vflip ? 'v' : '_',
+			rot_cmd->video_mode ? 'V' : 'C',
+			state->fb->width, state->fb->height,
+			state->fb->pixel_format >> 0,
+			state->fb->pixel_format >> 8,
+			state->fb->pixel_format >> 16,
+			state->fb->pixel_format >> 24,
+			state->fb->modifier[0],
+			drm_rect_width(&rstate->in_rot_rect) >> 16,
+			drm_rect_height(&rstate->in_rot_rect) >> 16,
+			rstate->in_rot_rect.x1 >> 16,
+			rstate->in_rot_rect.y1 >> 16);
+
+	SDE_DEBUG("plane%d.%d sspp:%d/%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+			plane->base.id, rstate->sequence_id, hw_cmd,
+			rstate->out_rotation,
+			rstate->out_fb_width, rstate->out_fb_height,
+			rstate->out_fb_pixel_format >> 0,
+			rstate->out_fb_pixel_format >> 8,
+			rstate->out_fb_pixel_format >> 16,
+			rstate->out_fb_pixel_format >> 24,
+			rstate->out_fb_modifier[0],
+			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
+			rstate->out_src_x >> 16, rstate->out_src_y >> 16);
+
+	return ret;
+}
+
+/**
+ * sde_plane_rot_prepare_fb - prepare framebuffer of the new state
+ *	for rotator (pre-sspp) stage
+ * @plane: Pointer to drm plane
+ * @new_state: Pointer to new drm plane state
+ * return: 0 if success; error code otherwise
+ */
+static int sde_plane_rot_prepare_fb(struct drm_plane *plane,
+		struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct sde_plane_state *new_pstate = to_sde_plane_state(new_state);
+	struct sde_plane_rot_state *new_rstate = &new_pstate->rot;
+	struct drm_crtc_state *cstate;
+	int ret;
+
+	SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n",
+			plane->base.id,
+			new_rstate->sequence_id, fb ? fb->base.id : 0,
+			!!new_rstate->out_sbuf, !!new_rstate->rot_hw,
+			sde_plane_crtc_enabled(new_state));
+
+	if (!new_rstate->out_sbuf || !new_rstate->rot_hw)
+		return 0;
+
+	cstate = _sde_plane_get_crtc_state(new_state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return ret;
+	}
+
+	/* need to re-calc based on all newly validated plane states */
+	sde_plane_rot_calc_cfg(plane, new_state);
+
+	/* check if stream buffer is already attached to rotator */
+	if (sde_plane_enabled(new_state)) {
+		struct sde_kms_fbo *fbo;
+		struct drm_framebuffer *fb;
+
+		fbo = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+				(u64) &new_rstate->rot_hw->base);
+		fb = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+				(u64) &new_rstate->rot_hw->base);
+		if (fb && fbo) {
+			SDE_DEBUG("plane%d.%d get fb/fbo\n", plane->base.id,
+					new_rstate->sequence_id);
+		} else if (fbo) {
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+					(u64) &new_rstate->rot_hw->base);
+			fbo = NULL;
+		} else if (fb) {
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+					(u64) &new_rstate->rot_hw->base);
+			fb = NULL;
+		}
+
+		new_rstate->out_fbo = fbo;
+		new_rstate->out_fb = fb;
+	}
+
+	/* release buffer if output format configuration changes */
+	if (new_rstate->out_fb &&
+		((new_rstate->out_fb_height != new_rstate->out_fb->height) ||
+		(new_rstate->out_fb_width != new_rstate->out_fb->width) ||
+		(new_rstate->out_fb_pixel_format !=
+				new_rstate->out_fb->pixel_format) ||
+		(new_rstate->out_fb_modifier[0] !=
+				new_rstate->out_fb->modifier[0]) ||
+		(new_rstate->out_fb_flags != new_rstate->out_fb->flags))) {
+
+		SDE_DEBUG("plane%d.%d release fb/fbo\n", plane->base.id,
+				new_rstate->sequence_id);
+
+		sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+				(u64) &new_rstate->rot_hw->base);
+		new_rstate->out_fb = NULL;
+		sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+				(u64) &new_rstate->rot_hw->base);
+		new_rstate->out_fbo = NULL;
+	}
+
+	/* create new stream buffer if it is not available */
+	if (sde_plane_enabled(new_state) && !new_rstate->out_fb) {
+		u32 fb_w = drm_rect_width(&new_rstate->out_rot_rect) >> 16;
+		u32 fb_h = drm_rect_height(&new_rstate->out_rot_rect) >> 16;
+
+		SDE_DEBUG("plane%d.%d allocate fb/fbo\n", plane->base.id,
+				new_rstate->sequence_id);
+
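+		/* map via the SMMU domain matching the source fb security attribute */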
+		if (new_state->fb->flags & DRM_MODE_FB_SECURE)
+			new_rstate->mmu_id = MSM_SMMU_DOMAIN_SECURE;
+		else
+			new_rstate->mmu_id = MSM_SMMU_DOMAIN_UNSECURE;
+
+		/* check if out_fb is already attached to rotator */
+		new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h,
+				new_rstate->out_fb_pixel_format,
+				new_rstate->out_fb_modifier,
+				new_rstate->out_fb_flags);
+		if (!new_rstate->out_fbo) {
+			SDE_ERROR("failed to allocate inline buffer object\n");
+			ret = -EINVAL;
+			goto error_create_fbo;
+		}
+
+		ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+				(u64) &new_rstate->rot_hw->base,
+				new_rstate->out_fbo, &fbo_res_ops);
+		if (ret) {
+			SDE_ERROR("failed to add crtc resource\n");
+			goto error_create_fbo_res;
+		}
+
+		new_rstate->out_fb = sde_kms_fbo_create_fb(plane->dev,
+				new_rstate->out_fbo);
+		if (!new_rstate->out_fb) {
+			SDE_ERROR("failed to create inline framebuffer\n");
+			ret = -EINVAL;
+			goto error_create_fb;
+		}
+
+		ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+				(u64) &new_rstate->rot_hw->base,
+				new_rstate->out_fb, &fb_res_ops);
+		if (ret) {
+			SDE_ERROR("failed to add crtc resource %d\n", ret);
+			goto error_create_fb_res;
+		}
+	}
+
+	/* prepare rotator input buffer */
+	ret = msm_framebuffer_prepare(new_state->fb, new_rstate->mmu_id);
+	if (ret) {
+		SDE_ERROR("failed to prepare input framebuffer\n");
+		goto error_prepare_input_buffer;
+	}
+
+	/* prepare rotator output buffer */
+	if (sde_plane_enabled(new_state) && new_rstate->out_fb) {
+		SDE_DEBUG("plane%d.%d prepare fb/fbo\n", plane->base.id,
+				new_rstate->sequence_id);
+
+		ret = msm_framebuffer_prepare(new_rstate->out_fb,
+				new_rstate->mmu_id);
+		if (ret) {
+			SDE_ERROR("failed to prepare inline framebuffer\n");
+			goto error_prepare_output_buffer;
+		}
+	}
+
+	return 0;
+
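+/* error path: undo the steps above in reverse order */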
+error_prepare_output_buffer:
+	msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id);
+error_prepare_input_buffer:
+	sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+			(u64) &new_rstate->rot_hw->base);
+error_create_fb_res:
+	new_rstate->out_fb = NULL;
+error_create_fb:
+	sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+			(u64) &new_rstate->rot_hw->base);
+error_create_fbo_res:
+	new_rstate->out_fbo = NULL;
+error_create_fbo:
+	return ret;
+}
+
+/**
+ * sde_plane_rot_cleanup_fb - cleanup framebuffer of previous state for the
+ *	rotator (pre-sspp) stage
+ * @plane: Pointer to drm plane
+ * @old_state: Pointer to previous drm plane state
+ * return: none
+ */
+static void sde_plane_rot_cleanup_fb(struct drm_plane *plane,
+		struct drm_plane_state *old_state)
+{
+	struct sde_plane_state *old_pstate = to_sde_plane_state(old_state);
+	struct sde_plane_rot_state *old_rstate = &old_pstate->rot;
+	struct sde_hw_rot_cmd *cmd = &old_rstate->rot_cmd;
+	struct drm_crtc_state *cstate;
+	int ret;
+
+	SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", plane->base.id,
+			old_rstate->sequence_id, old_state->fb->base.id,
+			!!old_rstate->out_sbuf, !!old_rstate->rot_hw,
+			sde_plane_crtc_enabled(old_state));
+
+	if (!old_rstate->out_sbuf || !old_rstate->rot_hw)
+		return;
+
+	cstate = _sde_plane_get_crtc_state(old_state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return;
+	}
+
+	if (sde_plane_crtc_enabled(old_state)) {
+		ret = old_rstate->rot_hw->ops.commit(old_rstate->rot_hw, cmd,
+				SDE_HW_ROT_CMD_CLEANUP);
+		if (ret)
+			SDE_ERROR("failed to cleanup rotator buffers\n");
+	}
+
+	if (sde_plane_enabled(old_state)) {
+		if (old_rstate->out_fb) {
+			msm_framebuffer_cleanup(old_rstate->out_fb,
+					old_rstate->mmu_id);
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+					(u64) &old_rstate->rot_hw->base);
+			old_rstate->out_fb = NULL;
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+					(u64) &old_rstate->rot_hw->base);
+			old_rstate->out_fbo = NULL;
+		}
+
+		msm_framebuffer_cleanup(old_state->fb, old_rstate->mmu_id);
+	}
+}
+
+/**
+ * sde_plane_rot_atomic_check - verify rotator update of the given state
+ * @plane: Pointer to drm plane
+ * @state: Pointer to drm plane state to be validated
+ * return: 0 if success; error code otherwise
+ */
+static int sde_plane_rot_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate, *old_pstate;
+	struct sde_plane_rot_state *rstate, *old_rstate;
+	struct drm_crtc_state *cstate;
+	struct sde_hw_blk *hw_blk;
+	int i, ret = 0;
+
+	if (!plane || !state) {
+		SDE_ERROR("invalid plane/state\n");
+		return -EINVAL;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(state);
+	old_pstate = to_sde_plane_state(plane->state);
+	rstate = &pstate->rot;
+	old_rstate = &old_pstate->rot;
+
+	/* cstate will be null if crtc is disconnected from plane */
+	cstate = _sde_plane_get_crtc_state(state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return ret;
+	}
+
+	SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", plane->base.id,
+			rstate->sequence_id, state->fb ? state->fb->base.id : 0,
+			!!rstate->out_sbuf, !!rstate->rot_hw,
+			sde_plane_crtc_enabled(state));
+
+	rstate->in_rotation = drm_rotation_simplify(
+			sde_plane_get_property(pstate, PLANE_PROP_ROTATION),
+			DRM_ROTATE_0 | DRM_ROTATE_90 |
+			DRM_REFLECT_X | DRM_REFLECT_Y);
+	rstate->rot90 = rstate->in_rotation & DRM_ROTATE_90 ? true : false;
+	rstate->hflip = rstate->in_rotation & DRM_REFLECT_X ? true : false;
+	rstate->vflip = rstate->in_rotation & DRM_REFLECT_Y ? true : false;
+	rstate->out_sbuf = psde->sbuf_mode || rstate->rot90;
+
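+	/*
+	 * inline rotation (sbuf) is needed for 90 degree rotation, or when
+	 * forced through the sbuf_mode debugfs knob
+	 */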
+	if (sde_plane_enabled(state) && rstate->out_sbuf) {
+		SDE_DEBUG("plane%d.%d acquire rotator\n",
+				plane->base.id, rstate->sequence_id);
+
+		hw_blk = sde_crtc_res_get(cstate, SDE_HW_BLK_ROT,
+				(u64) state->fb);
+		if (!hw_blk) {
+			SDE_ERROR("plane%d no available rotator\n",
+					plane->base.id);
+			return -EINVAL;
+		}
+
+		rstate->rot_hw = to_sde_hw_rot(hw_blk);
+
+		if (!rstate->rot_hw->ops.commit) {
+			SDE_ERROR("plane%d invalid rotator ops\n",
+					plane->base.id);
+			sde_crtc_res_put(cstate,
+					SDE_HW_BLK_ROT, (u64) state->fb);
+			rstate->rot_hw = NULL;
+			return -EINVAL;
+		}
+
+		rstate->in_fb = state->fb;
+	} else {
+		rstate->in_fb = NULL;
+		rstate->rot_hw = NULL;
+	}
+
+	if (sde_plane_enabled(state) && rstate->out_sbuf && rstate->rot_hw) {
+
+		SDE_DEBUG("plane%d.%d use rotator\n",
+				plane->base.id, rstate->sequence_id);
+
+		sde_plane_rot_calc_cfg(plane, state);
+
+		ret = sde_plane_rot_submit_command(plane, state,
+				SDE_HW_ROT_CMD_VALIDATE);
+
+	} else if (sde_plane_enabled(state)) {
+
+		SDE_DEBUG("plane%d.%d bypass rotator\n", plane->base.id,
+				rstate->sequence_id);
+
+		/* bypass rotator - initialize output setting as input */
+		rstate->out_rotation = rstate->in_rotation;
+		rstate->out_fb_pixel_format = state->fb->pixel_format;
+
+		for (i = 0; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
+			rstate->out_fb_modifier[i] = state->fb->modifier[i];
+
+		rstate->out_fb_flags = state->fb->flags;
+		rstate->out_fb_width = state->fb->width;
+		rstate->out_fb_height = state->fb->height;
+		rstate->out_src_x = state->src_x;
+		rstate->out_src_y = state->src_y;
+		rstate->out_src_w = state->src_w;
+		rstate->out_src_h = state->src_h;
+
+		rstate->out_fb_format = NULL;
+		rstate->out_sbuf = false;
+		rstate->out_fb = state->fb;
+	}
+
+	return ret;
+}
+
+/**
+ * sde_plane_rot_atomic_update - perform atomic update for rotator stage
+ * @plane: Pointer to drm plane
+ * @old_state: Pointer to previous state
+ * return: none
+ */
+static void sde_plane_rot_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *state;
+	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
+
+	if (!plane || !plane->state) {
+		SDE_ERROR("invalid plane/state\n");
+		return;
+	}
+
+	state = plane->state;
+	pstate = to_sde_plane_state(state);
+	rstate = &pstate->rot;
+
+	SDE_DEBUG("plane%d.%d sbuf:%d rot:%d crtc:%d\n", plane->base.id,
+			rstate->sequence_id,
+			!!rstate->out_sbuf, !!rstate->rot_hw,
+			sde_plane_crtc_enabled(plane->state));
+
+	if (!sde_plane_crtc_enabled(state))
+		return;
+
+	if (!rstate->out_sbuf || !rstate->rot_hw)
+		return;
+
+	sde_plane_rot_submit_command(plane, state, SDE_HW_ROT_CMD_COMMIT);
+}
+
+/**
+ * sde_plane_rot_destroy_state - destroy state for rotator stage
+ * @plane: Pointer to drm plane
+ * @state: Pointer to state to be destroyed
+ * return: none
+ */
+static void sde_plane_rot_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct sde_plane_state *pstate = to_sde_plane_state(state);
+	struct sde_plane_rot_state *rstate = &pstate->rot;
+
+	SDE_DEBUG("plane%d.%d sbuf:%d rot:%d crtc:%d\n", plane->base.id,
+			rstate->sequence_id,
+			!!rstate->out_sbuf, !!rstate->rot_hw,
+			sde_plane_crtc_enabled(state));
+}
+
+/**
+ * sde_plane_rot_duplicate_state - duplicate state for rotator stage
+ * @plane: Pointer to drm plane
+ * @new_state: Pointer to duplicated state
+ * return: 0 if success; error code otherwise
+ */
+static int sde_plane_rot_duplicate_state(struct drm_plane *plane,
+		struct drm_plane_state *new_state)
+{
+	struct sde_plane_state *pstate  = to_sde_plane_state(new_state);
+	struct sde_plane_rot_state *rstate = &pstate->rot;
+	struct drm_crtc_state *cstate;
+	int ret;
+
+	rstate->sequence_id++;
+
+	SDE_DEBUG("plane%d.%d sbuf:%d rot:%d\n", plane->base.id,
+			rstate->sequence_id,
+			!!rstate->out_sbuf, !!rstate->rot_hw);
+
+	cstate = _sde_plane_get_crtc_state(new_state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return -EINVAL;
+	}
+
+	if (rstate->rot_hw && cstate)
+		sde_crtc_res_get(cstate, SDE_HW_BLK_ROT, (u64) rstate->in_fb);
+	else if (rstate->rot_hw && !cstate)
+		SDE_ERROR("plane%d.%d zombie rotator hw\n",
+				plane->base.id, rstate->sequence_id);
+
+	rstate->out_fb = NULL;
+	rstate->out_fbo = NULL;
+
+	return 0;
+}
+
+/**
+ * sde_plane_rot_install_caps - install plane rotator capabilities
+ * @plane: Pointer to drm plane
+ * return: none
+ */
+static void sde_plane_rot_install_caps(struct drm_plane *plane)
+{
+	struct sde_plane *psde = to_sde_plane(plane);
+	const struct sde_format_extended *format_list;
+	struct sde_kms_info *info;
+	struct sde_hw_rot *rot_hw;
+	const char *downscale_caps;
+
+	if (!psde->catalog || !(psde->features & BIT(SDE_SSPP_SBUF)) ||
+			!psde->catalog->rot_count)
+		return;
+
+	if (psde->blob_rot_caps)
+		return;
+
+	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	if (!info)
+		return;
+
+	rot_hw = sde_hw_rot_get(NULL);
+	if (!rot_hw || !rot_hw->ops.get_format_caps ||
+			!rot_hw->ops.get_downscale_caps) {
+		SDE_ERROR("invalid rotator hw\n");
+		goto error_rot;
+	}
+
+	sde_kms_info_reset(info);
+
+	format_list = rot_hw->ops.get_format_caps(rot_hw);
+	if (format_list) {
+		sde_kms_info_start(info, "pixel_formats");
+		while (format_list->fourcc_format) {
+			sde_kms_info_append_format(info,
+					format_list->fourcc_format,
+					format_list->modifier);
+			++format_list;
+		}
+		sde_kms_info_stop(info);
+	}
+
+	downscale_caps = rot_hw->ops.get_downscale_caps(rot_hw);
+	if (downscale_caps) {
+		sde_kms_info_start(info, "downscale_ratios");
+		sde_kms_info_append(info, downscale_caps);
+		sde_kms_info_stop(info);
+	}
+
+	if (rot_hw->ops.get_cache_size)
+		sde_kms_info_add_keyint(info, "cache_size",
+				rot_hw->ops.get_cache_size(rot_hw));
+
+	if (rot_hw->ops.get_maxlinewidth)
+		sde_kms_info_add_keyint(info, "max_linewidth",
+				rot_hw->ops.get_maxlinewidth(rot_hw));
+
+	msm_property_set_blob(&psde->property_info, &psde->blob_rot_caps,
+			info->data, info->len, PLANE_PROP_ROT_CAPS_V1);
+
+	sde_hw_rot_put(rot_hw);
+error_rot:
+	kfree(info);
+}
+
+/**
+ * sde_plane_rot_install_properties - install plane rotator properties
+ * @plane: Pointer to drm plane
+ * @catalog: Pointer to mdss configuration
+ * return: none
+ */
+static void sde_plane_rot_install_properties(struct drm_plane *plane,
+	struct sde_mdss_cfg *catalog)
+{
+	struct sde_plane *psde = to_sde_plane(plane);
+	unsigned long supported_rotations = DRM_ROTATE_0 | DRM_REFLECT_X |
+			DRM_REFLECT_Y;
+
+	if (!plane || !psde) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	} else if (!catalog) {
+		SDE_ERROR("invalid catalog\n");
+		return;
+	}
+
+	if ((psde->features & BIT(SDE_SSPP_SBUF)) && catalog->rot_count)
+		supported_rotations |= DRM_ROTATE_0 | DRM_ROTATE_90 |
+				DRM_ROTATE_180 | DRM_ROTATE_270;
+
+	msm_property_install_rotation(&psde->property_info,
+			supported_rotations, PLANE_PROP_ROTATION);
+
+	if (!(psde->features & BIT(SDE_SSPP_SBUF)) || !catalog->rot_count)
+		return;
+
+	msm_property_install_range(&psde->property_info, "rot_dst_x",
+			0, 0, U64_MAX, 0, PLANE_PROP_ROT_DST_X);
+	msm_property_install_range(&psde->property_info, "rot_dst_y",
+			0, 0, U64_MAX, 0, PLANE_PROP_ROT_DST_Y);
+	msm_property_install_range(&psde->property_info, "rot_dst_w",
+			0, 0, U64_MAX, 0, PLANE_PROP_ROT_DST_W);
+	msm_property_install_range(&psde->property_info, "rot_dst_h",
+			0, 0, U64_MAX, 0, PLANE_PROP_ROT_DST_H);
+	msm_property_install_blob(&psde->property_info, "rot_caps_v1",
+		DRM_MODE_PROP_IMMUTABLE, PLANE_PROP_ROT_CAPS_V1);
+}
+
+static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 				struct drm_plane_state *state)
 {
 	uint32_t nplanes, src_flags;
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
 	const struct sde_format *fmt;
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb;
@@ -1174,9 +2222,10 @@
 
 	psde = to_sde_plane(plane);
 	pstate = to_sde_plane_state(plane->state);
+	rstate = &pstate->rot;
 
 	crtc = state->crtc;
-	fb = state->fb;
+	fb = rstate->out_fb;
 	if (!crtc || !fb) {
 		SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
 				crtc != 0, fb != 0);
@@ -1185,6 +2234,21 @@
 	fmt = to_sde_format(msm_framebuffer_format(fb));
 	nplanes = fmt->num_planes;
 
+	SDE_DEBUG(
+		"plane%d.%d sspp:%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
+			plane->base.id, rstate->sequence_id,
+			rstate->out_fb_width, rstate->out_fb_height,
+			rstate->out_fb_pixel_format >> 0,
+			rstate->out_fb_pixel_format >> 8,
+			rstate->out_fb_pixel_format >> 16,
+			rstate->out_fb_pixel_format >> 24,
+			rstate->out_fb_modifier[0],
+			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
+			rstate->out_src_x >> 16, rstate->out_src_y >> 16,
+			rstate->out_rotation,
+			state->crtc_w, state->crtc_h,
+			state->crtc_x, state->crtc_y);
+
 	/* determine what needs to be refreshed */
 	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
 		switch (idx) {
@@ -1213,6 +2277,12 @@
 		case PLANE_PROP_BLEND_OP:
 			/* no special action required */
 			break;
+		case PLANE_PROP_ROT_DST_X:
+		case PLANE_PROP_ROT_DST_Y:
+		case PLANE_PROP_ROT_DST_W:
+		case PLANE_PROP_ROT_DST_H:
+			/* handled by rotator atomic update */
+			break;
 		default:
 			/* unknown property, refresh everything */
 			pstate->dirty |= SDE_PLANE_DIRTY_ALL;
@@ -1236,16 +2306,19 @@
 
 	/* update roi config */
 	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
-		POPULATE_RECT(&src, state->src_x, state->src_y,
-			state->src_w, state->src_h, q16_data);
+		POPULATE_RECT(&src, rstate->out_src_x, rstate->out_src_y,
+			rstate->out_src_w, rstate->out_src_h, q16_data);
 		POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
 			state->crtc_w, state->crtc_h, !q16_data);
 
 		SDE_DEBUG_PLANE(psde,
-			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %s ubwc %d\n",
+			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %c%c%c%c ubwc %d\n",
 				fb->base.id, src.x, src.y, src.w, src.h,
 				crtc->base.id, dst.x, dst.y, dst.w, dst.h,
-				drm_get_format_name(fmt->base.pixel_format),
+				fmt->base.pixel_format >> 0,
+				fmt->base.pixel_format >> 8,
+				fmt->base.pixel_format >> 16,
+				fmt->base.pixel_format >> 24,
 				SDE_FORMAT_IS_UBWC(fmt));
 
 		if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
@@ -1261,6 +2334,8 @@
 		psde->pipe_cfg.src_rect = src;
 		psde->pipe_cfg.dst_rect = dst;
 
+		_sde_plane_setup_scaler(psde, fmt, pstate);
+
 		/* check for color fill */
 		psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
 				PLANE_PROP_COLOR_FILL);
@@ -1273,7 +2348,6 @@
 					pstate->multirect_index);
 		}
 
-		_sde_plane_setup_scaler(psde, fmt, pstate);
 		if (psde->pipe_hw->ops.setup_pe)
 			psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
 					&psde->pixel_ext);
@@ -1299,19 +2373,48 @@
 	if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
 			psde->pipe_hw->ops.setup_format) {
 		src_flags = 0x0;
-		SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n",
-			sde_plane_get_property(pstate, PLANE_PROP_ROTATION));
-		if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
-			DRM_REFLECT_X)
+		SDE_DEBUG_PLANE(psde, "rotation 0x%X\n", rstate->out_rotation);
+		if (rstate->out_rotation & DRM_REFLECT_X)
 			src_flags |= SDE_SSPP_FLIP_LR;
-		if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
-			DRM_REFLECT_Y)
+		if (rstate->out_rotation & DRM_REFLECT_Y)
 			src_flags |= SDE_SSPP_FLIP_UD;
 
 		/* update format */
 		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
 				pstate->multirect_index);
 
+		if (psde->pipe_hw->ops.setup_sys_cache) {
+			if (rstate->out_sbuf) {
+				if (rstate->nplane < 2)
+					pstate->sc_cfg.op_mode =
+					SDE_PIPE_SC_OP_MODE_INLINE_SINGLE;
+				else if (rstate->out_xpos == 0)
+					pstate->sc_cfg.op_mode =
+						SDE_PIPE_SC_OP_MODE_INLINE_LEFT;
+				else
+					pstate->sc_cfg.op_mode =
+					SDE_PIPE_SC_OP_MODE_INLINE_RIGHT;
+
+				pstate->sc_cfg.rd_en = true;
+				pstate->sc_cfg.rd_scid =
+						rstate->rot_hw->caps->scid;
+				pstate->sc_cfg.rd_noallocate = true;
+				pstate->sc_cfg.rd_op_type =
+					SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE;
+			} else {
+				pstate->sc_cfg.op_mode =
+						SDE_PIPE_SC_OP_MODE_OFFLINE;
+				pstate->sc_cfg.rd_en = false;
+				pstate->sc_cfg.rd_scid = 0;
+				pstate->sc_cfg.rd_noallocate = false;
+				pstate->sc_cfg.rd_op_type =
+					SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE;
+			}
+
+			psde->pipe_hw->ops.setup_sys_cache(
+					psde->pipe_hw, &pstate->sc_cfg);
+		}
+
 		/* update csc */
 		if (SDE_FORMAT_IS_YUV(fmt))
 			_sde_plane_setup_csc(psde);
@@ -1439,38 +2542,93 @@
 	return 0;
 }
 
+/**
+ * sde_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush: Pointer to flush control word
+ */
+void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
+		u32 *flush)
+{
+	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
+	u32 bitmask;
+
+	if (!plane || !flush) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	pstate = to_sde_plane_state(plane->state);
+	rstate = &pstate->rot;
+
+	bitmask = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
+
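+	/* stream buffer mode also requires the rotator flush bit */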
+	if (sde_plane_is_sbuf_mode(plane, NULL) && rstate->rot_hw &&
+			ctl->ops.get_bitmask_rot)
+		ctl->ops.get_bitmask_rot(ctl, &bitmask, rstate->rot_hw->idx);
+
+	*flush = bitmask;
+}
+
 static int sde_plane_prepare_fb(struct drm_plane *plane,
 		struct drm_plane_state *new_state)
 {
 	struct drm_framebuffer *fb = new_state->fb;
 	struct sde_plane *psde = to_sde_plane(plane);
+	struct sde_plane_rot_state *new_rstate;
+	int ret;
 
 	if (!new_state->fb)
 		return 0;
 
 	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
-	return msm_framebuffer_prepare(fb, psde->mmu_id);
+
+	ret = sde_plane_rot_prepare_fb(plane, new_state);
+	if (ret) {
+		SDE_ERROR("failed to prepare rot framebuffer\n");
+		return ret;
+	}
+
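+	/*
+	 * out_fb is the rotator output when inline rotation is active,
+	 * otherwise it aliases the original plane fb
+	 */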
+	new_rstate = &to_sde_plane_state(new_state)->rot;
+
+	ret = msm_framebuffer_prepare(new_rstate->out_fb, new_rstate->mmu_id);
+	if (ret) {
+		SDE_ERROR("failed to prepare framebuffer\n");
+		return ret;
+	}
+
+	return 0;
 }
 
 static void sde_plane_cleanup_fb(struct drm_plane *plane,
 		struct drm_plane_state *old_state)
 {
-	struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
-	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_plane *psde = to_sde_plane(plane);
+	struct sde_plane_rot_state *old_rstate;
 
-	if (!fb)
+	if (!old_state->fb)
 		return;
 
-	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
-	msm_framebuffer_cleanup(fb, psde->mmu_id);
+	SDE_DEBUG_PLANE(psde, "FB[%u]\n", old_state->fb->base.id);
+
+	old_rstate = &to_sde_plane_state(old_state)->rot;
+
+	msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->mmu_id);
+
+	sde_plane_rot_cleanup_fb(plane, old_state);
 }
 
-static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
+static void _sde_plane_sspp_atomic_check_mode_changed(struct sde_plane *psde,
 		struct drm_plane_state *state,
 		struct drm_plane_state *old_state)
 {
 	struct sde_plane_state *pstate = to_sde_plane_state(state);
 	struct sde_plane_state *old_pstate = to_sde_plane_state(old_state);
+	struct sde_plane_rot_state *rstate = &pstate->rot;
+	struct sde_plane_rot_state *old_rstate = &old_pstate->rot;
+	struct drm_framebuffer *fb, *old_fb;
 
 	/* no need to check it again */
 	if (pstate->dirty == SDE_PLANE_DIRTY_ALL)
@@ -1484,10 +2642,10 @@
 	} else if (to_sde_plane_state(old_state)->pending) {
 		SDE_DEBUG_PLANE(psde, "still pending\n");
 		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
-	} else if (state->src_w != old_state->src_w ||
-		   state->src_h != old_state->src_h ||
-		   state->src_x != old_state->src_x ||
-		   state->src_y != old_state->src_y) {
+	} else if (rstate->out_src_w != old_rstate->out_src_w ||
+		   rstate->out_src_h != old_rstate->out_src_h ||
+		   rstate->out_src_x != old_rstate->out_src_x ||
+		   rstate->out_src_y != old_rstate->out_src_y) {
 		SDE_DEBUG_PLANE(psde, "src rect updated\n");
 		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
 	} else if (state->crtc_w != old_state->crtc_w ||
@@ -1508,21 +2666,24 @@
 		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
 	}
 
-	if (!state->fb || !old_state->fb) {
+	fb = rstate->out_fb;
+	old_fb = old_rstate->out_fb;
+
+	if (!fb || !old_fb) {
 		SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
-	} else if (state->fb->pixel_format != old_state->fb->pixel_format) {
+	} else if (fb->pixel_format != old_fb->pixel_format) {
 		SDE_DEBUG_PLANE(psde, "format change\n");
 		pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
 	} else {
-		uint64_t *new_mods = state->fb->modifier;
-		uint64_t *old_mods = old_state->fb->modifier;
-		uint32_t *new_pitches = state->fb->pitches;
-		uint32_t *old_pitches = old_state->fb->pitches;
-		uint32_t *new_offset = state->fb->offsets;
-		uint32_t *old_offset = old_state->fb->offsets;
+		uint64_t *new_mods = fb->modifier;
+		uint64_t *old_mods = old_fb->modifier;
+		uint32_t *new_pitches = fb->pitches;
+		uint32_t *old_pitches = old_fb->pitches;
+		uint32_t *new_offset = fb->offsets;
+		uint32_t *old_offset = old_fb->offsets;
 		int i;
 
-		for (i = 0; i < ARRAY_SIZE(state->fb->modifier); i++) {
+		for (i = 0; i < ARRAY_SIZE(fb->modifier); i++) {
 			if (new_mods[i] != old_mods[i]) {
 				SDE_DEBUG_PLANE(psde,
 					"format modifiers change\"\
@@ -1533,7 +2694,7 @@
 				break;
 			}
 		}
-		for (i = 0; i < ARRAY_SIZE(state->fb->pitches); i++) {
+		for (i = 0; i < ARRAY_SIZE(fb->pitches); i++) {
 			if (new_pitches[i] != old_pitches[i]) {
 				SDE_DEBUG_PLANE(psde,
 					"pitches change plane:%d\"\
@@ -1543,7 +2704,7 @@
 				break;
 			}
 		}
-		for (i = 0; i < ARRAY_SIZE(state->fb->offsets); i++) {
+		for (i = 0; i < ARRAY_SIZE(fb->offsets); i++) {
 			if (new_offset[i] != old_offset[i]) {
 				SDE_DEBUG_PLANE(psde,
 					"offset change plane:%d\"\
@@ -1557,12 +2718,13 @@
 	}
 }
 
-static int sde_plane_atomic_check(struct drm_plane *plane,
+static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
 		struct drm_plane_state *state)
 {
 	int ret = 0;
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
 	const struct sde_format *fmt;
 	struct sde_rect src, dst;
 	uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
@@ -1578,6 +2740,7 @@
 
 	psde = to_sde_plane(plane);
 	pstate = to_sde_plane_state(state);
+	rstate = &pstate->rot;
 
 	if (!psde->pipe_sblk) {
 		SDE_ERROR_PLANE(psde, "invalid catalog\n");
@@ -1589,8 +2752,8 @@
 	deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
 
 	/* src values are in Q16 fixed point, convert to integer */
-	POPULATE_RECT(&src, state->src_x, state->src_y, state->src_w,
-		state->src_h, q16_data);
+	POPULATE_RECT(&src, rstate->out_src_x, rstate->out_src_y,
+			rstate->out_src_w, rstate->out_src_h, q16_data);
 	POPULATE_RECT(&dst, state->crtc_x, state->crtc_y, state->crtc_w,
 		state->crtc_h, !q16_data);
 
@@ -1607,6 +2770,21 @@
 	if (!sde_plane_enabled(state))
 		goto modeset_update;
 
+	SDE_DEBUG(
+		"plane%d.%u sspp:%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d crtc:%dx%d+%d+%d\n",
+			plane->base.id, rstate->sequence_id,
+			rstate->out_rotation,
+			rstate->out_fb_width, rstate->out_fb_height,
+			rstate->out_fb_pixel_format >> 0,
+			rstate->out_fb_pixel_format >> 8,
+			rstate->out_fb_pixel_format >> 16,
+			rstate->out_fb_pixel_format >> 24,
+			rstate->out_fb_modifier[0],
+			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
+			rstate->out_src_x >> 16, rstate->out_src_y >> 16,
+			state->crtc_w, state->crtc_h,
+			state->crtc_x, state->crtc_y);
+
 	fmt = to_sde_format(msm_framebuffer_format(state->fb));
 
 	min_src_size = SDE_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -1620,11 +2798,11 @@
 		ret = -EINVAL;
 
 	/* check src bounds */
-	} else if (state->fb->width > MAX_IMG_WIDTH ||
-		state->fb->height > MAX_IMG_HEIGHT ||
+	} else if (rstate->out_fb_width > MAX_IMG_WIDTH ||
+		rstate->out_fb_height > MAX_IMG_HEIGHT ||
 		src.w < min_src_size || src.h < min_src_size ||
-		CHECK_LAYER_BOUNDS(src.x, src.w, state->fb->width) ||
-		CHECK_LAYER_BOUNDS(src.y, src.h, state->fb->height)) {
+		CHECK_LAYER_BOUNDS(src.x, src.w, rstate->out_fb_width) ||
+		CHECK_LAYER_BOUNDS(src.y, src.h, rstate->out_fb_height)) {
 		SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
 			src.x, src.y, src.w, src.h);
 		ret = -E2BIG;
@@ -1703,7 +2881,37 @@
 
 modeset_update:
 	if (!ret)
-		_sde_plane_atomic_check_mode_changed(psde, state, plane->state);
+		_sde_plane_sspp_atomic_check_mode_changed(psde,
+				state, plane->state);
+exit:
+	return ret;
+}
+
+static int sde_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	int ret = 0;
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+
+	if (!plane || !state) {
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != 0, state != 0);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(state);
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	ret = sde_plane_rot_atomic_check(plane, state);
+	if (ret)
+		goto exit;
+
+	ret = sde_plane_sspp_atomic_check(plane, state);
+
 exit:
 	return ret;
 }
@@ -1736,6 +2944,10 @@
 	else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
 		psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
 
+	/* force black color fill during suspend */
+	if (msm_is_suspend_state(plane->dev) && suspend_blank)
+		_sde_plane_color_fill(psde, 0x0, 0x0);
+
 	/* flag h/w flush complete */
 	if (plane->state)
 		to_sde_plane_state(plane->state)->pending = false;
@@ -1747,6 +2959,7 @@
 	struct sde_plane *psde;
 	struct drm_plane_state *state;
 	struct sde_plane_state *pstate;
+	struct sde_plane_state *old_pstate;
 
 	if (!plane) {
 		SDE_ERROR("invalid plane\n");
@@ -1760,15 +2973,18 @@
 	psde->is_error = false;
 	state = plane->state;
 	pstate = to_sde_plane_state(state);
+	old_pstate = to_sde_plane_state(old_state);
 
 	SDE_DEBUG_PLANE(psde, "\n");
 
-	if (!sde_plane_enabled(state)) {
+	sde_plane_rot_atomic_update(plane, old_state);
+
+	if (!sde_plane_sspp_enabled(state)) {
 		pstate->pending = true;
 	} else {
 		int ret;
 
-		ret = _sde_plane_mode_set(plane, state);
+		ret = sde_plane_sspp_atomic_update(plane, state);
 		/* atomic_check should have ensured that this doesn't fail */
 		WARN_ON(ret < 0);
 	}
@@ -1807,6 +3023,8 @@
 		return;
 	}
 
+	psde->catalog = catalog;
+
 	if (sde_is_custom_client()) {
 		if (catalog->mixer_count && catalog->mixer &&
 				catalog->mixer[0].sblk->maxblendstages) {
@@ -1900,10 +3118,7 @@
 		msm_property_install_volatile_range(&psde->property_info,
 			"excl_rect_v1", 0x0, 0, ~0, 0, PLANE_PROP_EXCL_RECT_V1);
 
-	/* standard properties */
-	msm_property_install_rotation(&psde->property_info,
-		(unsigned int) (DRM_REFLECT_X | DRM_REFLECT_Y),
-		PLANE_PROP_ROTATION);
+	sde_plane_rot_install_properties(plane, catalog);
 
 	msm_property_install_enum(&psde->property_info, "blend_op", 0x0, 0,
 		e_blend_op, ARRAY_SIZE(e_blend_op), PLANE_PROP_BLEND_OP);
@@ -2229,6 +3444,9 @@
 		}
 	}
 
+	SDE_DEBUG_PLANE(psde, "%s[%d] <= 0x%llx ret=%d\n",
+			property->name, property->base.id, val, ret);
+
 	return ret;
 }
 
@@ -2256,6 +3474,7 @@
 	} else {
 		SDE_DEBUG_PLANE(psde, "\n");
 		pstate = to_sde_plane_state(state);
+		sde_plane_rot_install_caps(plane);
 		ret = msm_property_atomic_get(&psde->property_info,
 				pstate->property_values, pstate->property_blobs,
 				property, val);
@@ -2307,6 +3526,8 @@
 
 	SDE_DEBUG_PLANE(psde, "\n");
 
+	sde_plane_rot_destroy_state(plane, &pstate->base);
+
 	/* remove ref count for frame buffers */
 	if (state->fb)
 		drm_framebuffer_unreference(state->fb);
@@ -2350,10 +3571,6 @@
 	msm_property_duplicate_state(&psde->property_info, old_state, pstate,
 			pstate->property_values, pstate->property_blobs);
 
-	/* add ref count for frame buffer */
-	if (pstate->base.fb)
-		drm_framebuffer_reference(pstate->base.fb);
-
 	/* clear out any input fence */
 	pstate->input_fence = 0;
 	input_fence_default = msm_property_get_default(
@@ -2364,6 +3581,10 @@
 	pstate->dirty = 0x0;
 	pstate->pending = false;
 
+	__drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+	sde_plane_rot_duplicate_state(plane, &pstate->base);
+
 	return &pstate->base;
 }
 
@@ -2590,6 +3811,13 @@
 			0644,
 			psde->debugfs_root,
 			kms, &sde_plane_danger_enable);
+	debugfs_create_u32("sbuf_mode",
+			0644,
+			psde->debugfs_root, &psde->sbuf_mode);
+	debugfs_create_u32("sbuf_writeback",
+			0644,
+			psde->debugfs_root,
+			&psde->sbuf_writeback);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 9d7056e..e955f41 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -22,7 +22,74 @@
 #include <drm/drm_crtc.h>
 
 #include "msm_prop.h"
+#include "sde_kms.h"
 #include "sde_hw_mdss.h"
+#include "sde_hw_rot.h"
+#include "sde_hw_sspp.h"
+
+/**
+ * struct sde_plane_rot_state - state of pre-sspp rotator stage
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @rot_hw: Pointer to rotator hardware driver
+ * @rot90: true if rotation of 90 degree is required
+ * @hflip: true if horizontal flip is required
+ * @vflip: true if vertical flip is required
+ * @mmu_id: iommu identifier for input/output buffers
+ * @rot_cmd: rotator configuration command
+ * @nplane: total number of drm plane attached to rotator
+ * @in_fb: input fb attached to rotator
+ * @in_rotation: input rotation property of rotator stage
+ * @in_rot_rect: input rectangle of the rotator in plane fb coordinate
+ * @out_rotation: output rotation property of rotator stage
+ * @out_rot_rect: output rectangle of the rotator in plane fb coordinate
+ * @out_src_rect: output rectangle of the plane source in plane fb coordinate
+ * @out_src_x: output src_x of rotator stage in rotator output fb coordinate
+ * @out_src_y: output src_y of rotator stage in rotator output fb coordinate
+ * @out_src_w: output src_w of rotator stage in rotator output fb coordinate
+ * @out_src_h: output src_h of rotator stage in rotator output fb coordinate
+ * @out_fb_width: output framebuffer width of rotator stage
+ * @out_fb_height: output framebuffer height of rotator stage
+ * @out_fb_pixel_format: output framebuffer pixel format of rotator stage
+ * @out_fb_modifier: output framebuffer modifier of rotator stage
+ * @out_fb_flags: output framebuffer flags of rotator stage
+ * @out_sbuf: true if output streaming buffer is required
+ * @out_fb_format: Pointer to output framebuffer format of rotator stage
+ * @out_fb: Pointer to output drm framebuffer of rotator stage
+ * @out_fbo: framebuffer object of output streaming buffer
+ * @out_xpos: relative horizontal position of the plane (0 - leftmost)
+ */
+struct sde_plane_rot_state {
+	u32 sequence_id;
+	struct sde_hw_rot *rot_hw;
+	bool rot90;
+	bool hflip;
+	bool vflip;
+	u32 mmu_id;
+	struct sde_hw_rot_cmd rot_cmd;
+	int nplane;
+	/* input */
+	struct drm_framebuffer *in_fb;
+	struct drm_rect in_rot_rect;
+	u32 in_rotation;
+	/* output */
+	struct drm_rect out_rot_rect;
+	struct drm_rect out_src_rect;
+	u32 out_rotation;
+	u32 out_src_x;
+	u32 out_src_y;
+	u32 out_src_w;
+	u32 out_src_h;
+	u32 out_fb_width;
+	u32 out_fb_height;
+	u32 out_fb_pixel_format;
+	u64 out_fb_modifier[4];
+	u32 out_fb_flags;
+	bool out_sbuf;
+	const struct sde_format *out_fb_format;
+	struct drm_framebuffer *out_fb;
+	struct sde_kms_fbo *out_fbo;
+	int out_xpos;
+};
 
 /**
  * struct sde_plane_state: Define sde extension of drm plane state object
@@ -48,6 +115,10 @@
 	uint32_t multirect_index;
 	uint32_t multirect_mode;
 	bool pending;
+
+	/* @sc_cfg: system_cache configuration */
+	struct sde_hw_pipe_sc_cfg sc_cfg;
+	struct sde_plane_rot_state rot;
 };
 
 /**
@@ -88,6 +159,23 @@
 bool is_sde_plane_virtual(struct drm_plane *plane);
 
 /**
+ * sde_plane_get_ctl_flush - get control flush mask
+ * @plane:   Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush: Pointer to updated flush mask
+ */
+void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
+		u32 *flush);
+
+/**
+ * sde_plane_is_sbuf_mode - return status of stream buffer mode
+ * @plane:   Pointer to DRM plane object
+ * @prefill: Pointer to updated prefill in stream buffer mode (optional)
+ * Returns: true if plane is in stream buffer mode
+ */
+bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill);
+
+/**
  * sde_plane_flush - final plane operations before commit flush
  * @plane: Pointer to drm plane structure
  */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index d72b7cd..66318b3 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -24,6 +24,7 @@
 #include "sde_encoder.h"
 #include "sde_connector.h"
 #include "sde_hw_dsc.h"
+#include "sde_hw_rot.h"
 
 #define RESERVED_BY_OTHER(h, r) \
 	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
@@ -169,7 +170,7 @@
 	blk_list = &rm->hw_blks[i->type];
 
 	if (i->blk && (&i->blk->list == blk_list)) {
-		SDE_ERROR("attempt resume iteration past last\n");
+		SDE_DEBUG("attempt resume iteration past last\n");
 		return false;
 	}
 
@@ -225,6 +226,9 @@
 	case SDE_HW_BLK_DSC:
 		sde_hw_dsc_destroy(hw);
 		break;
+	case SDE_HW_BLK_ROT:
+		sde_hw_rot_destroy(hw);
+		break;
 	case SDE_HW_BLK_SSPP:
 		/* SSPPs are not managed by the resource manager */
 	case SDE_HW_BLK_TOP:
@@ -317,6 +321,10 @@
 		hw = sde_hw_dsc_init(id, mmio, cat);
 		name = "dsc";
 		break;
+	case SDE_HW_BLK_ROT:
+		hw = sde_hw_rot_init(id, mmio, cat);
+		name = "rot";
+		break;
 	case SDE_HW_BLK_SSPP:
 		/* SSPPs are not managed by the resource manager */
 	case SDE_HW_BLK_TOP:
@@ -368,6 +376,8 @@
 	for (type = 0; type < SDE_HW_BLK_MAX; type++)
 		INIT_LIST_HEAD(&rm->hw_blks[type]);
 
+	rm->dev = dev;
+
 	/* Some of the sub-blocks require an mdptop to be created */
 	rm->hw_mdp = sde_hw_mdptop_init(MDP_TOP, mmio, cat);
 	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
@@ -456,6 +466,15 @@
 		}
 	}
 
+	for (i = 0; i < cat->rot_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_ROT,
+				cat->rot[i].id, &cat->rot[i]);
+		if (rc) {
+			SDE_ERROR("failed: rot hw not available\n");
+			goto fail;
+		}
+	}
+
 	for (i = 0; i < cat->ctl_count; i++) {
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
 				cat->ctl[i].id, &cat->ctl[i]);
@@ -1130,14 +1149,6 @@
 	}
 
 	kfree(rsvp);
-
-	/* if no remaining reservation, then clear the topology name */
-	if (!_sde_rm_get_rsvp(rm, conn->encoder))
-		(void) msm_property_set_property(
-				sde_connector_get_propinfo(conn),
-				sde_connector_get_property_values(conn->state),
-				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
 }
 
 void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
@@ -1173,6 +1184,12 @@
 		SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
 				rsvp->enc_id);
 		_sde_rm_release_rsvp(rm, rsvp, conn);
+
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(conn),
+				sde_connector_get_property_values(conn->state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
 	}
 }
 
@@ -1190,8 +1207,12 @@
 			sde_connector_get_property_values(conn_state),
 			CONNECTOR_PROP_TOPOLOGY_NAME,
 			rsvp->topology);
-	if (ret)
+	if (ret) {
+		SDE_ERROR("failed to set topology name property, ret %d\n",
+				ret);
 		_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+		return ret;
+	}
 
 	/* Swap next rsvp to be the active */
 	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
@@ -1284,6 +1305,12 @@
 		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
 		rsvp_cur = NULL;
 		_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(
+						conn_state->connector),
+				sde_connector_get_property_values(conn_state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 4c1260b..40e02b8 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -1974,6 +1974,9 @@
 	char *end_addr;
 	int i;
 
+	if (!len_bytes)
+		return;
+
 	in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
 	in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
 
@@ -2433,7 +2436,7 @@
 	struct sde_dbg_reg_base **blk_arr;
 	u32 blk_len;
 
-	if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_DEFAULT))
+	if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_ALWAYS))
 		return;
 
 	if (queue_work && work_pending(&sde_dbg_base.dump_work))
@@ -2446,8 +2449,12 @@
 			sizeof(sde_dbg_base.req_dump_blks));
 
 	va_start(args, name);
-	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
-		blk_name = va_arg(args, char*);
+	i = 0;
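+	/* walk arguments until the NULL terminator; warn if too many names */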
+	while ((blk_name = va_arg(args, char*))) {
+		if (i++ >= SDE_EVTLOG_MAX_DATA) {
+			pr_err("could not parse all dump arguments\n");
+			break;
+		}
 		if (IS_ERR_OR_NULL(blk_name))
 			break;
 
@@ -2471,9 +2478,6 @@
 		if (!strcmp(blk_name, "panic"))
 			do_panic = true;
 	}
-	blk_name = va_arg(args, char*);
-	if (!IS_ERR_OR_NULL(blk_name))
-		pr_err("could not parse all dump arguments\n");
 	va_end(args);
 
 	if (queue_work) {
@@ -2554,6 +2558,82 @@
 	.write = sde_evtlog_dump_write,
 };
 
+/*
+ * sde_evtlog_filter_show - read callback for evtlog filter
+ * @s: pointer to seq_file object
+ * @data: pointer to private data
+ */
+static int sde_evtlog_filter_show(struct seq_file *s, void *data)
+{
+	struct sde_dbg_evtlog *evtlog;
+	char buffer[64];
+	int i;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	evtlog = s->private;
+
+	for (i = 0; !sde_evtlog_get_filter(
+				evtlog, i, buffer, ARRAY_SIZE(buffer)); ++i)
+		seq_printf(s, "*%s*\n", buffer);
+	return 0;
+}
+
+/*
+ * sde_evtlog_filter_open - debugfs open handler for evtlog filter
+ * @inode: debugfs inode
+ * @file: file handle
+ * Returns: zero on success
+ */
+static int sde_evtlog_filter_open(struct inode *inode, struct file *file)
+{
+	if (!file)
+		return -EINVAL;
+
+	return single_open(file, sde_evtlog_filter_show, inode->i_private);
+}
+
+/*
+ * sde_evtlog_filter_write - write callback for evtlog filter
+ * @file: pointer to file structure
+ * @user_buf: pointer to incoming user data
+ * @count: size of incoming user buffer
+ * @ppos: pointer to file offset
+ */
+static ssize_t sde_evtlog_filter_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *tmp_filter = NULL;
+	ssize_t rc = 0;
+
+	if (count > 0) {
+		/* copy user provided string and null terminate it */
+		tmp_filter = kzalloc(count + 1, GFP_KERNEL);
+		if (!tmp_filter)
+			rc = -ENOMEM;
+		else if (copy_from_user(tmp_filter, user_buf, count))
+			rc = -EFAULT;
+	}
+
+	/* update actual filter configuration on success */
+	if (!rc) {
+		sde_evtlog_set_filter(sde_dbg_base.evtlog, tmp_filter);
+		rc = count;
+	}
+	kfree(tmp_filter);
+
+	return rc;
+}
+
+static const struct file_operations sde_evtlog_filter_fops = {
+	.open =		sde_evtlog_filter_open,
+	.write =	sde_evtlog_filter_write,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	seq_release
+};
+
 /**
  * sde_dbg_reg_base_release - release allocated reg dump file private data
  * @inode: debugfs inode
@@ -2795,12 +2875,14 @@
 			&sde_evtlog_fops);
 	debugfs_create_u32("enable", 0644, sde_dbg_base.root,
 			&(sde_dbg_base.evtlog->enable));
+	debugfs_create_file("filter", 0644, sde_dbg_base.root,
+			sde_dbg_base.evtlog,
+			&sde_evtlog_filter_fops);
 	debugfs_create_u32("panic", 0644, sde_dbg_base.root,
 			&sde_dbg_base.panic_on_err);
 	debugfs_create_u32("reg_dump", 0644, sde_dbg_base.root,
 			&sde_dbg_base.enable_reg_dump);
 
-
 	if (dbg->dbgbus_sde.entries) {
 		dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE;
 		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
@@ -2834,7 +2916,7 @@
 	return 0;
 }
 
-#if defined(CONFIG_DEBUG_FS)
+#ifdef CONFIG_DEBUG_FS
 static void _sde_dbg_debugfs_destroy(void)
 {
 	debugfs_remove_recursive(sde_dbg_base.root);
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 7a940f4..4344eb8 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -24,9 +24,10 @@
 #define SDE_DBG_DUMP_DATA_LIMITER (NULL)
 
 enum sde_dbg_evtlog_flag {
-	SDE_EVTLOG_DEFAULT = BIT(0),
+	SDE_EVTLOG_CRITICAL = BIT(0),
 	SDE_EVTLOG_IRQ = BIT(1),
-	SDE_EVTLOG_ALL = BIT(7)
+	SDE_EVTLOG_VERBOSE = BIT(2),
+	SDE_EVTLOG_ALWAYS = -1
 };
 
 enum sde_dbg_dump_flag {
@@ -35,7 +36,7 @@
 };
 
 #ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
-#define SDE_EVTLOG_DEFAULT_ENABLE 1
+#define SDE_EVTLOG_DEFAULT_ENABLE SDE_EVTLOG_CRITICAL
 #else
 #define SDE_EVTLOG_DEFAULT_ENABLE 0
 #endif
@@ -72,6 +73,9 @@
 	int pid;
 };
 
+/**
+ * @filter_list: Linked list of currently active filter strings
+ */
 struct sde_dbg_evtlog {
 	struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY];
 	u32 first;
@@ -80,6 +84,7 @@
 	u32 next;
 	u32 enable;
 	spinlock_t spin_lock;
+	struct list_head filter_list;
 };
 
 extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
@@ -89,7 +94,15 @@
  * ... - variable arguments
  */
 #define SDE_EVT32(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
-		__LINE__, SDE_EVTLOG_DEFAULT, ##__VA_ARGS__, \
+		__LINE__, SDE_EVTLOG_ALWAYS, ##__VA_ARGS__, \
+		SDE_EVTLOG_DATA_LIMITER)
+
+/**
+ * SDE_EVT32_VERBOSE - Write a list of 32bit values for verbose event logging
+ * ... - variable arguments
+ */
+#define SDE_EVT32_VERBOSE(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
+		__LINE__, SDE_EVTLOG_VERBOSE, ##__VA_ARGS__, \
 		SDE_EVTLOG_DATA_LIMITER)
 
 /**
@@ -244,6 +257,24 @@
 		const char *range_name, u32 offset_start, u32 offset_end,
 		uint32_t xin_id);
 
+/**
+ * sde_evtlog_set_filter - update evtlog filtering
+ * @evtlog:	pointer to evtlog
+ * @filter:     pointer to optional function name filter, set to NULL to disable
+ */
+void sde_evtlog_set_filter(struct sde_dbg_evtlog *evtlog, char *filter);
+
+/**
+ * sde_evtlog_get_filter - query configured evtlog filters
+ * @evtlog:	pointer to evtlog
+ * @index:	filter index to retrieve
+ * @buf:	pointer to output filter buffer
+ * @bufsz:	size of output filter buffer
+ * Returns:	zero if a filter string was returned
+ */
+int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog, int index,
+		char *buf, size_t bufsz);
+
 #else
 static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
 {
@@ -275,7 +306,7 @@
 	return 0;
 }
 
-void sde_dbg_init_dbg_buses(u32 hwversion)
+static inline void sde_dbg_init_dbg_buses(u32 hwversion)
 {
 }
 
@@ -285,7 +316,7 @@
 	return 0;
 }
 
-int sde_dbg_debugfs_register(struct dentry *debugfs_root)
+static inline int sde_dbg_debugfs_register(struct dentry *debugfs_root)
 {
 	return 0;
 }
@@ -310,6 +341,17 @@
 {
 }
 
+static inline void sde_evtlog_set_filter(
+		struct sde_dbg_evtlog *evtlog, char *filter)
+{
+}
+
+static inline int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog,
+		int index, char *buf, size_t bufsz)
+{
+	return -EINVAL;
+}
+
 #endif /* defined(CONFIG_DEBUG_FS) */
 
 
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 759bdab..699396f 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -23,13 +23,40 @@
 #include "sde_dbg.h"
 #include "sde_trace.h"
 
+#define SDE_EVTLOG_FILTER_STRSIZE	64
+
+struct sde_evtlog_filter {
+	struct list_head list;
+	char filter[SDE_EVTLOG_FILTER_STRSIZE];
+};
+
+static bool _sde_evtlog_is_filtered_no_lock(
+		struct sde_dbg_evtlog *evtlog, const char *str)
+{
+	struct sde_evtlog_filter *filter_node;
+	bool rc;
+
+	if (!str)
+		return true;
+
+	/*
+	 * Filter the incoming string IFF the list is not empty AND
+	 * a matching entry is not in the list.
+	 */
+	rc = !list_empty(&evtlog->filter_list);
+	list_for_each_entry(filter_node, &evtlog->filter_list, list)
+		if (strnstr(str, filter_node->filter,
+					SDE_EVTLOG_FILTER_STRSIZE - 1)) {
+			rc = false;
+			break;
+		}
+
+	return rc;
+}
+
 bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag)
 {
-	if (!evtlog)
-		return false;
-
-	return (flag & evtlog->enable) ||
-		(flag == SDE_EVTLOG_ALL && evtlog->enable);
+	return evtlog && (evtlog->enable & flag);
 }
 
 void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
@@ -47,6 +74,10 @@
 		return;
 
 	spin_lock_irqsave(&evtlog->spin_lock, flags);
+
+	if (_sde_evtlog_is_filtered_no_lock(evtlog, name))
+		goto exit;
+
 	log = &evtlog->logs[evtlog->curr];
 	log->time = ktime_to_us(ktime_get());
 	log->name = name;
@@ -70,27 +101,20 @@
 
 	trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
 			i > 1 ? log->data[1] : 0);
-
+exit:
 	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
 }
 
 /* always dump the last entries which are not dumped yet */
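+/* caller is expected to hold evtlog->spin_lock */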
 static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog)
 {
-	bool need_dump = true;
-	unsigned long flags;
-
 	if (!evtlog)
 		return false;
 
-	spin_lock_irqsave(&evtlog->spin_lock, flags);
-
 	evtlog->first = evtlog->next;
 
-	if (evtlog->last == evtlog->first) {
-		need_dump = false;
-		goto dump_exit;
-	}
+	if (evtlog->last == evtlog->first)
+		return false;
 
 	if (evtlog->last < evtlog->first) {
 		evtlog->first %= SDE_EVTLOG_ENTRY;
@@ -99,16 +123,14 @@
 	}
 
 	if ((evtlog->last - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
-		pr_warn("evtlog buffer overflow before dump: %d\n",
-			evtlog->last - evtlog->first);
+		pr_info("evtlog skipping %d entries, last=%d\n",
+			evtlog->last - evtlog->first - SDE_EVTLOG_PRINT_ENTRY,
+			evtlog->last - 1);
 		evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY;
 	}
 	evtlog->next = evtlog->first + 1;
 
-dump_exit:
-	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
-
-	return need_dump;
+	return true;
 }
 
 ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
@@ -122,16 +144,15 @@
 	if (!evtlog || !evtlog_buf)
 		return 0;
 
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+
 	/* update markers, exit if nothing to print */
 	if (!_sde_evtlog_dump_calc_range(evtlog))
-		return 0;
-
-	spin_lock_irqsave(&evtlog->spin_lock, flags);
+		goto exit;
 
 	log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY];
 
-	prev_log = &evtlog->logs[(evtlog->first - 1) %
-		SDE_EVTLOG_ENTRY];
+	prev_log = &evtlog->logs[(evtlog->first - 1) % SDE_EVTLOG_ENTRY];
 
 	off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
 		log->name, log->line);
@@ -150,7 +171,7 @@
 			"%x ", log->data[i]);
 
 	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
-
+exit:
 	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
 
 	return off;
@@ -178,10 +199,109 @@
 	spin_lock_init(&evtlog->spin_lock);
 	evtlog->enable = SDE_EVTLOG_DEFAULT_ENABLE;
 
+	INIT_LIST_HEAD(&evtlog->filter_list);
+
 	return evtlog;
 }
 
+int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog, int index,
+		char *buf, size_t bufsz)
+{
+	struct sde_evtlog_filter *filter_node;
+	unsigned long flags;
+	int rc = -EFAULT;
+
+	if (!evtlog || !buf || !bufsz || index < 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+	list_for_each_entry(filter_node, &evtlog->filter_list, list) {
+		if (index--)
+			continue;
+
+		/* don't care about return value */
+		(void)strlcpy(buf, filter_node->filter, bufsz);
+		rc = 0;
+		break;
+	}
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+
+	return rc;
+}
+
+void sde_evtlog_set_filter(struct sde_dbg_evtlog *evtlog, char *filter)
+{
+	struct sde_evtlog_filter *filter_node, *tmp;
+	struct list_head free_list;
+	unsigned long flags;
+	char *flt;
+
+	if (!evtlog)
+		return;
+
+	INIT_LIST_HEAD(&free_list);
+
+	/*
+	 * Clear active filter list and cache filter_nodes locally
+	 * to reduce memory fragmentation.
+	 */
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+	list_for_each_entry_safe(filter_node, tmp, &evtlog->filter_list, list) {
+		list_del_init(&filter_node->list);
+		list_add_tail(&filter_node->list, &free_list);
+	}
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+
+	/*
+	 * Parse incoming filter request string and build up a new
+	 * filter list. New filter nodes are taken from the local
+	 * free list, if available, and allocated from the system
+	 * heap once the free list is empty.
+	 */
+	while (filter && (flt = strsep(&filter, "|\r\n\t ")) != NULL) {
+		if (!*flt)
+			continue;
+
+		if (list_empty(&free_list)) {
+			filter_node = kzalloc(sizeof(*filter_node), GFP_KERNEL);
+			if (!filter_node)
+				break;
+
+			INIT_LIST_HEAD(&filter_node->list);
+		} else {
+			filter_node = list_first_entry(&free_list,
+					struct sde_evtlog_filter, list);
+			list_del_init(&filter_node->list);
+		}
+
+		/* don't care if copy truncated */
+		(void)strlcpy(filter_node->filter, flt,
+				SDE_EVTLOG_FILTER_STRSIZE);
+
+		spin_lock_irqsave(&evtlog->spin_lock, flags);
+		list_add_tail(&filter_node->list, &evtlog->filter_list);
+		spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+	}
+
+	/*
+	 * Free any unused filter_nodes back to the system.
+	 */
+	list_for_each_entry_safe(filter_node, tmp, &free_list, list) {
+		list_del(&filter_node->list);
+		kfree(filter_node);
+	}
+}
+
 void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
 {
+	struct sde_evtlog_filter *filter_node, *tmp;
+
+	if (!evtlog)
+		return;
+
+	list_for_each_entry_safe(filter_node, tmp, &evtlog->filter_list, list) {
+		list_del(&filter_node->list);
+		kfree(filter_node);
+	}
 	kfree(evtlog);
 }
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
index 70a4225..d5a438e 100644
--- a/drivers/gpu/drm/msm/sde_io_util.c
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -355,7 +355,11 @@
 	return rc;
 
 error:
-	msm_dss_put_clk(clk_arry, num_clk);
+	for (i--; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
 
 	return rc;
 } /* msm_dss_get_clk */
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 5157b9c..62efe8e 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -470,6 +470,8 @@
 		if (IS_ERR_OR_NULL(pdbus->data_bus_scale_table)) {
 			pr_err("reg bus handle parsing failed\n");
 			rc = PTR_ERR(pdbus->data_bus_scale_table);
+			if (!pdbus->data_bus_scale_table)
+				rc = -EINVAL;
 			goto end;
 		}
 		pdbus->data_bus_hdl = msm_bus_scale_register_client(
@@ -480,18 +482,6 @@
 			goto end;
 		}
 		pr_debug("register data_bus_hdl=%x\n", pdbus->data_bus_hdl);
-
-		/*
-		 * Following call will not result in actual vote rather update
-		 * the current index and ab/ib value. When continuous splash
-		 * is enabled, actual vote will happen when splash handoff is
-		 * done.
-		 */
-		return _sde_power_data_bus_set_quota(pdbus,
-				SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
-				SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
-				SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
-				SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA);
 	}
 
 end:
@@ -511,6 +501,8 @@
 		if (IS_ERR_OR_NULL(bus_scale_table)) {
 			pr_err("reg bus handle parsing failed\n");
 			rc = PTR_ERR(bus_scale_table);
+			if (!bus_scale_table)
+				rc = -EINVAL;
 			goto end;
 		}
 		phandle->reg_bus_hdl = msm_bus_scale_register_client(
@@ -533,6 +525,31 @@
 		msm_bus_scale_unregister_client(reg_bus_hdl);
 }
 
+static int sde_power_data_bus_update(struct sde_power_data_bus_handle *pdbus,
+							bool enable)
+{
+	int rc = 0;
+	u64 ab_quota_rt, ab_quota_nrt;
+	u64 ib_quota_rt, ib_quota_nrt;
+
+	ab_quota_rt = ab_quota_nrt = enable ?
+			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA :
+			SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA;
+	ib_quota_rt = ib_quota_nrt = enable ?
+			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA :
+			SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA;
+
+	if (pdbus->data_bus_hdl)
+		rc = _sde_power_data_bus_set_quota(pdbus, ab_quota_rt,
+				ab_quota_nrt, ib_quota_rt, ib_quota_nrt);
+
+	if (rc)
+		pr_err("failed to set data bus vote rc=%d enable:%d\n",
+							rc, enable);
+
+	return rc;
+}
+
 static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
 {
 	int rc = 0;
@@ -578,6 +595,12 @@
 {
 	return 0;
 }
+
+static int sde_power_data_bus_update(struct sde_power_data_bus_handle *pdbus,
+							bool enable)
+{
+	return 0;
+}
 #endif
 
 int sde_power_resource_init(struct platform_device *pdev,
@@ -734,6 +757,13 @@
 		goto end;
 
 	if (enable) {
+		rc = sde_power_data_bus_update(&phandle->data_bus_handle,
+									enable);
+		if (rc) {
+			pr_err("failed to set data bus vote rc=%d\n", rc);
+			goto data_bus_hdl_err;
+		}
+
 		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
 		if (rc) {
 			pr_err("failed to enable vregs rc=%d\n", rc);
@@ -759,6 +789,8 @@
 							max_usecase_ndx);
 
 		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+
+		sde_power_data_bus_update(&phandle->data_bus_handle, enable);
 	}
 
 end:
@@ -770,6 +802,8 @@
 reg_bus_hdl_err:
 	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
 vreg_err:
+	sde_power_data_bus_update(&phandle->data_bus_handle, 0);
+data_bus_hdl_err:
 	phandle->current_usecase_ndx = prev_usecase_ndx;
 	mutex_unlock(&phandle->phandle_lock);
 	return rc;
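
(Aside, not part of the patch) Summary of the vote ordering the hunks above establish in sde_power_resource_enable(), based only on the visible context:

/*
 * enable:   data bus vote -> vregs -> (existing reg bus / clock votes)
 * disable:  (existing clock / reg bus votes) -> vregs -> data bus vote
 *
 * The error unwind mirrors the enable order: reg_bus_hdl_err switches
 * the vregs off, vreg_err drops the data bus vote, and the new
 * data_bus_hdl_err label restores the previous usecase index before
 * unlocking.
 */
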
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 4f0348f..4e262a3 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,8 +16,12 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA 2000000000
-#define SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA 2000000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	64000
+#define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	64000
+#define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
+
+#include <linux/sde_io_util.h>
 
 /**
  * mdss_bus_vote_type: register bus vote type
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index b36e17c..a9a7d4f 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -27,7 +27,7 @@
 #include <soc/qcom/rpmh.h>
 #include <drm/drmP.h>
 #include <drm/drm_irq.h>
-#include "sde_rsc.h"
+#include "sde_rsc_priv.h"
 
 /* this time is ~0.02ms */
 #define RSC_BACKOFF_TIME_NS		 20000
@@ -48,6 +48,8 @@
 #define MAX_BUFFER_SIZE 256
 
 #define TRY_CMD_MODE_SWITCH		0xFFFF
+#define TRY_CLK_MODE_SWITCH		0xFFFE
+#define STATE_UPDATE_NOT_ALLOWED	0xFFFD
 
 static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
 
@@ -103,6 +105,7 @@
 
 	return client;
 }
+EXPORT_SYMBOL(sde_rsc_client_create);
 
 /**
  * sde_rsc_client_destroy() - Destroy the sde rsc client.
@@ -139,6 +142,114 @@
 end:
 	return;
 }
+EXPORT_SYMBOL(sde_rsc_client_destroy);
+
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
+{
+	struct sde_rsc_event *evt;
+	struct sde_rsc_priv *rsc;
+
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return ERR_PTR(-EINVAL);
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return ERR_PTR(-EINVAL);
+	} else if (!cb_func || !event_type) {
+		pr_err("no event or cb func\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rsc = rsc_prv_list[rsc_index];
+	evt = kzalloc(sizeof(struct sde_rsc_event), GFP_KERNEL);
+	if (!evt)
+		return ERR_PTR(-ENOMEM);
+
+	evt->event_type = event_type;
+	evt->rsc_index = rsc_index;
+	evt->usr = usr;
+	evt->cb_func = cb_func;
+	pr_debug("event register type:%d rsc index:%d\n",
+						event_type, rsc_index);
+
+	mutex_lock(&rsc->client_lock);
+	list_add(&evt->list, &rsc->event_list);
+	mutex_unlock(&rsc->client_lock);
+
+	return evt;
+}
+EXPORT_SYMBOL(sde_rsc_register_event);
+
+void sde_rsc_unregister_event(struct sde_rsc_event *event)
+{
+	struct sde_rsc_priv *rsc;
+
+	if (!event) {
+		pr_debug("invalid event client\n");
+		goto end;
+	} else if (event->rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index\n");
+		goto end;
+	}
+
+	pr_debug("event client destroyed\n");
+	rsc = rsc_prv_list[event->rsc_index];
+	if (!rsc)
+		goto end;
+
+	mutex_lock(&rsc->client_lock);
+	list_del_init(&event->list);
+	mutex_unlock(&rsc->client_lock);
+
+	kfree(event);
+end:
+	return;
+}
+EXPORT_SYMBOL(sde_rsc_unregister_event);
+
+static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
+	struct sde_power_client *pclient, bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid input argument\n");
+		return -EINVAL;
+	}
+
+	mp = &phandle->mp;
+
+	if (enable)
+		pclient->refcount++;
+	else if (pclient->refcount)
+		pclient->refcount--;
+
+	if (pclient->refcount)
+		pclient->usecase_ndx = VOTE_INDEX_LOW;
+	else
+		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+	if (phandle->current_usecase_ndx == pclient->usecase_ndx)
+		goto end;
+
+	if (enable) {
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+		if (rc) {
+			pr_err("clock enable failed rc:%d\n", rc);
+			goto end;
+		}
+	} else {
+		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+	}
+
+	phandle->current_usecase_ndx = pclient->usecase_ndx;
+
+end:
+	return rc;
+}
 
 static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *cmd_config)
@@ -237,24 +348,50 @@
 static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc)
 {
 	struct sde_rsc_client *client;
-	int rc = 0;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
+	bool idle_switch = true;
 
 	list_for_each_entry(client, &rsc->client_list, list)
-		if (client->current_state != SDE_RSC_IDLE_STATE)
-			return TRY_CMD_MODE_SWITCH;
+		if (client->current_state != SDE_RSC_IDLE_STATE) {
+			idle_switch = false;
+			break;
+		}
 
-	if (rsc->hw_ops.state_update)
+	if (!idle_switch) {
+		/*
+		 * The checks below must walk the whole client list each
+		 * time: clients may appear in any order and the list is
+		 * not sorted, so only a preference order (vid, then cmd,
+		 * then clk) can be applied.
+		 */
+
+		/* first check if any vid client is active */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_VID_STATE)
+				return rc;
+
+		/* now try cmd state switch */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_CMD_STATE)
+				return TRY_CMD_MODE_SWITCH;
+
+		/* now try clk state switch */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_CLK_STATE)
+				return TRY_CLK_MODE_SWITCH;
+
+	} else if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
+	}
 
 	return rc;
 }
 
-static bool sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
+static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client, bool wait_req)
 {
 	struct sde_rsc_client *client;
-	int rc = 0;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
 
 	if (!rsc->primary_client) {
 		pr_err("primary client not available for cmd state switch\n");
@@ -276,6 +413,12 @@
 		if (client->current_state == SDE_RSC_VID_STATE)
 			goto end;
 
+	/* no need to enable solver again */
+	if (rsc->current_state == SDE_RSC_CLK_STATE) {
+		rc = 0;
+		goto end;
+	}
+
 	if (rsc->hw_ops.state_update)
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
 
@@ -287,6 +430,28 @@
 	return rc;
 }
 
+static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc)
+{
+	struct sde_rsc_client *client;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
+
+	list_for_each_entry(client, &rsc->client_list, list)
+		if ((client->current_state == SDE_RSC_VID_STATE) ||
+		    (client->current_state == SDE_RSC_CMD_STATE))
+			goto end;
+
+	/* no need to enable the solver again */
+	if (rsc->current_state == SDE_RSC_CMD_STATE) {
+		rc = 0;
+		goto end;
+	}
+
+	if (rsc->hw_ops.state_update)
+		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+end:
+	return rc;
+}
+
 static bool sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client, bool wait_req)
@@ -310,7 +475,7 @@
 
 /**
  * sde_rsc_client_state_update() - rsc client state update
- * Video mode and command mode are supported as modes. A client need to
+ * Video mode, cmd mode and clk state are supported as modes. A client needs to
  * set this property during panel config time. A switching client can set the
  * property to change the state
  *
@@ -350,8 +515,7 @@
 		pr_err("invalid master component binding\n");
 		rc = -EINVAL;
 		goto end;
-	} else if ((rsc->current_state == state) &&
-				(state != SDE_RSC_CMD_UPDATE_STATE)) {
+	} else if ((rsc->current_state == state) && !config) {
 		pr_debug("no state change: %d\n", state);
 		goto end;
 	}
@@ -360,22 +524,33 @@
 		__builtin_return_address(0), rsc->current_state,
 		caller_client->name, state);
 
-	wait_requested = (rsc->current_state != SDE_RSC_IDLE_STATE);
+	/* only a switch out of an active (vid/cmd) state needs a vsync wait */
+	wait_requested = (rsc->current_state == SDE_RSC_VID_STATE) ||
+			(rsc->current_state == SDE_RSC_CMD_STATE);
 
 	if (rsc->power_collapse)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	switch (state) {
 	case SDE_RSC_IDLE_STATE:
 		rc = sde_rsc_switch_to_idle(rsc);
+
 		/* video state client might be exiting; try cmd state switch */
-		if (rc == TRY_CMD_MODE_SWITCH)
+		if (rc == TRY_CMD_MODE_SWITCH) {
 			rc = sde_rsc_switch_to_cmd(rsc, NULL,
 					rsc->primary_client, wait_requested);
+			if (!rc)
+				state = SDE_RSC_CMD_STATE;
+
+		/* cmd state client might be exiting; try clk state switch */
+		} else if (rc == TRY_CLK_MODE_SWITCH) {
+			rc = sde_rsc_switch_to_clk(rsc);
+			if (!rc)
+				state = SDE_RSC_CLK_STATE;
+		}
 		break;
 
 	case SDE_RSC_CMD_STATE:
-	case SDE_RSC_CMD_UPDATE_STATE:
 		rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
 								wait_requested);
 		break;
@@ -385,25 +560,34 @@
 								wait_requested);
 		break;
 
+	case SDE_RSC_CLK_STATE:
+		rc = sde_rsc_switch_to_clk(rsc);
+		break;
+
 	default:
 		pr_err("invalid state handling %d\n", state);
 		break;
 	}
 
-	if (rc) {
+	if (rc == STATE_UPDATE_NOT_ALLOWED) {
+		rc = 0;
+		goto clk_disable;
+	} else if (rc) {
 		pr_err("state update failed rc:%d\n", rc);
-		goto end;
+		goto clk_disable;
 	}
 
 	pr_debug("state switch successfully complete: %d\n", state);
 	rsc->current_state = state;
 
+clk_disable:
 	if (rsc->power_collapse)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 end:
 	mutex_unlock(&rsc->client_lock);
 	return rc;
 }
+EXPORT_SYMBOL(sde_rsc_client_state_update);
 
 /**
  * sde_rsc_client_vote() - ab/ib vote from rsc client
@@ -482,6 +666,7 @@
 	mutex_unlock(&rsc->client_lock);
 	return rc;
 }
+EXPORT_SYMBOL(sde_rsc_client_vote);
 
 static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 {
@@ -518,7 +703,7 @@
 		seq_printf(s, "\t client:%s state:%d\n",
 				client->name, client->current_state);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	if (rsc->hw_ops.debug_show) {
 		ret = rsc->hw_ops.debug_show(s, rsc);
@@ -526,7 +711,7 @@
 			pr_err("sde rsc: hw debug failed ret:%d\n", ret);
 	}
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	return 0;
@@ -555,12 +740,12 @@
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
 							MAX_BUFFER_SIZE, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	if (blen < 0)
@@ -594,7 +779,7 @@
 	input[count - 1] = '\0';
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	mode = strnstr(input, "mode0=", strlen("mode0="));
 	if (mode) {
@@ -620,7 +805,7 @@
 	}
 
 end:
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	pr_err("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
@@ -647,12 +832,12 @@
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
 						MAX_BUFFER_SIZE, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	if (blen < 0)
@@ -692,7 +877,7 @@
 	}
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	if (vsync_state)
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -700,7 +885,7 @@
 	else
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	kfree(input);
@@ -750,7 +935,7 @@
 		return;
 
 	if (rsc->pclient)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	if (rsc->fs)
 		devm_regulator_put(rsc->fs);
 	if (rsc->wrapper_io.base)
@@ -890,8 +1075,7 @@
 		goto sde_rsc_fail;
 	}
 
-	/* these clocks are always on */
-	if (sde_power_resource_enable(&rsc->phandle, rsc->pclient, true)) {
+	if (sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true)) {
 		pr_err("failed to enable sde rsc power resources\n");
 		goto sde_rsc_fail;
 	}
@@ -899,6 +1083,8 @@
 	if (sde_rsc_timer_calculate(rsc, NULL))
 		goto sde_rsc_fail;
 
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+
 	INIT_LIST_HEAD(&rsc->client_list);
 	mutex_init(&rsc->client_lock);
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.h b/drivers/gpu/drm/msm/sde_rsc.h
deleted file mode 100644
index e9a55b6..0000000
--- a/drivers/gpu/drm/msm/sde_rsc.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _SDE_RSC_H_
-#define _SDE_RSC_H_
-
-#include <linux/kernel.h>
-#include <linux/sde_io_util.h>
-
-#include <soc/qcom/tcs.h>
-#include "sde_power_handle.h"
-
-#define SDE_RSC_COMPATIBLE "disp_rscc"
-
-#define MAX_RSC_CLIENT_NAME_LEN 128
-
-/* primary display rsc index */
-#define SDE_RSC_INDEX		0
-
-/* rsc index max count */
-#define MAX_RSC_COUNT		5
-
-struct sde_rsc_priv;
-
-/**
- * rsc_mode_req: sde rsc mode request information
- * MODE_READ: read vsync status
- * MODE0_UPDATE: mode0 status , this should be 0x0
- * MODE1_UPDATE: mode1 status , this should be 0x1
- * MODE2_UPDATE: mode2 status , this should be 0x2
- */
-enum rsc_mode_req {
-	MODE_READ,
-	MODE0_UPDATE = 0x1,
-	MODE1_UPDATE = 0x2,
-	MODE2_UPDATE = 0x3,
-};
-
-/**
- * rsc_vsync_req: sde rsc vsync request information
- * VSYNC_READ: read vsync status
- * VSYNC_ENABLE: enable rsc wrapper vsync status
- * VSYNC_DISABLE: disable rsc wrapper vsync status
- */
-enum rsc_vsync_req {
-	VSYNC_READ,
-	VSYNC_ENABLE,
-	VSYNC_DISABLE,
-};
-
-/**
- * sde_rsc_state: sde rsc state information
- * SDE_RSC_MODE_IDLE: A client requests for idle state when there is no
- *                    pixel or cmd transfer expected. An idle vote from
- *                    all clients lead to power collapse state.
- * SDE_RSC_MODE_CMD:  A client requests for cmd state when it wants to
- *                    enable the solver mode.
- * SDE_RSC_MODE_CMD_UPDATE: A clients requests for cmd_update state when
- *                    it wants to update the backoff time during solver
- *                    enable state. Inline-rotation is one good example
- *                    use case. It increases the prefill lines by 128 lines.
- * SDE_RSC_MODE_VID:  A client requests for vid state it wants to avoid
- *                    solver enable because client is fetching data from
- *                    continuously.
- */
-enum sde_rsc_state {
-	SDE_RSC_IDLE_STATE,
-	SDE_RSC_CMD_STATE,
-	SDE_RSC_CMD_UPDATE_STATE,
-	SDE_RSC_VID_STATE,
-};
-
-/**
- * struct sde_rsc_client: stores the rsc client for sde driver
- * @name:	name of the client
- * @current_state:   current client state
- * @crtc_id:		crtc_id associated with this rsc client.
- * @rsc_index:	rsc index of a client - only index "0" valid.
- * @list:	list to attach power handle master list
- */
-struct sde_rsc_client {
-	char name[MAX_RSC_CLIENT_NAME_LEN];
-	short current_state;
-	int crtc_id;
-	u32 rsc_index;
-	struct list_head list;
-};
-
-/**
- * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
- * @init:			Initialize the sequencer, solver, qtimer,
-				etc. hardware blocks on RSC.
- * @tcs_wait:			Waits for TCS block OK to allow sending a
- *				TCS command.
- * @hw_vsync:			Enables the vsync on RSC block.
- * @tcs_use_ok:			set TCS set to high to allow RSC to use it.
- * @mode2_entry:		Request to entry mode2 when all clients are
- *                              requesting power collapse.
- * @mode2_exit:			Request to exit mode2 when one of the client
- *                              is requesting against the power collapse
- * @is_amc_mode:		Check current amc mode status
- * @state_update:		Enable/override the solver based on rsc state
- *                              status (command/video)
- * @mode_show:			shows current mode status, mode0/1/2
- * @debug_show:			Show current debug status.
- */
-
-struct sde_rsc_hw_ops {
-	int (*init)(struct sde_rsc_priv *rsc);
-	int (*tcs_wait)(struct sde_rsc_priv *rsc);
-	int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
-		char *buffer, int buffer_size, u32 mode);
-	int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
-	int (*mode2_entry)(struct sde_rsc_priv *rsc);
-	int (*mode2_exit)(struct sde_rsc_priv *rsc);
-	bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
-	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
-	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
-	int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
-		char *buffer, int buffer_size, bool mode);
-};
-
-/**
- * struct sde_rsc_cmd_config: provides panel configuration to rsc
- * when client is command mode. It is not required to set it during
- * video mode.
- *
- * @fps:	panel te interval
- * @vtotal:	current vertical total (height + vbp + vfp)
- * @jitter:	panel can set the jitter to wake up rsc/solver early
- *              This value causes mdp core to exit certain mode
- *              early. Default is 10% jitter
- * @prefill_lines:	max prefill lines based on panel
- */
-struct sde_rsc_cmd_config {
-	u32 fps;
-	u32 vtotal;
-	u32 jitter;
-	u32 prefill_lines;
-};
-
-/**
- * struct sde_rsc_timer_config: this is internal configuration between
- * rsc and rsc_hw API.
- *
- * @static_wakeup_time_ns:	wrapper backoff time in nano seconds
- * @rsc_backoff_time_ns:	rsc backoff time in nano seconds
- * @pdc_backoff_time_ns:	pdc backoff time in nano seconds
- * @rsc_mode_threshold_time_ns:	rsc mode threshold time in nano seconds
- * @rsc_time_slot_0_ns:		mode-0 time slot threshold in nano seconds
- * @rsc_time_slot_1_ns:		mode-1 time slot threshold in nano seconds
- * @rsc_time_slot_2_ns:		mode-2 time slot threshold in nano seconds
- */
-struct sde_rsc_timer_config {
-	u32 static_wakeup_time_ns;
-
-	u32 rsc_backoff_time_ns;
-	u32 pdc_backoff_time_ns;
-	u32 rsc_mode_threshold_time_ns;
-	u32 rsc_time_slot_0_ns;
-	u32 rsc_time_slot_1_ns;
-	u32 rsc_time_slot_2_ns;
-};
-
-/**
- * struct sde_rsc_priv: sde resource state coordinator(rsc) private handle
- * @version:		rsc sequence version
- * @phandle:		module power handle for clocks
- * @pclient:		module power client of phandle
- * @fs:			"MDSS GDSC" handle
- *
- * @drv_io:		sde drv io data mapping
- * @wrapper_io:		wrapper io data mapping
- *
- * @client_list:	current rsc client list handle
- * @client_lock:	current rsc client synchronization lock
- *
- * timer_config:	current rsc timer configuration
- * cmd_config:		current panel config
- * current_state:	current rsc state (video/command), solver
- *                      override/enabled.
- * debug_mode:		enables the logging for each register read/write
- * debugfs_root:	debugfs file system root node
- *
- * hw_ops:		sde rsc hardware operations
- * power_collapse:	if all clients are in IDLE state then it enters in
- *			mode2 state and enable the power collapse state
- * power_collapse_block:By default, rsc move to mode-2 if all clients are in
- *			invalid state. It can be blocked by this boolean entry.
- * primary_client:	A client which is allowed to make command state request
- *			and ab/ib vote on display rsc
- * master_drm:		Primary client waits for vsync on this drm object based
- *			on crtc id
- */
-struct sde_rsc_priv {
-	u32 version;
-	struct sde_power_handle phandle;
-	struct sde_power_client *pclient;
-	struct regulator *fs;
-
-	struct dss_io_data drv_io;
-	struct dss_io_data wrapper_io;
-
-	struct list_head client_list;
-	struct mutex client_lock;
-
-	struct sde_rsc_timer_config timer_config;
-	struct sde_rsc_cmd_config cmd_config;
-	u32	current_state;
-
-	u32 debug_mode;
-	struct dentry *debugfs_root;
-
-	struct sde_rsc_hw_ops hw_ops;
-	bool power_collapse;
-	bool power_collapse_block;
-	struct sde_rsc_client *primary_client;
-
-	struct drm_device *master_drm;
-};
-
-/**
- * sde_rsc_client_create() - create the client for sde rsc.
- * Different displays like DSI, HDMI, DP, WB, etc should call this
- * api to register their vote for rpmh. They still need to vote for
- * power handle to get the clocks.
-
- * @rsc_index:   A client will be created on this RSC. As of now only
- *               SDE_RSC_INDEX is valid rsc index.
- * @name:	 Caller needs to provide some valid string to identify
- *               the client. "primary", "dp", "hdmi" are suggested name.
- * @is_primary:	 Caller needs to provide information if client is primary
- *               or not. Primary client votes will be redirected to
- *               display rsc.
- * @config:	 fps, vtotal, porches, etc configuration for command mode
- *               panel
- *
- * Return: client node pointer.
- */
-struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *name,
-		bool is_primary_display);
-
-/**
- * sde_rsc_client_destroy() - Destroy the sde rsc client.
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: none
- */
-void sde_rsc_client_destroy(struct sde_rsc_client *client);
-
-/**
- * sde_rsc_client_state_update() - rsc client state update
- * Video mode and command mode are supported as modes. A client need to
- * set this property during panel time. A switching client can set the
- * property to change the state
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- * @state:	 Client state - video/cmd
- * @config:	 fps, vtotal, porches, etc configuration for command mode
- *               panel
- * @crtc_id:	 current client's crtc id
- *
- * Return: error code.
- */
-int sde_rsc_client_state_update(struct sde_rsc_client *client,
-	enum sde_rsc_state state,
-	struct sde_rsc_cmd_config *config, int crtc_id);
-
-/**
- * sde_rsc_client_vote() - ab/ib vote from rsc client
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- * @ab:		 aggregated bandwidth vote from client.
- * @ib:		 instant bandwidth vote from client.
- *
- * Return: error code.
- */
-int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
-	u64 ab_vote, u64 ib_vote);
-
-/**
- * sde_rsc_hw_register() - register hardware API
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: error code.
- */
-int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
-
-
-#endif /* _SDE_RSC_H_ */
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 8dd04bd..fb963ee 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -17,7 +17,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 
-#include "sde_rsc.h"
+#include "sde_rsc_priv.h"
 
 /* display rsc offset */
 #define SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0	0x020
@@ -93,15 +93,17 @@
 #define SDE_RSCC_F0_QTMR_V1_CNTP_CTL			0x202C
 #define SDE_RSCC_F1_QTMR_V1_CNTP_CTL			0x302C
 
-/* mdp and dsi clocks in clock gate state */
-#define DISP_MDP_DSI_CLK_GATE		0x7f0
-
-/* mdp and dsi clocks in clock ungate state */
-#define MDSS_CORE_GDSCR			0x0
-#define DISP_MDP_DSI_CLK_UNGATE		0x5000
-
 #define MAX_CHECK_LOOPS			500
 
+static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
+{
+	struct sde_rsc_event *event;
+
+	list_for_each_entry(event, &rsc->event_list, list)
+		if (event->event_type & event_type)
+			event->cb_func(event_type, event->usr);
+}
+
 static int rsc_hw_qtimer_init(struct sde_rsc_priv *rsc)
 {
 	pr_debug("rsc hardware qtimer init\n");
@@ -182,31 +184,33 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
 						0x888babec, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
-						0xaaa8a020, rsc->debug_mode);
+						0xa806a020, rsc->debug_mode);
 
 	/* Mode - 2 sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xe1a138eb, rsc->debug_mode);
+						0xa138ebaa, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xa2ede081, rsc->debug_mode);
+						0xe0a581e1, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0x8a3982e2, rsc->debug_mode);
+						0x82e2a2ed, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0xa92088ea, rsc->debug_mode);
+						0x88ea8a39, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0x89e6a6e9, rsc->debug_mode);
+						0xa6e9a920, rsc->debug_mode);
 
 	/* tcs sleep sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0xa7e9a920, rsc->debug_mode);
+						0xa92089e6, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0x002089e7, rsc->debug_mode);
+						0x89e7a7e9, rsc->debug_mode);
+	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
+						0x00000020, rsc->debug_mode);
 
 	/* branch address */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x27, rsc->debug_mode);
+						0x29, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x2d, rsc->debug_mode);
+						0x2f, rsc->debug_mode);
 
 	return 0;
 }
@@ -297,10 +301,13 @@
 	rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
 	if (rc) {
 		pr_err("vdd reg fast mode set failed rc:%d\n", rc);
-		goto end;
+		return rc;
 	}
 
 	rc = -EBUSY;
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
+
 	wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
 				rsc->debug_mode);
 	wrapper_status |= BIT(3);
@@ -319,10 +326,20 @@
 		usleep_range(1, 2);
 	}
 
-	if (rc)
+	if (rc) {
 		pr_err("vdd fs is still enabled\n");
+		goto end;
+	}
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
+
+	return 0;
 
 end:
+	regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
+
 	return rc;
 }
 
@@ -331,6 +348,8 @@
 	int rc = -EBUSY;
 	int count, reg;
 
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
+
 	// needs review with HPG sequence
 	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
 					0x0, rsc->debug_mode);
@@ -374,6 +393,8 @@
 	if (rc)
 		pr_err("vdd reg normal mode set failed rc:%d\n", rc);
 
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
+
 	return rc;
 }
 
@@ -407,6 +428,8 @@
 							reg, rsc->debug_mode);
 		/* make sure that solver is enabled */
 		wmb();
+
+		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_ENABLED);
 		break;
 
 	case SDE_RSC_VID_STATE:
@@ -424,6 +447,8 @@
 							0x1, rsc->debug_mode);
 		/* make sure that solver mode is override */
 		wmb();
+
+		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_DISABLED);
 		break;
 
 	case SDE_RSC_IDLE_STATE:
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
new file mode 100644
index 0000000..2563c85
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -0,0 +1,181 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_RSC_PRIV_H_
+#define _SDE_RSC_PRIV_H_
+
+#include <linux/kernel.h>
+#include <linux/sde_io_util.h>
+#include <linux/sde_rsc.h>
+
+#include <soc/qcom/tcs.h>
+#include "sde_power_handle.h"
+
+#define SDE_RSC_COMPATIBLE "disp_rscc"
+
+#define MAX_RSC_COUNT		5
+
+struct sde_rsc_priv;
+
+/**
+ * rsc_mode_req: sde rsc mode request information
+ * MODE_READ: read vsync status
+ * MODE0_UPDATE: mode0 status, this should be 0x0
+ * MODE1_UPDATE: mode1 status, this should be 0x1
+ * MODE2_UPDATE: mode2 status, this should be 0x2
+ */
+enum rsc_mode_req {
+	MODE_READ,
+	MODE0_UPDATE = 0x1,
+	MODE1_UPDATE = 0x2,
+	MODE2_UPDATE = 0x3,
+};
+
+/**
+ * rsc_vsync_req: sde rsc vsync request information
+ * VSYNC_READ: read vsync status
+ * VSYNC_ENABLE: enable rsc wrapper vsync status
+ * VSYNC_DISABLE: disable rsc wrapper vsync status
+ */
+enum rsc_vsync_req {
+	VSYNC_READ,
+	VSYNC_ENABLE,
+	VSYNC_DISABLE,
+};
+
+/**
+ * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
+ * @init:			Initialize the sequencer, solver, qtimer,
+ *				etc. hardware blocks on RSC.
+ * @tcs_wait:			Waits for TCS block OK to allow sending a
+ *				TCS command.
+ * @hw_vsync:			Enables the vsync on RSC block.
+ * @tcs_use_ok:			set TCS to high to allow RSC to use it.
+ * @mode2_entry:		Request to enter mode2 when all clients are
+ *                              requesting power collapse.
+ * @mode2_exit:			Request to exit mode2 when one of the clients
+ *                              votes against the power collapse.
+ * @is_amc_mode:		Check current amc mode status
+ * @state_update:		Enable/override the solver based on rsc state
+ *                              status (command/video)
+ * @mode_ctrl:			reads or updates the current mode status (mode0/1/2)
+ * @debug_show:			Show current debug status.
+ */
+
+struct sde_rsc_hw_ops {
+	int (*init)(struct sde_rsc_priv *rsc);
+	int (*tcs_wait)(struct sde_rsc_priv *rsc);
+	int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
+		char *buffer, int buffer_size, u32 mode);
+	int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
+	int (*mode2_entry)(struct sde_rsc_priv *rsc);
+	int (*mode2_exit)(struct sde_rsc_priv *rsc);
+	bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
+	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
+	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
+	int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
+		char *buffer, int buffer_size, bool mode);
+};
+
+/**
+ * struct sde_rsc_timer_config: this is internal configuration between
+ * rsc and rsc_hw API.
+ *
+ * @static_wakeup_time_ns:	wrapper backoff time in nano seconds
+ * @rsc_backoff_time_ns:	rsc backoff time in nano seconds
+ * @pdc_backoff_time_ns:	pdc backoff time in nano seconds
+ * @rsc_mode_threshold_time_ns:	rsc mode threshold time in nano seconds
+ * @rsc_time_slot_0_ns:		mode-0 time slot threshold in nano seconds
+ * @rsc_time_slot_1_ns:		mode-1 time slot threshold in nano seconds
+ * @rsc_time_slot_2_ns:		mode-2 time slot threshold in nano seconds
+ */
+struct sde_rsc_timer_config {
+	u32 static_wakeup_time_ns;
+
+	u32 rsc_backoff_time_ns;
+	u32 pdc_backoff_time_ns;
+	u32 rsc_mode_threshold_time_ns;
+	u32 rsc_time_slot_0_ns;
+	u32 rsc_time_slot_1_ns;
+	u32 rsc_time_slot_2_ns;
+};
+
+/**
+ * struct sde_rsc_priv: sde resource state coordinator(rsc) private handle
+ * @version:		rsc sequence version
+ * @phandle:		module power handle for clocks
+ * @pclient:		module power client of phandle
+ * @fs:			"MDSS GDSC" handle
+ *
+ * @drv_io:		sde drv io data mapping
+ * @wrapper_io:		wrapper io data mapping
+ *
+ * @client_list:	current rsc client list handle
+ * @event_list:		current rsc event list handle
+ * @client_lock:	current rsc client synchronization lock
+ *
+ * @timer_config:	current rsc timer configuration
+ * @cmd_config:		current panel config
+ * @current_state:	current rsc state (video/command), solver
+ *                      override/enabled.
+ * @debug_mode:		enables the logging for each register read/write
+ * @debugfs_root:	debugfs file system root node
+ *
+ * @hw_ops:		sde rsc hardware operations
+ * @power_collapse:	if all clients are in IDLE state then rsc enters
+ *			mode2 state and enables power collapse
+ * @power_collapse_block: by default, rsc moves to mode-2 if all clients are
+ *			in an invalid state; this boolean entry blocks it
+ * @primary_client:	a client which is allowed to make command state requests
+ *			and ab/ib votes on the display rsc
+ * @master_drm:		the primary client waits for vsync on this drm object,
+ *			based on crtc id
+ */
+struct sde_rsc_priv {
+	u32 version;
+	struct sde_power_handle phandle;
+	struct sde_power_client *pclient;
+	struct regulator *fs;
+
+	struct dss_io_data drv_io;
+	struct dss_io_data wrapper_io;
+
+	struct list_head client_list;
+	struct list_head event_list;
+	struct mutex client_lock;
+
+	struct sde_rsc_timer_config timer_config;
+	struct sde_rsc_cmd_config cmd_config;
+	u32	current_state;
+
+	u32 debug_mode;
+	struct dentry *debugfs_root;
+
+	struct sde_rsc_hw_ops hw_ops;
+	bool power_collapse;
+	bool power_collapse_block;
+	struct sde_rsc_client *primary_client;
+
+	struct drm_device *master_drm;
+};
+
+/**
+ * sde_rsc_hw_register() - register hardware API
+ *
+ * @rsc:	 sde rsc private handle.
+ *
+ * Return: error code.
+ */
+int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
+
+#endif /* _SDE_RSC_PRIV_H_ */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 77a52b5..70f0344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -95,9 +95,11 @@
 nvkm-y += nvkm/engine/disp/cursgt215.o
 nvkm-y += nvkm/engine/disp/cursgf119.o
 nvkm-y += nvkm/engine/disp/cursgk104.o
+nvkm-y += nvkm/engine/disp/cursgp102.o
 
 nvkm-y += nvkm/engine/disp/oimmnv50.o
 nvkm-y += nvkm/engine/disp/oimmg84.o
 nvkm-y += nvkm/engine/disp/oimmgt215.o
 nvkm-y += nvkm/engine/disp/oimmgf119.o
 nvkm-y += nvkm/engine/disp/oimmgk104.o
+nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index dd2953b..9d90d8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -82,7 +82,7 @@
 
 			if (mthd->addr) {
 				snprintf(cname_, sizeof(cname_), "%s %d",
-					 mthd->name, chan->chid);
+					 mthd->name, chan->chid.user);
 				cname = cname_;
 			}
 
@@ -139,7 +139,7 @@
 	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
 		notify->size  = sizeof(struct nvif_notify_uevent_rep);
 		notify->types = 1;
-		notify->index = chan->chid;
+		notify->index = chan->chid.user;
 		return 0;
 	}
 
@@ -159,7 +159,7 @@
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
-	*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+	*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
 	return 0;
 }
 
@@ -169,7 +169,7 @@
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
-	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
 	return 0;
 }
 
@@ -196,7 +196,7 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	*addr = device->func->resource_addr(device, 0) +
-		0x640000 + (chan->chid * 0x1000);
+		0x640000 + (chan->chid.user * 0x1000);
 	*size = 0x001000;
 	return 0;
 }
@@ -243,8 +243,8 @@
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
-	if (chan->chid >= 0)
-		disp->chan[chan->chid] = NULL;
+	if (chan->chid.user >= 0)
+		disp->chan[chan->chid.user] = NULL;
 	return chan->func->dtor ? chan->func->dtor(chan) : chan;
 }
 
@@ -263,7 +263,7 @@
 int
 nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int chid, int head,
+		    struct nv50_disp_root *root, int ctrl, int user, int head,
 		    const struct nvkm_oclass *oclass,
 		    struct nv50_disp_chan *chan)
 {
@@ -273,21 +273,22 @@
 	chan->func = func;
 	chan->mthd = mthd;
 	chan->root = root;
-	chan->chid = chid;
+	chan->chid.ctrl = ctrl;
+	chan->chid.user = user;
 	chan->head = head;
 
-	if (disp->chan[chan->chid]) {
-		chan->chid = -1;
+	if (disp->chan[chan->chid.user]) {
+		chan->chid.user = -1;
 		return -EBUSY;
 	}
-	disp->chan[chan->chid] = chan;
+	disp->chan[chan->chid.user] = chan;
 	return 0;
 }
 
 int
 nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int chid, int head,
+		    struct nv50_disp_root *root, int ctrl, int user, int head,
 		    const struct nvkm_oclass *oclass,
 		    struct nvkm_object **pobject)
 {
@@ -297,5 +298,6 @@
 		return -ENOMEM;
 	*pobject = &chan->object;
 
-	return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+	return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
+				   head, oclass, chan);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index f5f683d..737b38f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -7,7 +7,11 @@
 	const struct nv50_disp_chan_func *func;
 	const struct nv50_disp_chan_mthd *mthd;
 	struct nv50_disp_root *root;
-	int chid;
+
+	struct {
+		int ctrl;
+		int user;
+	} chid;
 	int head;
 
 	struct nvkm_object object;
@@ -25,11 +29,11 @@
 
 int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int chid, int head,
+			struct nv50_disp_root *, int ctrl, int user, int head,
 			const struct nvkm_oclass *, struct nv50_disp_chan *);
 int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int chid, int head,
+			struct nv50_disp_root *, int ctrl, int user, int head,
 			const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
@@ -90,13 +94,16 @@
 struct nv50_disp_pioc_oclass {
 	int (*ctor)(const struct nv50_disp_chan_func *,
 		    const struct nv50_disp_chan_mthd *,
-		    struct nv50_disp_root *, int chid,
+		    struct nv50_disp_root *, int ctrl, int user,
 		    const struct nvkm_oclass *, void *data, u32 size,
 		    struct nvkm_object **);
 	struct nvkm_sclass base;
 	const struct nv50_disp_chan_func *func;
 	const struct nv50_disp_chan_mthd *mthd;
-	int chid;
+	struct {
+		int ctrl;
+		int user;
+	} chid;
 };
 
 extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
@@ -114,15 +121,17 @@
 extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
 
+extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
 
 int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
 		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
+		       struct nv50_disp_root *, int ctrl, int user,
 		       const struct nvkm_oclass *, void *data, u32 size,
 		       struct nvkm_object **);
 int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
 		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
+		       struct nv50_disp_root *, int ctrl, int user,
 		       const struct nvkm_oclass *, void *data, u32 size,
 		       struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
index dd99fc7..fa781b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 7,
+	.chid = { 7, 7 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index 2a1574e..2be6fb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 13,
+	.chid = { 13, 13 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 28e8f06..2a99db4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 13,
+	.chid = { 13, 13 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
new file mode 100644
index 0000000..e958210
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_curs_oclass = {
+	.base.oclass = GK104_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = { 13, 17 },
+};
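
(Aside, not part of the patch) Worked example of the new ctrl/user split: nv50_disp_curs_new() further below adds the head number to both indices, so with .chid = { 13, 17 } above:

/*
 *	head 0: ctrl = 13, user = 17
 *	head 1: ctrl = 14, user = 18
 *
 * "ctrl" indexes the channel-control registers programmed at init/fini
 * time, while "user" is the index used for error reporting, event
 * notification and the disp->chan[] bookkeeping; on pre-GP102 parts the
 * two values are identical.
 */
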
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
index d8a4b9c..00a7f35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 7,
+	.chid = { 7, 7 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index 8b13204..82ff82d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -33,7 +33,7 @@
 int
 nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
 		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int chid,
+		   struct nv50_disp_root *root, int ctrl, int user,
 		   const struct nvkm_oclass *oclass, void *data, u32 size,
 		   struct nvkm_object **pobject)
 {
@@ -54,7 +54,7 @@
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+	return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
@@ -65,5 +65,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 7,
+	.chid = { 7, 7 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index a57f7ce..ce7cd74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -32,8 +32,8 @@
 		     struct nvkm_object *object, u32 handle)
 {
 	return nvkm_ramht_insert(chan->base.root->ramht, object,
-				 chan->base.chid, -9, handle,
-				 chan->base.chid << 27 | 0x00000001);
+				 chan->base.chid.user, -9, handle,
+				 chan->base.chid.user << 27 | 0x00000001);
 }
 
 void
@@ -42,22 +42,23 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* deactivate channel */
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -66,26 +67,27 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
index ad24c2c..d26d3b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
@@ -32,26 +32,27 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 9c6645a..0a1381a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -149,7 +149,7 @@
 	chan->func = func;
 
 	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
-				  chid, head, oclass, &chan->base);
+				  chid, chid, head, oclass, &chan->base);
 	if (ret)
 		return ret;
 
@@ -179,9 +179,9 @@
 		    struct nvkm_object *object, u32 handle)
 {
 	return nvkm_ramht_insert(chan->base.root->ramht, object,
-				 chan->base.chid, -10, handle,
-				 chan->base.chid << 28 |
-				 chan->base.chid);
+				 chan->base.chid.user, -10, handle,
+				 chan->base.chid.user << 28 |
+				 chan->base.chid.user);
 }
 
 static void
@@ -190,21 +190,22 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* deactivate channel */
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notifications */
-	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+	nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
 }
 
 static int
@@ -213,26 +214,27 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+	nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index 54a4ae8..5ad5d0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 5,
+	.chid = { 5, 5 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
index c658db5..1f9fd34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 9,
+	.chid = { 9, 9 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
index b1fde8c..0c09fe8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 9,
+	.chid = { 9, 9 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
new file mode 100644
index 0000000..abf8236
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_oimm_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = { 9, 13 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
index f4e7eb3..1281db2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 5,
+	.chid = { 5, 5 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index 3940b9c..07540f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -33,7 +33,7 @@
 int
 nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
 		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int chid,
+		   struct nv50_disp_root *root, int ctrl, int user,
 		   const struct nvkm_oclass *oclass, void *data, u32 size,
 		   struct nvkm_object **pobject)
 {
@@ -54,7 +54,7 @@
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+	return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
@@ -65,5 +65,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 5,
+	.chid = { 5, 5 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index a625a98..0abaa64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -32,20 +32,21 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -54,20 +55,21 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* activate channel */
-	nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
 	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+		u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
 		if ((tmp & 0x00030000) == 0x00010000)
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 9d2618d..0211e0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -32,15 +32,16 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 	}
 }
 
@@ -50,26 +51,27 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
-	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
 	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+		u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
 		if ((tmp & 0x00030000) == 0x00010000)
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
index 8443e04..b053b29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
@@ -36,8 +36,8 @@
 		&gp104_disp_ovly_oclass,
 	},
 	.pioc = {
-		&gk104_disp_oimm_oclass,
-		&gk104_disp_curs_oclass,
+		&gp102_disp_oimm_oclass,
+		&gp102_disp_curs_oclass,
 	},
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 2f9cecd..05c829a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -207,8 +207,8 @@
 {
 	const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
 	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
-	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
-			    oclass, data, size, pobject);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
+			    sclass->chid.user, oclass, data, size, pobject);
 }
 
 static int
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index d544ff9..7aadce1 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -83,8 +83,7 @@
 	/* Which channel of the HVS this pixelvalve sources from. */
 	int hvs_channel;
 
-	enum vc4_encoder_type encoder0_type;
-	enum vc4_encoder_type encoder1_type;
+	enum vc4_encoder_type encoder_types[4];
 };
 
 #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
@@ -669,6 +668,14 @@
 	CRTC_WRITE(PV_INTEN, 0);
 }
 
+/* Must be called with the event lock held */
+bool vc4_event_pending(struct drm_crtc *crtc)
+{
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+	return !!vc4_crtc->event;
+}
+
 static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 {
 	struct drm_crtc *crtc = &vc4_crtc->base;
@@ -859,20 +866,26 @@
 
 static const struct vc4_crtc_data pv0_data = {
 	.hvs_channel = 0,
-	.encoder0_type = VC4_ENCODER_TYPE_DSI0,
-	.encoder1_type = VC4_ENCODER_TYPE_DPI,
+	.encoder_types = {
+		[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
+		[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
+	},
 };
 
 static const struct vc4_crtc_data pv1_data = {
 	.hvs_channel = 2,
-	.encoder0_type = VC4_ENCODER_TYPE_DSI1,
-	.encoder1_type = VC4_ENCODER_TYPE_SMI,
+	.encoder_types = {
+		[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
+		[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
+	},
 };
 
 static const struct vc4_crtc_data pv2_data = {
 	.hvs_channel = 1,
-	.encoder0_type = VC4_ENCODER_TYPE_VEC,
-	.encoder1_type = VC4_ENCODER_TYPE_HDMI,
+	.encoder_types = {
+		[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
+		[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
+	},
 };
 
 static const struct of_device_id vc4_crtc_dt_match[] = {
@@ -886,17 +899,20 @@
 					struct drm_crtc *crtc)
 {
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	const struct vc4_crtc_data *crtc_data = vc4_crtc->data;
+	const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types;
 	struct drm_encoder *encoder;
 
 	drm_for_each_encoder(encoder, drm) {
 		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+		int i;
 
-		if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
-			vc4_encoder->clock_select = 0;
-			encoder->possible_crtcs |= drm_crtc_mask(crtc);
-		} else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
-			vc4_encoder->clock_select = 1;
-			encoder->possible_crtcs |= drm_crtc_mask(crtc);
+		for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
+			if (vc4_encoder->type == encoder_types[i]) {
+				vc4_encoder->clock_select = i;
+				encoder->possible_crtcs |= drm_crtc_mask(crtc);
+				break;
+			}
 		}
 	}
 }
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56..246d1ae 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,21 +61,24 @@
 		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT0);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_V3D_IDENT1:
 		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
 		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT1);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_V3D_IDENT2:
 		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
 		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT2);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
 		args->value = true;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7c1e4d9..50a55ef 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -194,6 +194,7 @@
 }
 
 enum vc4_encoder_type {
+	VC4_ENCODER_TYPE_NONE,
 	VC4_ENCODER_TYPE_HDMI,
 	VC4_ENCODER_TYPE_VEC,
 	VC4_ENCODER_TYPE_DSI0,
@@ -440,6 +441,7 @@
 extern struct platform_driver vc4_crtc_driver;
 int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
 void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
+bool vc4_event_pending(struct drm_crtc *crtc);
 int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
 int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
 			    unsigned int flags, int *vpos, int *hpos,
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 18e3717..ab30169 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -711,8 +711,10 @@
 	}
 
 	mutex_lock(&vc4->power_lock);
-	if (--vc4->power_refcount == 0)
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+	if (--vc4->power_refcount == 0) {
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+	}
 	mutex_unlock(&vc4->power_lock);
 
 	kfree(exec);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6..67af2af 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -119,17 +119,34 @@
 
 	/* Make sure that any outstanding modesets have finished. */
 	if (nonblock) {
-		ret = down_trylock(&vc4->async_modeset);
-		if (ret) {
+		struct drm_crtc *crtc;
+		struct drm_crtc_state *crtc_state;
+		unsigned long flags;
+		bool busy = false;
+
+		/*
+		 * If there's an undispatched event to send then we're
+		 * obviously still busy.  If there isn't, then we can
+		 * unconditionally wait for the semaphore because it
+		 * shouldn't be contended (for long).
+		 *
+		 * This is to prevent a race where queuing a new flip
+		 * from userspace immediately on receipt of an event
+		 * beats our clean-up and returns EBUSY.
+		 */
+		spin_lock_irqsave(&dev->event_lock, flags);
+		for_each_crtc_in_state(state, crtc, crtc_state, i)
+			busy |= vc4_event_pending(crtc);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		if (busy) {
 			kfree(c);
 			return -EBUSY;
 		}
-	} else {
-		ret = down_interruptible(&vc4->async_modeset);
-		if (ret) {
-			kfree(c);
-			return ret;
-		}
+	}
+	ret = down_interruptible(&vc4->async_modeset);
+	if (ret) {
+		kfree(c);
+		return ret;
 	}
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 1aa44c2..39f6886 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -177,8 +177,9 @@
 # define PV_CONTROL_WAIT_HSTART			BIT(12)
 # define PV_CONTROL_PIXEL_REP_MASK		VC4_MASK(5, 4)
 # define PV_CONTROL_PIXEL_REP_SHIFT		4
-# define PV_CONTROL_CLK_SELECT_DSI_VEC		0
+# define PV_CONTROL_CLK_SELECT_DSI		0
 # define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI	1
+# define PV_CONTROL_CLK_SELECT_VEC		2
 # define PV_CONTROL_CLK_SELECT_MASK		VC4_MASK(3, 2)
 # define PV_CONTROL_CLK_SELECT_SHIFT		2
 # define PV_CONTROL_FIFO_CLR			BIT(1)
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c60..7cc346a 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@
 		return ret;
 	}
 
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
 	pm_runtime_enable(dev);
 
 	return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5..917321c 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -608,9 +608,7 @@
 vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
 {
 	uint32_t max_branch_target = 0;
-	bool found_shader_end = false;
 	int ip;
-	int shader_end_ip = 0;
 	int last_branch = -2;
 
 	for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +619,13 @@
 		uint32_t branch_target_ip;
 
 		if (sig == QPU_SIG_PROG_END) {
-			shader_end_ip = ip;
-			found_shader_end = true;
+			/* There are two delay slots after program end is
+			 * signaled that are still executed, then we're
+			 * finished.  validation_state->max_ip is the
+			 * instruction after the last valid instruction in the
+			 * program.
+			 */
+			validation_state->max_ip = ip + 3;
 			continue;
 		}
 
@@ -676,15 +679,9 @@
 		}
 		set_bit(after_delay_ip, validation_state->branch_targets);
 		max_branch_target = max(max_branch_target, after_delay_ip);
-
-		/* There are two delay slots after program end is signaled
-		 * that are still executed, then we're finished.
-		 */
-		if (found_shader_end && ip == shader_end_ip + 2)
-			break;
 	}
 
-	if (max_branch_target > shader_end_ip) {
+	if (max_branch_target > validation_state->max_ip - 3) {
 		DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
 		return false;
 	}
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 2639c49..218c6e7 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -70,6 +70,20 @@
 #define A6XX_CP_ADDR_MODE_CNTL           0x842
 #define A6XX_CP_PROTECT_CNTL             0x84F
 #define A6XX_CP_PROTECT_REG              0x850
+#define A6XX_CP_PERFCTR_CP_SEL_0         0x8D0
+#define A6XX_CP_PERFCTR_CP_SEL_1         0x8D1
+#define A6XX_CP_PERFCTR_CP_SEL_2         0x8D2
+#define A6XX_CP_PERFCTR_CP_SEL_3         0x8D3
+#define A6XX_CP_PERFCTR_CP_SEL_4         0x8D4
+#define A6XX_CP_PERFCTR_CP_SEL_5         0x8D5
+#define A6XX_CP_PERFCTR_CP_SEL_6         0x8D6
+#define A6XX_CP_PERFCTR_CP_SEL_7         0x8D7
+#define A6XX_CP_PERFCTR_CP_SEL_8         0x8D8
+#define A6XX_CP_PERFCTR_CP_SEL_9         0x8D9
+#define A6XX_CP_PERFCTR_CP_SEL_10        0x8DA
+#define A6XX_CP_PERFCTR_CP_SEL_11        0x8DB
+#define A6XX_CP_PERFCTR_CP_SEL_12        0x8DC
+#define A6XX_CP_PERFCTR_CP_SEL_13        0x8DD
 #define A6XX_CP_CRASH_SCRIPT_BASE_LO     0x900
 #define A6XX_CP_CRASH_SCRIPT_BASE_HI     0x901
 #define A6XX_CP_CRASH_DUMP_CNTL          0x902
@@ -108,7 +122,279 @@
 #define A6XX_RBBM_INT_0_STATUS                   0x201
 #define A6XX_RBBM_STATUS                         0x210
 #define A6XX_RBBM_STATUS3                        0x213
+#define A6XX_RBBM_PERFCTR_CP_0_LO                0x400
+#define A6XX_RBBM_PERFCTR_CP_0_HI                0x401
+#define A6XX_RBBM_PERFCTR_CP_1_LO                0x402
+#define A6XX_RBBM_PERFCTR_CP_1_HI                0x403
+#define A6XX_RBBM_PERFCTR_CP_2_LO                0x404
+#define A6XX_RBBM_PERFCTR_CP_2_HI                0x405
+#define A6XX_RBBM_PERFCTR_CP_3_LO                0x406
+#define A6XX_RBBM_PERFCTR_CP_3_HI                0x407
+#define A6XX_RBBM_PERFCTR_CP_4_LO                0x408
+#define A6XX_RBBM_PERFCTR_CP_4_HI                0x409
+#define A6XX_RBBM_PERFCTR_CP_5_LO                0x40a
+#define A6XX_RBBM_PERFCTR_CP_5_HI                0x40b
+#define A6XX_RBBM_PERFCTR_CP_6_LO                0x40c
+#define A6XX_RBBM_PERFCTR_CP_6_HI                0x40d
+#define A6XX_RBBM_PERFCTR_CP_7_LO                0x40e
+#define A6XX_RBBM_PERFCTR_CP_7_HI                0x40f
+#define A6XX_RBBM_PERFCTR_CP_8_LO                0x410
+#define A6XX_RBBM_PERFCTR_CP_8_HI                0x411
+#define A6XX_RBBM_PERFCTR_CP_9_LO                0x412
+#define A6XX_RBBM_PERFCTR_CP_9_HI                0x413
+#define A6XX_RBBM_PERFCTR_CP_10_LO               0x414
+#define A6XX_RBBM_PERFCTR_CP_10_HI               0x415
+#define A6XX_RBBM_PERFCTR_CP_11_LO               0x416
+#define A6XX_RBBM_PERFCTR_CP_11_HI               0x417
+#define A6XX_RBBM_PERFCTR_CP_12_LO               0x418
+#define A6XX_RBBM_PERFCTR_CP_12_HI               0x419
+#define A6XX_RBBM_PERFCTR_CP_13_LO               0x41a
+#define A6XX_RBBM_PERFCTR_CP_13_HI               0x41b
+#define A6XX_RBBM_PERFCTR_RBBM_0_LO              0x41c
+#define A6XX_RBBM_PERFCTR_RBBM_0_HI              0x41d
+#define A6XX_RBBM_PERFCTR_RBBM_1_LO              0x41e
+#define A6XX_RBBM_PERFCTR_RBBM_1_HI              0x41f
+#define A6XX_RBBM_PERFCTR_RBBM_2_LO              0x420
+#define A6XX_RBBM_PERFCTR_RBBM_2_HI              0x421
+#define A6XX_RBBM_PERFCTR_RBBM_3_LO              0x422
+#define A6XX_RBBM_PERFCTR_RBBM_3_HI              0x423
+#define A6XX_RBBM_PERFCTR_PC_0_LO                0x424
+#define A6XX_RBBM_PERFCTR_PC_0_HI                0x425
+#define A6XX_RBBM_PERFCTR_PC_1_LO                0x426
+#define A6XX_RBBM_PERFCTR_PC_1_HI                0x427
+#define A6XX_RBBM_PERFCTR_PC_2_LO                0x428
+#define A6XX_RBBM_PERFCTR_PC_2_HI                0x429
+#define A6XX_RBBM_PERFCTR_PC_3_LO                0x42a
+#define A6XX_RBBM_PERFCTR_PC_3_HI                0x42b
+#define A6XX_RBBM_PERFCTR_PC_4_LO                0x42c
+#define A6XX_RBBM_PERFCTR_PC_4_HI                0x42d
+#define A6XX_RBBM_PERFCTR_PC_5_LO                0x42e
+#define A6XX_RBBM_PERFCTR_PC_5_HI                0x42f
+#define A6XX_RBBM_PERFCTR_PC_6_LO                0x430
+#define A6XX_RBBM_PERFCTR_PC_6_HI                0x431
+#define A6XX_RBBM_PERFCTR_PC_7_LO                0x432
+#define A6XX_RBBM_PERFCTR_PC_7_HI                0x433
+#define A6XX_RBBM_PERFCTR_VFD_0_LO               0x434
+#define A6XX_RBBM_PERFCTR_VFD_0_HI               0x435
+#define A6XX_RBBM_PERFCTR_VFD_1_LO               0x436
+#define A6XX_RBBM_PERFCTR_VFD_1_HI               0x437
+#define A6XX_RBBM_PERFCTR_VFD_2_LO               0x438
+#define A6XX_RBBM_PERFCTR_VFD_2_HI               0x439
+#define A6XX_RBBM_PERFCTR_VFD_3_LO               0x43a
+#define A6XX_RBBM_PERFCTR_VFD_3_HI               0x43b
+#define A6XX_RBBM_PERFCTR_VFD_4_LO               0x43c
+#define A6XX_RBBM_PERFCTR_VFD_4_HI               0x43d
+#define A6XX_RBBM_PERFCTR_VFD_5_LO               0x43e
+#define A6XX_RBBM_PERFCTR_VFD_5_HI               0x43f
+#define A6XX_RBBM_PERFCTR_VFD_6_LO               0x440
+#define A6XX_RBBM_PERFCTR_VFD_6_HI               0x441
+#define A6XX_RBBM_PERFCTR_VFD_7_LO               0x442
+#define A6XX_RBBM_PERFCTR_VFD_7_HI               0x443
+#define A6XX_RBBM_PERFCTR_HLSQ_0_LO              0x444
+#define A6XX_RBBM_PERFCTR_HLSQ_0_HI              0x445
+#define A6XX_RBBM_PERFCTR_HLSQ_1_LO              0x446
+#define A6XX_RBBM_PERFCTR_HLSQ_1_HI              0x447
+#define A6XX_RBBM_PERFCTR_HLSQ_2_LO              0x448
+#define A6XX_RBBM_PERFCTR_HLSQ_2_HI              0x449
+#define A6XX_RBBM_PERFCTR_HLSQ_3_LO              0x44a
+#define A6XX_RBBM_PERFCTR_HLSQ_3_HI              0x44b
+#define A6XX_RBBM_PERFCTR_HLSQ_4_LO              0x44c
+#define A6XX_RBBM_PERFCTR_HLSQ_4_HI              0x44d
+#define A6XX_RBBM_PERFCTR_HLSQ_5_LO              0x44e
+#define A6XX_RBBM_PERFCTR_HLSQ_5_HI              0x44f
+#define A6XX_RBBM_PERFCTR_VPC_0_LO               0x450
+#define A6XX_RBBM_PERFCTR_VPC_0_HI               0x451
+#define A6XX_RBBM_PERFCTR_VPC_1_LO               0x452
+#define A6XX_RBBM_PERFCTR_VPC_1_HI               0x453
+#define A6XX_RBBM_PERFCTR_VPC_2_LO               0x454
+#define A6XX_RBBM_PERFCTR_VPC_2_HI               0x455
+#define A6XX_RBBM_PERFCTR_VPC_3_LO               0x456
+#define A6XX_RBBM_PERFCTR_VPC_3_HI               0x457
+#define A6XX_RBBM_PERFCTR_VPC_4_LO               0x458
+#define A6XX_RBBM_PERFCTR_VPC_4_HI               0x459
+#define A6XX_RBBM_PERFCTR_VPC_5_LO               0x45a
+#define A6XX_RBBM_PERFCTR_VPC_5_HI               0x45b
+#define A6XX_RBBM_PERFCTR_CCU_0_LO               0x45c
+#define A6XX_RBBM_PERFCTR_CCU_0_HI               0x45d
+#define A6XX_RBBM_PERFCTR_CCU_1_LO               0x45e
+#define A6XX_RBBM_PERFCTR_CCU_1_HI               0x45f
+#define A6XX_RBBM_PERFCTR_CCU_2_LO               0x460
+#define A6XX_RBBM_PERFCTR_CCU_2_HI               0x461
+#define A6XX_RBBM_PERFCTR_CCU_3_LO               0x462
+#define A6XX_RBBM_PERFCTR_CCU_3_HI               0x463
+#define A6XX_RBBM_PERFCTR_CCU_4_LO               0x464
+#define A6XX_RBBM_PERFCTR_CCU_4_HI               0x465
+#define A6XX_RBBM_PERFCTR_TSE_0_LO               0x466
+#define A6XX_RBBM_PERFCTR_TSE_0_HI               0x467
+#define A6XX_RBBM_PERFCTR_TSE_1_LO               0x468
+#define A6XX_RBBM_PERFCTR_TSE_1_HI               0x469
+#define A6XX_RBBM_PERFCTR_TSE_2_LO               0x46a
+#define A6XX_RBBM_PERFCTR_TSE_2_HI               0x46b
+#define A6XX_RBBM_PERFCTR_TSE_3_LO               0x46c
+#define A6XX_RBBM_PERFCTR_TSE_3_HI               0x46d
+#define A6XX_RBBM_PERFCTR_RAS_0_LO               0x46e
+#define A6XX_RBBM_PERFCTR_RAS_0_HI               0x46f
+#define A6XX_RBBM_PERFCTR_RAS_1_LO               0x470
+#define A6XX_RBBM_PERFCTR_RAS_1_HI               0x471
+#define A6XX_RBBM_PERFCTR_RAS_2_LO               0x472
+#define A6XX_RBBM_PERFCTR_RAS_2_HI               0x473
+#define A6XX_RBBM_PERFCTR_RAS_3_LO               0x474
+#define A6XX_RBBM_PERFCTR_RAS_3_HI               0x475
+#define A6XX_RBBM_PERFCTR_UCHE_0_LO              0x476
+#define A6XX_RBBM_PERFCTR_UCHE_0_HI              0x477
+#define A6XX_RBBM_PERFCTR_UCHE_1_LO              0x478
+#define A6XX_RBBM_PERFCTR_UCHE_1_HI              0x479
+#define A6XX_RBBM_PERFCTR_UCHE_2_LO              0x47a
+#define A6XX_RBBM_PERFCTR_UCHE_2_HI              0x47b
+#define A6XX_RBBM_PERFCTR_UCHE_3_LO              0x47c
+#define A6XX_RBBM_PERFCTR_UCHE_3_HI              0x47d
+#define A6XX_RBBM_PERFCTR_UCHE_4_LO              0x47e
+#define A6XX_RBBM_PERFCTR_UCHE_4_HI              0x47f
+#define A6XX_RBBM_PERFCTR_UCHE_5_LO              0x480
+#define A6XX_RBBM_PERFCTR_UCHE_5_HI              0x481
+#define A6XX_RBBM_PERFCTR_UCHE_6_LO              0x482
+#define A6XX_RBBM_PERFCTR_UCHE_6_HI              0x483
+#define A6XX_RBBM_PERFCTR_UCHE_7_LO              0x484
+#define A6XX_RBBM_PERFCTR_UCHE_7_HI              0x485
+#define A6XX_RBBM_PERFCTR_UCHE_8_LO              0x486
+#define A6XX_RBBM_PERFCTR_UCHE_8_HI              0x487
+#define A6XX_RBBM_PERFCTR_UCHE_9_LO              0x488
+#define A6XX_RBBM_PERFCTR_UCHE_9_HI              0x489
+#define A6XX_RBBM_PERFCTR_UCHE_10_LO             0x48a
+#define A6XX_RBBM_PERFCTR_UCHE_10_HI             0x48b
+#define A6XX_RBBM_PERFCTR_UCHE_11_LO             0x48c
+#define A6XX_RBBM_PERFCTR_UCHE_11_HI             0x48d
+#define A6XX_RBBM_PERFCTR_TP_0_LO                0x48e
+#define A6XX_RBBM_PERFCTR_TP_0_HI                0x48f
+#define A6XX_RBBM_PERFCTR_TP_1_LO                0x490
+#define A6XX_RBBM_PERFCTR_TP_1_HI                0x491
+#define A6XX_RBBM_PERFCTR_TP_2_LO                0x492
+#define A6XX_RBBM_PERFCTR_TP_2_HI                0x493
+#define A6XX_RBBM_PERFCTR_TP_3_LO                0x494
+#define A6XX_RBBM_PERFCTR_TP_3_HI                0x495
+#define A6XX_RBBM_PERFCTR_TP_4_LO                0x496
+#define A6XX_RBBM_PERFCTR_TP_4_HI                0x497
+#define A6XX_RBBM_PERFCTR_TP_5_LO                0x498
+#define A6XX_RBBM_PERFCTR_TP_5_HI                0x499
+#define A6XX_RBBM_PERFCTR_TP_6_LO                0x49a
+#define A6XX_RBBM_PERFCTR_TP_6_HI                0x49b
+#define A6XX_RBBM_PERFCTR_TP_7_LO                0x49c
+#define A6XX_RBBM_PERFCTR_TP_7_HI                0x49d
+#define A6XX_RBBM_PERFCTR_TP_8_LO                0x49e
+#define A6XX_RBBM_PERFCTR_TP_8_HI                0x49f
+#define A6XX_RBBM_PERFCTR_TP_9_LO                0x4a0
+#define A6XX_RBBM_PERFCTR_TP_9_HI                0x4a1
+#define A6XX_RBBM_PERFCTR_TP_10_LO               0x4a2
+#define A6XX_RBBM_PERFCTR_TP_10_HI               0x4a3
+#define A6XX_RBBM_PERFCTR_TP_11_LO               0x4a4
+#define A6XX_RBBM_PERFCTR_TP_11_HI               0x4a5
+#define A6XX_RBBM_PERFCTR_SP_0_LO                0x4a6
+#define A6XX_RBBM_PERFCTR_SP_0_HI                0x4a7
+#define A6XX_RBBM_PERFCTR_SP_1_LO                0x4a8
+#define A6XX_RBBM_PERFCTR_SP_1_HI                0x4a9
+#define A6XX_RBBM_PERFCTR_SP_2_LO                0x4aa
+#define A6XX_RBBM_PERFCTR_SP_2_HI                0x4ab
+#define A6XX_RBBM_PERFCTR_SP_3_LO                0x4ac
+#define A6XX_RBBM_PERFCTR_SP_3_HI                0x4ad
+#define A6XX_RBBM_PERFCTR_SP_4_LO                0x4ae
+#define A6XX_RBBM_PERFCTR_SP_4_HI                0x4af
+#define A6XX_RBBM_PERFCTR_SP_5_LO                0x4b0
+#define A6XX_RBBM_PERFCTR_SP_5_HI                0x4b1
+#define A6XX_RBBM_PERFCTR_SP_6_LO                0x4b2
+#define A6XX_RBBM_PERFCTR_SP_6_HI                0x4b3
+#define A6XX_RBBM_PERFCTR_SP_7_LO                0x4b4
+#define A6XX_RBBM_PERFCTR_SP_7_HI                0x4b5
+#define A6XX_RBBM_PERFCTR_SP_8_LO                0x4b6
+#define A6XX_RBBM_PERFCTR_SP_8_HI                0x4b7
+#define A6XX_RBBM_PERFCTR_SP_9_LO                0x4b8
+#define A6XX_RBBM_PERFCTR_SP_9_HI                0x4b9
+#define A6XX_RBBM_PERFCTR_SP_10_LO               0x4ba
+#define A6XX_RBBM_PERFCTR_SP_10_HI               0x4bb
+#define A6XX_RBBM_PERFCTR_SP_11_LO               0x4bc
+#define A6XX_RBBM_PERFCTR_SP_11_HI               0x4bd
+#define A6XX_RBBM_PERFCTR_SP_12_LO               0x4be
+#define A6XX_RBBM_PERFCTR_SP_12_HI               0x4bf
+#define A6XX_RBBM_PERFCTR_SP_13_LO               0x4c0
+#define A6XX_RBBM_PERFCTR_SP_13_HI               0x4c1
+#define A6XX_RBBM_PERFCTR_SP_14_LO               0x4c2
+#define A6XX_RBBM_PERFCTR_SP_14_HI               0x4c3
+#define A6XX_RBBM_PERFCTR_SP_15_LO               0x4c4
+#define A6XX_RBBM_PERFCTR_SP_15_HI               0x4c5
+#define A6XX_RBBM_PERFCTR_SP_16_LO               0x4c6
+#define A6XX_RBBM_PERFCTR_SP_16_HI               0x4c7
+#define A6XX_RBBM_PERFCTR_SP_17_LO               0x4c8
+#define A6XX_RBBM_PERFCTR_SP_17_HI               0x4c9
+#define A6XX_RBBM_PERFCTR_SP_18_LO               0x4ca
+#define A6XX_RBBM_PERFCTR_SP_18_HI               0x4cb
+#define A6XX_RBBM_PERFCTR_SP_19_LO               0x4cc
+#define A6XX_RBBM_PERFCTR_SP_19_HI               0x4cd
+#define A6XX_RBBM_PERFCTR_SP_20_LO               0x4ce
+#define A6XX_RBBM_PERFCTR_SP_20_HI               0x4cf
+#define A6XX_RBBM_PERFCTR_SP_21_LO               0x4d0
+#define A6XX_RBBM_PERFCTR_SP_21_HI               0x4d1
+#define A6XX_RBBM_PERFCTR_SP_22_LO               0x4d2
+#define A6XX_RBBM_PERFCTR_SP_22_HI               0x4d3
+#define A6XX_RBBM_PERFCTR_SP_23_LO               0x4d4
+#define A6XX_RBBM_PERFCTR_SP_23_HI               0x4d5
+#define A6XX_RBBM_PERFCTR_RB_0_LO                0x4d6
+#define A6XX_RBBM_PERFCTR_RB_0_HI                0x4d7
+#define A6XX_RBBM_PERFCTR_RB_1_LO                0x4d8
+#define A6XX_RBBM_PERFCTR_RB_1_HI                0x4d9
+#define A6XX_RBBM_PERFCTR_RB_2_LO                0x4da
+#define A6XX_RBBM_PERFCTR_RB_2_HI                0x4db
+#define A6XX_RBBM_PERFCTR_RB_3_LO                0x4dc
+#define A6XX_RBBM_PERFCTR_RB_3_HI                0x4dd
+#define A6XX_RBBM_PERFCTR_RB_4_LO                0x4de
+#define A6XX_RBBM_PERFCTR_RB_4_HI                0x4df
+#define A6XX_RBBM_PERFCTR_RB_5_LO                0x4e0
+#define A6XX_RBBM_PERFCTR_RB_5_HI                0x4e1
+#define A6XX_RBBM_PERFCTR_RB_6_LO                0x4e2
+#define A6XX_RBBM_PERFCTR_RB_6_HI                0x4e3
+#define A6XX_RBBM_PERFCTR_RB_7_LO                0x4e4
+#define A6XX_RBBM_PERFCTR_RB_7_HI                0x4e5
+#define A6XX_RBBM_PERFCTR_VSC_0_LO               0x4e6
+#define A6XX_RBBM_PERFCTR_VSC_0_HI               0x4e7
+#define A6XX_RBBM_PERFCTR_VSC_1_LO               0x4e8
+#define A6XX_RBBM_PERFCTR_VSC_1_HI               0x4e9
+#define A6XX_RBBM_PERFCTR_LRZ_0_LO               0x4ea
+#define A6XX_RBBM_PERFCTR_LRZ_0_HI               0x4eb
+#define A6XX_RBBM_PERFCTR_LRZ_1_LO               0x4ec
+#define A6XX_RBBM_PERFCTR_LRZ_1_HI               0x4ed
+#define A6XX_RBBM_PERFCTR_LRZ_2_LO               0x4ee
+#define A6XX_RBBM_PERFCTR_LRZ_2_HI               0x4ef
+#define A6XX_RBBM_PERFCTR_LRZ_3_LO               0x4f0
+#define A6XX_RBBM_PERFCTR_LRZ_3_HI               0x4f1
+#define A6XX_RBBM_PERFCTR_CMP_0_LO               0x4f2
+#define A6XX_RBBM_PERFCTR_CMP_0_HI               0x4f3
+#define A6XX_RBBM_PERFCTR_CMP_1_LO               0x4f4
+#define A6XX_RBBM_PERFCTR_CMP_1_HI               0x4f5
+#define A6XX_RBBM_PERFCTR_CMP_2_LO               0x4f6
+#define A6XX_RBBM_PERFCTR_CMP_2_HI               0x4f7
+#define A6XX_RBBM_PERFCTR_CMP_3_LO               0x4f8
+#define A6XX_RBBM_PERFCTR_CMP_3_HI               0x4f9
+#define A6XX_RBBM_PERFCTR_CNTL                   0x500
+#define A6XX_RBBM_PERFCTR_LOAD_CMD0              0x501
+#define A6XX_RBBM_PERFCTR_LOAD_CMD1              0x502
+#define A6XX_RBBM_PERFCTR_LOAD_CMD2              0x503
+#define A6XX_RBBM_PERFCTR_LOAD_CMD3              0x504
+#define A6XX_RBBM_PERFCTR_LOAD_VALUE_LO          0x505
+#define A6XX_RBBM_PERFCTR_LOAD_VALUE_HI          0x506
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_0             0x507
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_1             0x508
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_2             0x509
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_3             0x50A
+
 #define A6XX_RBBM_SECVID_TRUST_CNTL              0xF400
+#define A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO     0xF800
+#define A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI     0xF801
+#define A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE        0xF802
+#define A6XX_RBBM_SECVID_TSB_CNTL                0xF803
 #define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL      0xF810
 
 /* DBGC_CFG registers */
@@ -154,26 +440,87 @@
 #define A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2             0x630
 
 /* VSC registers */
+#define A6XX_VSC_PERFCTR_VSC_SEL_0          0xCD8
+#define A6XX_VSC_PERFCTR_VSC_SEL_1          0xCD9
+
+/* GRAS registers */
 #define A6XX_GRAS_ADDR_MODE_CNTL            0x8601
+#define A6XX_GRAS_PERFCTR_TSE_SEL_0         0x8610
+#define A6XX_GRAS_PERFCTR_TSE_SEL_1         0x8611
+#define A6XX_GRAS_PERFCTR_TSE_SEL_2         0x8612
+#define A6XX_GRAS_PERFCTR_TSE_SEL_3         0x8613
+#define A6XX_GRAS_PERFCTR_RAS_SEL_0         0x8614
+#define A6XX_GRAS_PERFCTR_RAS_SEL_1         0x8615
+#define A6XX_GRAS_PERFCTR_RAS_SEL_2         0x8616
+#define A6XX_GRAS_PERFCTR_RAS_SEL_3         0x8617
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_0         0x8618
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_1         0x8619
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_2         0x861A
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_3         0x861B
 
 /* RB registers */
 #define A6XX_RB_ADDR_MODE_CNTL              0x8E05
 #define A6XX_RB_NC_MODE_CNTL                0x8E08
+#define A6XX_RB_PERFCTR_RB_SEL_0            0x8E10
+#define A6XX_RB_PERFCTR_RB_SEL_1            0x8E11
+#define A6XX_RB_PERFCTR_RB_SEL_2            0x8E12
+#define A6XX_RB_PERFCTR_RB_SEL_3            0x8E13
+#define A6XX_RB_PERFCTR_RB_SEL_4            0x8E14
+#define A6XX_RB_PERFCTR_RB_SEL_5            0x8E15
+#define A6XX_RB_PERFCTR_RB_SEL_6            0x8E16
+#define A6XX_RB_PERFCTR_RB_SEL_7            0x8E17
+#define A6XX_RB_PERFCTR_CCU_SEL_0           0x8E18
+#define A6XX_RB_PERFCTR_CCU_SEL_1           0x8E19
+#define A6XX_RB_PERFCTR_CCU_SEL_2           0x8E1A
+#define A6XX_RB_PERFCTR_CCU_SEL_3           0x8E1B
+#define A6XX_RB_PERFCTR_CCU_SEL_4           0x8E1C
+#define A6XX_RB_PERFCTR_CMP_SEL_0           0x8E2C
+#define A6XX_RB_PERFCTR_CMP_SEL_1           0x8E2D
+#define A6XX_RB_PERFCTR_CMP_SEL_2           0x8E2E
+#define A6XX_RB_PERFCTR_CMP_SEL_3           0x8E2F
 
 /* PC registers */
 #define A6XX_PC_DBG_ECO_CNTL                0x9E00
 #define A6XX_PC_ADDR_MODE_CNTL              0x9E01
+#define A6XX_PC_PERFCTR_PC_SEL_0            0x9E34
+#define A6XX_PC_PERFCTR_PC_SEL_1            0x9E35
+#define A6XX_PC_PERFCTR_PC_SEL_2            0x9E36
+#define A6XX_PC_PERFCTR_PC_SEL_3            0x9E37
+#define A6XX_PC_PERFCTR_PC_SEL_4            0x9E38
+#define A6XX_PC_PERFCTR_PC_SEL_5            0x9E39
+#define A6XX_PC_PERFCTR_PC_SEL_6            0x9E3A
+#define A6XX_PC_PERFCTR_PC_SEL_7            0x9E3B
 
 /* HLSQ registers */
 #define A6XX_HLSQ_ADDR_MODE_CNTL            0xBE05
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_0        0xBE10
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_1        0xBE11
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_2        0xBE12
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_3        0xBE13
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_4        0xBE14
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_5        0xBE15
 #define A6XX_HLSQ_DBG_AHB_READ_APERTURE     0xC800
 #define A6XX_HLSQ_DBG_READ_SEL              0xD000
 
 /* VFD registers */
 #define A6XX_VFD_ADDR_MODE_CNTL             0xA601
+#define A6XX_VFD_PERFCTR_VFD_SEL_0          0xA610
+#define A6XX_VFD_PERFCTR_VFD_SEL_1          0xA611
+#define A6XX_VFD_PERFCTR_VFD_SEL_2          0xA612
+#define A6XX_VFD_PERFCTR_VFD_SEL_3          0xA613
+#define A6XX_VFD_PERFCTR_VFD_SEL_4          0xA614
+#define A6XX_VFD_PERFCTR_VFD_SEL_5          0xA615
+#define A6XX_VFD_PERFCTR_VFD_SEL_6          0xA616
+#define A6XX_VFD_PERFCTR_VFD_SEL_7          0xA617
 
 /* VPC registers */
 #define A6XX_VPC_ADDR_MODE_CNTL             0x9601
+#define A6XX_VPC_PERFCTR_VPC_SEL_0          0x9604
+#define A6XX_VPC_PERFCTR_VPC_SEL_1          0x9605
+#define A6XX_VPC_PERFCTR_VPC_SEL_2          0x9606
+#define A6XX_VPC_PERFCTR_VPC_SEL_3          0x9607
+#define A6XX_VPC_PERFCTR_VPC_SEL_4          0x9608
+#define A6XX_VPC_PERFCTR_VPC_SEL_5          0x9609
 
 /* UCHE registers */
 #define A6XX_UCHE_ADDR_MODE_CNTL            0xE00
@@ -190,20 +537,133 @@
 #define A6XX_UCHE_GMEM_RANGE_MAX_HI         0xE0E
 #define A6XX_UCHE_CACHE_WAYS                0xE17
 #define A6XX_UCHE_FILTER_CNTL               0xE18
+#define A6XX_UCHE_CLIENT_PF                 0xE19
+#define A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK  0x7
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_0        0xE1C
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_1        0xE1D
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_2        0xE1E
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_3        0xE1F
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_4        0xE20
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_5        0xE21
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_6        0xE22
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_7        0xE23
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_8        0xE24
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_9        0xE25
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_10       0xE26
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_11       0xE27
 
 /* SP registers */
 #define A6XX_SP_ADDR_MODE_CNTL              0xAE01
 #define A6XX_SP_NC_MODE_CNTL                0xAE02
+#define A6XX_SP_PERFCTR_SP_SEL_0            0xAE10
+#define A6XX_SP_PERFCTR_SP_SEL_1            0xAE11
+#define A6XX_SP_PERFCTR_SP_SEL_2            0xAE12
+#define A6XX_SP_PERFCTR_SP_SEL_3            0xAE13
+#define A6XX_SP_PERFCTR_SP_SEL_4            0xAE14
+#define A6XX_SP_PERFCTR_SP_SEL_5            0xAE15
+#define A6XX_SP_PERFCTR_SP_SEL_6            0xAE16
+#define A6XX_SP_PERFCTR_SP_SEL_7            0xAE17
+#define A6XX_SP_PERFCTR_SP_SEL_8            0xAE18
+#define A6XX_SP_PERFCTR_SP_SEL_9            0xAE19
+#define A6XX_SP_PERFCTR_SP_SEL_10           0xAE1A
+#define A6XX_SP_PERFCTR_SP_SEL_11           0xAE1B
+#define A6XX_SP_PERFCTR_SP_SEL_12           0xAE1C
+#define A6XX_SP_PERFCTR_SP_SEL_13           0xAE1D
+#define A6XX_SP_PERFCTR_SP_SEL_14           0xAE1E
+#define A6XX_SP_PERFCTR_SP_SEL_15           0xAE1F
+#define A6XX_SP_PERFCTR_SP_SEL_16           0xAE20
+#define A6XX_SP_PERFCTR_SP_SEL_17           0xAE21
+#define A6XX_SP_PERFCTR_SP_SEL_18           0xAE22
+#define A6XX_SP_PERFCTR_SP_SEL_19           0xAE23
+#define A6XX_SP_PERFCTR_SP_SEL_20           0xAE24
+#define A6XX_SP_PERFCTR_SP_SEL_21           0xAE25
+#define A6XX_SP_PERFCTR_SP_SEL_22           0xAE26
+#define A6XX_SP_PERFCTR_SP_SEL_23           0xAE27
 
 /* TP registers */
 #define A6XX_TPL1_ADDR_MODE_CNTL            0xB601
 #define A6XX_TPL1_NC_MODE_CNTL              0xB604
+#define A6XX_TPL1_PERFCTR_TP_SEL_0          0xB610
+#define A6XX_TPL1_PERFCTR_TP_SEL_1          0xB611
+#define A6XX_TPL1_PERFCTR_TP_SEL_2          0xB612
+#define A6XX_TPL1_PERFCTR_TP_SEL_3          0xB613
+#define A6XX_TPL1_PERFCTR_TP_SEL_4          0xB614
+#define A6XX_TPL1_PERFCTR_TP_SEL_5          0xB615
+#define A6XX_TPL1_PERFCTR_TP_SEL_6          0xB616
+#define A6XX_TPL1_PERFCTR_TP_SEL_7          0xB617
+#define A6XX_TPL1_PERFCTR_TP_SEL_8          0xB618
+#define A6XX_TPL1_PERFCTR_TP_SEL_9          0xB619
+#define A6XX_TPL1_PERFCTR_TP_SEL_10         0xB61A
+#define A6XX_TPL1_PERFCTR_TP_SEL_11         0xB61B
 
 /* VBIF registers */
 #define A6XX_VBIF_VERSION                       0x3000
 #define A6XX_VBIF_GATE_OFF_WRREQ_EN             0x302A
 #define A6XX_VBIF_XIN_HALT_CTRL0                0x3080
 #define A6XX_VBIF_XIN_HALT_CTRL1                0x3081
+#define A6XX_VBIF_PERF_CNT_SEL0                 0x30d0
+#define A6XX_VBIF_PERF_CNT_SEL1                 0x30d1
+#define A6XX_VBIF_PERF_CNT_SEL2                 0x30d2
+#define A6XX_VBIF_PERF_CNT_SEL3                 0x30d3
+#define A6XX_VBIF_PERF_CNT_LOW0                 0x30d8
+#define A6XX_VBIF_PERF_CNT_LOW1                 0x30d9
+#define A6XX_VBIF_PERF_CNT_LOW2                 0x30da
+#define A6XX_VBIF_PERF_CNT_LOW3                 0x30db
+#define A6XX_VBIF_PERF_CNT_HIGH0                0x30e0
+#define A6XX_VBIF_PERF_CNT_HIGH1                0x30e1
+#define A6XX_VBIF_PERF_CNT_HIGH2                0x30e2
+#define A6XX_VBIF_PERF_CNT_HIGH3                0x30e3
+#define A6XX_VBIF_PERF_PWR_CNT_EN0              0x3100
+#define A6XX_VBIF_PERF_PWR_CNT_EN1              0x3101
+#define A6XX_VBIF_PERF_PWR_CNT_EN2              0x3102
+#define A6XX_VBIF_PERF_PWR_CNT_LOW0             0x3110
+#define A6XX_VBIF_PERF_PWR_CNT_LOW1             0x3111
+#define A6XX_VBIF_PERF_PWR_CNT_LOW2             0x3112
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH0            0x3118
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH1            0x3119
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH2            0x311a
+
+/* CX_DBGC_CFG registers */
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A                   0x18400
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_B                   0x18401
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_C                   0x18402
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D                   0x18403
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT                   0x18404
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT     0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT       0xC
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT        0x1C
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM                   0x18405
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT      0x18
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0                  0x18408
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1                  0x18409
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2                  0x1840A
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3                  0x1840B
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0                 0x1840C
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1                 0x1840D
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2                 0x1840E
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3                 0x1840F
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0                 0x18410
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1                 0x18411
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT            0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT            0x4
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT            0x8
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT            0xC
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT            0x10
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT            0x14
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT            0x18
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT            0x1C
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT            0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT            0x4
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT           0x8
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT           0xC
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT           0x10
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT           0x14
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT           0x18
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT           0x1C
+#define A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1              0x1842F
+#define A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2              0x18430
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT    0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT  0x8
 
 /* GMU control registers */
 #define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL	0x1A881
@@ -260,7 +720,7 @@
 #define A6XX_GMU_HOST_INTERRUPT_CLR		0x23B04
 #define A6XX_GMU_HOST_INTERRUPT_STATUS		0x23B05
 #define A6XX_GMU_HOST_INTERRUPT_MASK		0x23B06
-#define A6XX_GMU_GPU_CX_BUSY_STATUS		0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index ec3cade..68d7653 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1392,7 +1392,7 @@
 {
 	int ret = 0;
 
-	if (!adreno_is_a5xx(adreno_dev))
+	if (!adreno_is_a5xx(adreno_dev) && !adreno_is_a6xx(adreno_dev))
 		return -EINVAL;
 
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS) &&
@@ -1854,6 +1854,30 @@
 			status = 0;
 		}
 		break;
+	case KGSL_PROP_DEVICE_QTIMER:
+		{
+			struct kgsl_qtimer_prop qtimerprop = {0};
+			struct kgsl_memdesc *qtimer_desc =
+				kgsl_mmu_get_qtimer_global_entry(device);
+
+			if (sizebytes != sizeof(qtimerprop)) {
+				status = -EINVAL;
+				break;
+			}
+
+			if (qtimer_desc) {
+				qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
+				qtimerprop.size = qtimer_desc->size;
+			}
+
+			if (copy_to_user(value, &qtimerprop,
+						sizeof(qtimerprop))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
 	case KGSL_PROP_MMU_ENABLE:
 		{
 			/* Report MMU only if we can handle paged memory */
@@ -2243,6 +2267,11 @@
 {
 	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
 	unsigned int reg_rbbm_status;
+	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* if hw driver implements idle check - use it */
+	if (gpudev->hw_isidle)
+		return gpudev->hw_isidle(adreno_dev);
 
 	if (adreno_is_a540(adreno_dev))
 		/**
@@ -3034,6 +3063,7 @@
 	.regulator_disable_poll = adreno_regulator_disable_poll,
 	.clk_set_options = adreno_clk_set_options,
 	.gpu_model = adreno_gpu_model,
+	.stop_fault_timer = adreno_dispatcher_stop_fault_timer,
 };
 
 static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 1e08a5e..75d5587 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -852,10 +852,12 @@
 				unsigned int clear_mask);
 	void (*oob_clear)(struct adreno_device *adreno_dev,
 				unsigned int clear_mask);
-	bool (*hw_isidle)(struct adreno_device *);
 	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
 				unsigned int arg1, unsigned int arg2);
-	bool (*gmu_isidle)(struct adreno_device *);
+	bool (*hw_isidle)(struct adreno_device *);
+	int (*wait_for_gmu_idle)(struct adreno_device *);
+	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
+				unsigned int fsynr1);
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 77da9c9..49d784c 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -11,6 +11,7 @@
  *
  */
 #include <linux/firmware.h>
+#include <soc/qcom/subsystem_restart.h>
 #include <linux/pm_opp.h>
 
 #include "adreno.h"
@@ -116,34 +117,29 @@
 static void a6xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct kgsl_protected_registers *mmu_prot = NULL;
-	int i;
-	int num_sets;
-	int num_sets_array;
-	unsigned int mmu_base;
-	unsigned int mmu_range;
+	struct kgsl_protected_registers *mmu_prot =
+		kgsl_mmu_get_prot_regs(&device->mmu);
+	int i, num_sets;
+	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
+	int max_sets = adreno_dev->gpucore->num_protected_regs;
+	unsigned int mmu_base = 0, mmu_range = 0, cur_range;
 
 	/* enable access protection to privileged registers */
 	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000007);
 
-	num_sets = ARRAY_SIZE(a6xx_protected_regs_group);
-
-	mmu_prot = kgsl_mmu_get_prot_regs(&device->mmu);
-
 	if (mmu_prot) {
 		mmu_base = mmu_prot->base;
 		mmu_range = 1 << mmu_prot->range;
-		num_sets += DIV_ROUND_UP(mmu_range, 0x2000);
+		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
 	}
 
-	if (num_sets > adreno_dev->gpucore->num_protected_regs) {
+	if (req_sets > max_sets)
 		WARN(1, "Size exceeds the num of protection regs available\n");
-		num_sets = adreno_dev->gpucore->num_protected_regs;
-	}
 
-	num_sets_array = min_t(unsigned int,
-		ARRAY_SIZE(a6xx_protected_regs_group), num_sets);
-	for (i = 0; i < num_sets_array; i++) {
+	/* Protect GPU registers */
+	num_sets = min_t(unsigned int,
+		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
+	for (i = 0; i < num_sets; i++) {
 		struct a6xx_protected_regs *regs =
 					&a6xx_protected_regs_group[i];
 
@@ -152,15 +148,18 @@
 				(regs->read_protect << 31));
 	}
 
-	for (; i < num_sets; i++) {
-		unsigned int cur_range = min_t(unsigned int, mmu_range,
+	/* Protect MMU registers */
+	if (mmu_prot) {
+		while ((i < max_sets) && (mmu_range > 0)) {
+			cur_range = min_t(unsigned int, mmu_range,
 						0x2000);
+			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
+				mmu_base | ((cur_range - 1) << 18) | (1 << 31));
 
-		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-			mmu_base | ((cur_range - 1) << 18) | (1 << 31));
-
-		mmu_base += cur_range;
-		mmu_range -= cur_range;
+			mmu_base += cur_range;
+			mmu_range -= cur_range;
+			i++;
+		}
 	}
 }
 
@@ -194,6 +193,11 @@
 	unsigned int bit, mal, mode, glbl_inv;
 	unsigned int amsbc = 0;
 
+	/* runtime adjust callbacks based on feature sets */
+	if (!kgsl_gmu_isenabled(device))
+		/* Legacy idle management if gmu is disabled */
+		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
+
 	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
 			ARRAY_SIZE(a6xx_vbif_platforms));
 	/*
@@ -228,9 +232,6 @@
 	/* Setting the primFifo thresholds default values */
 	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
 
-	/* Disable secured mode */
-	kgsl_regwrite(device, A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
-
 	/* Set the AHB default slave response to "ERROR" */
 	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);
 
@@ -284,6 +285,8 @@
 	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
 					  (1 << 30) | 0x4000);
 
+	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
+
 	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
 		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
@@ -300,6 +303,8 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	uint64_t gpuaddr;
+	static void *zap;
+	int ret = 0;
 
 	gpuaddr = fw->memdesc.gpuaddr;
 	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
@@ -307,7 +312,18 @@
 	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
 				upper_32_bits(gpuaddr));
 
-	return 0;
+	/* Load the zap shader firmware through PIL if it's available */
+	if (adreno_dev->gpucore->zap_name && !zap) {
+		zap = subsystem_get(adreno_dev->gpucore->zap_name);
+
+		/* Return error if the zap shader cannot be loaded */
+		if (IS_ERR_OR_NULL(zap)) {
+			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
+			zap = NULL;
+		}
+	}
+
+	return ret;
 }
 
 
@@ -438,7 +454,12 @@
 	/* Clear the SQE_HALT to start the CP engine */
 	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);
 
-	return a6xx_send_cp_init(adreno_dev, rb);
+	ret = a6xx_send_cp_init(adreno_dev, rb);
+	if (ret)
+		return ret;
+
+	/* GPU comes up in secured mode, make it unsecured by default */
+	return adreno_set_unsecured_mode(adreno_dev, rb);
 }
 
 static int _load_firmware(struct kgsl_device *device, const char *fwfile,
@@ -880,6 +901,72 @@
 }
 
 /*
+ * a6xx_hm_sptprac_enable() - Turn on HM and SPTPRAC
+ * @device: Pointer to KGSL device
+ */
+static int a6xx_hm_sptprac_enable(struct kgsl_device *device)
+{
+	int ret = 0;
+	struct gmu_device *gmu = &device->gmu;
+
+	/* If GMU does not control HM we must */
+	if (gmu->idle_level < GPU_HW_IFPC) {
+		ret = a6xx_hm_enable(ADRENO_DEVICE(device));
+		if (ret) {
+			dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
+			return ret;
+		}
+	}
+
+	/* If GMU does not control SPTPRAC we must */
+	if (gmu->idle_level < GPU_HW_SPTP_PC) {
+		ret = a6xx_sptprac_enable(ADRENO_DEVICE(device));
+		if (ret) {
+			a6xx_hm_disable(ADRENO_DEVICE(device));
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * a6xx_hm_sptprac_disable() - Turn off SPTPRAC and HM
+ * @device: Pointer to KGSL device
+ */
+static int a6xx_hm_sptprac_disable(struct kgsl_device *device)
+{
+	int ret = 0;
+	struct gmu_device *gmu = &device->gmu;
+
+	/* If GMU does not control SPTPRAC we must */
+	if (gmu->idle_level < GPU_HW_SPTP_PC)
+		a6xx_sptprac_disable(ADRENO_DEVICE(device));
+
+	/* If GMU does not control HM we must */
+	if (gmu->idle_level < GPU_HW_IFPC) {
+		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
+		if (ret)
+			dev_err(&gmu->pdev->dev, "Failed to power off GPU HM\n");
+	}
+
+	return ret;
+}
+
+/*
+ * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
+ * @device: Pointer to KGSL device
+ * @on: True to turn on or false to turn off
+ */
+static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
+{
+	if (on)
+		return a6xx_hm_sptprac_enable(device);
+	else
+		return a6xx_hm_sptprac_disable(device);
+}
+
+/*
  * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
  * @device: Pointer to KGSL device
  *
@@ -911,8 +998,6 @@
 	return ret;
 }
 
-#define GMU_POWER_STATE_SLUMBER 15
-
 /*
  * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
  * @device: Pointer to KGSL device
@@ -945,7 +1030,7 @@
 		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
 	else {
 		kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &state);
-		if (state != GMU_POWER_STATE_SLUMBER) {
+		if (state != GPU_HW_SLUMBER) {
 			dev_err(&gmu->pdev->dev,
 					"Failed to prepare for slumber\n");
 			ret = -EINVAL;
@@ -959,7 +1044,7 @@
 {
 	struct gmu_device *gmu = &device->gmu;
 	struct device *dev = &gmu->pdev->dev;
-	int ret;
+	int ret = 0;
 
 	if (device->state != KGSL_STATE_INIT &&
 		device->state != KGSL_STATE_SUSPEND) {
@@ -985,26 +1070,11 @@
 				0xFFFFFFFF))
 			goto error_rsc;
 
-		/* If GMU does not control HM we must */
-		if (gmu->idle_level < GPU_HW_IFPC) {
-			ret = a6xx_hm_enable(ADRENO_DEVICE(device));
-			if (ret) {
-				dev_err(dev, "Failed to power on GPU HM\n");
-				return ret;
-			}
-		}
-
-		/* If GMU does not control SPTP we must */
-		if (gmu->idle_level < GPU_HW_SPTP_PC) {
-			ret = a6xx_sptprac_enable(ADRENO_DEVICE(device));
-			if (ret) {
-				a6xx_hm_disable(ADRENO_DEVICE(device));
-				return ret;
-			}
-		}
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_control(device, true);
 	}
 
-	return 0;
+	return ret;
 
 error_rsc:
 	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
@@ -1014,19 +1084,10 @@
 static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = &device->gmu;
-	struct device *dev = &gmu->pdev->dev;
-	int val, ret;
+	int val, ret = 0;
 
-	/* If GMU does not control SPTP we must */
-	if (gmu->idle_level < GPU_HW_SPTP_PC)
-		a6xx_sptprac_disable(ADRENO_DEVICE(device));
-
-	/* If GMU does not control HM we must */
-	if (gmu->idle_level < GPU_HW_IFPC) {
-		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
-		if (ret)
-			dev_err(dev, "Failed to power off GPU HM\n");
-	}
+	/* Turn off the SPTP and HM head switches */
+	ret = a6xx_hm_sptprac_control(device, false);
 
 	/* RSC sleep sequence */
 	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TIMESTAMP_UNIT1_EN_DRV0, 1);
@@ -1052,7 +1113,7 @@
 
 	/* FIXME: v2 has different procedure to trigger sequence */
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -1066,30 +1127,16 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = &device->gmu;
 	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
-	struct device *dev = &gmu->pdev->dev;
 	int ret, i;
 
 	a6xx_gmu_power_config(device);
 
-	/* If GMU does not control HM then we must */
-	if (gmu->idle_level < GPU_HW_IFPC) {
-		ret = a6xx_hm_enable(adreno_dev);
-		if (ret) {
-			dev_err(dev, "Failed to power on GPU HM\n");
-			return ret;
-		}
-	}
-
-	/* If GMU does not control SPTP then we must */
-	if (gmu->idle_level < GPU_HW_SPTP_PC) {
-		ret = a6xx_sptprac_enable(adreno_dev);
-		if (ret) {
-			a6xx_hm_disable(adreno_dev);
-			return ret;
-		}
-	}
-
 	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_control(device, true);
+		if (ret)
+			return ret;
+
 		/* Turn on TCM retention */
 		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
 
@@ -1244,29 +1291,35 @@
 	return ret;
 }
 
-static bool a6xx_gmu_isidle(struct adreno_device *adreno_dev)
+static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
+{
+	unsigned int reg;
+
+	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
+		A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
+	return ((~reg & GPUBUSYIGNAHB) != 0);
+}
+
+static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
-	unsigned int value;
 
-	/* Check if GMU on */
-	if (!(gmu->flags & GMU_CLK_ON))
-		return true;
+	if (timed_poll_check(device, A6XX_GMU_RPMH_POWER_STATE,
+		gmu->idle_level, GMU_START_TIMEOUT, 0xf)) {
+		dev_err(&gmu->pdev->dev,
+			"GMU failed to reach power state %d\n",
+			gmu->idle_level);
+		return -ETIMEDOUT;
+	}
 
-	/* Ensure GPU is in its lowest power state */
-	kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &value);
+	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
+		0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
+		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
+		return -ETIMEDOUT;
+	}
 
-	if (value < gmu->idle_level)
-		return false;
-
-	/* Ensure GPU and GMU are both idle */
-	kgsl_gmu_regread(device->reg_virt, A6XX_GMU_GPU_CX_BUSY_STATUS,
-			&value);
-	if ((value & SLUMBER_CHECK_MASK) != SLUMBER_CHECK_MASK)
-		return false;
-
-	return true;
+	return 0;
 }
 
 /*
@@ -1471,6 +1524,46 @@
 	iounmap(gpu_cx_reg);
 }
 
+static const char *fault_block[8] = {
+	[0] = "CP",
+	[1] = "UCHE",
+	[2] = "VFD",
+	[3] = "UCHE",
+	[4] = "CCU",
+	[5] = "unknown",
+	[6] = "CDP Prefetch",
+	[7] = "GPMU",
+};
+
+static const char *uche_client[8] = {
+	[0] = "VFD",
+	[1] = "SP",
+	[2] = "VSC",
+	[3] = "VPC",
+	[4] = "HLSQ",
+	[5] = "PC",
+	[6] = "LRZ",
+	[7] = "unknown",
+};
+
+static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
+						unsigned int fsynr1)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int client_id;
+	unsigned int uche_client_id;
+
+	client_id = fsynr1 & 0xff;
+
+	if (client_id >= ARRAY_SIZE(fault_block))
+		return "unknown";
+	else if (client_id != 3)
+		return fault_block[client_id];
+
+	kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
+	return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
+}
+
 #define A6XX_INT_MASK \
 	((1 << A6XX_INT_CP_AHB_ERROR) |			\
 	 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) |	\
@@ -1537,6 +1630,367 @@
 	.sect_sizes = &a6xx_snap_sizes,
 };
 
+static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
+		A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
+		A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
+		A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
+		A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
+		A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
+		A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
+		A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
+		A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
+		A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
+		A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
+		A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
+		A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
+		A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
+		A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
+		A6XX_RBBM_PERFCTR_RBBM_0_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
+		A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
+		A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
+		A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
+		A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
+		A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
+		A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
+		A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
+		A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
+		A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
+		A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
+		A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
+		A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
+		A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
+		A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
+		A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
+		A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
+		A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
+		A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
+		A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
+		A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
+		A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
+		A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
+		A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
+		A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
+		A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
+		A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
+		A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
+		A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
+		A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
+		A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
+		A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
+		A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
+		A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
+		A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
+		A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
+		A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
+		A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
+		A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
+		A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
+		A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
+		A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
+		A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
+		A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
+		A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
+		A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
+		A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
+		A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
+		A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
+		A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
+					A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
+		A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
+					A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
+		A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
+		A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
+		A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
+		A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
+		A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
+		A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
+		A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
+		A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
+		A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
+		A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
+		A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
+		A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
+		A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
+		A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
+		A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
+		A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
+		A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
+		A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
+		A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
+		A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
+		A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
+		A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
+		A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
+		A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
+		A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
+		A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
+		A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
+		A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
+		A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
+		A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
+		A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
+		A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
+		A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
+		A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
+		A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
+		A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
+		A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
+		A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
+		A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
+		A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
+		A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
+		A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
+		A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
+		A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
+		A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
+		A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
+		A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
+		A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
+		A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
+		A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
+		A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
+		A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
+		A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
+		A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
+		A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
+		A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
+		A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
+		A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
+		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
+};
+
+#define A6XX_PERFCOUNTER_GROUP(offset, name) \
+	ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
+
+#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
+	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
+
+static struct adreno_perfcount_group a6xx_perfcounter_groups
+				[KGSL_PERFCOUNTER_GROUP_MAX] = {
+	A6XX_PERFCOUNTER_GROUP(CP, cp),
+	A6XX_PERFCOUNTER_GROUP(RBBM, rbbm),
+	A6XX_PERFCOUNTER_GROUP(PC, pc),
+	A6XX_PERFCOUNTER_GROUP(VFD, vfd),
+	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
+	A6XX_PERFCOUNTER_GROUP(VPC, vpc),
+	A6XX_PERFCOUNTER_GROUP(CCU, ccu),
+	A6XX_PERFCOUNTER_GROUP(CMP, cmp),
+	A6XX_PERFCOUNTER_GROUP(TSE, tse),
+	A6XX_PERFCOUNTER_GROUP(RAS, ras),
+	A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
+	A6XX_PERFCOUNTER_GROUP(UCHE, uche),
+	A6XX_PERFCOUNTER_GROUP(TP, tp),
+	A6XX_PERFCOUNTER_GROUP(SP, sp),
+	A6XX_PERFCOUNTER_GROUP(RB, rb),
+	A6XX_PERFCOUNTER_GROUP(VSC, vsc),
+	A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
+};
+
+static struct adreno_perfcounters a6xx_perfcounters = {
+	a6xx_perfcounter_groups,
+	ARRAY_SIZE(a6xx_perfcounter_groups),
+};
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
@@ -1562,6 +2016,15 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
+					A6XX_RBBM_PERFCTR_LOAD_CMD0),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
+					A6XX_RBBM_PERFCTR_LOAD_CMD1),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
+					A6XX_RBBM_PERFCTR_LOAD_CMD2),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
+					A6XX_RBBM_PERFCTR_LOAD_CMD3),
 
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
@@ -1573,6 +2036,10 @@
 					  A6XX_RBBM_BLOCK_SW_RESET_CMD),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
 					  A6XX_RBBM_BLOCK_SW_RESET_CMD2),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
+				A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
+				A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_CP_ALWAYS_ON_COUNTER_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
@@ -1612,6 +2079,17 @@
 				A6XX_GMU_HOST2GMU_INTR_CLR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
 				A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
+
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
+				A6XX_RBBM_SECVID_TRUST_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
+				A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
+				A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
+				A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
+				A6XX_RBBM_SECVID_TSB_CNTL),
 };
 
 static const struct adreno_reg_offsets a6xx_reg_offsets = {
@@ -1632,6 +2110,7 @@
 	.rb_start = a6xx_rb_start,
 	.regulator_enable = a6xx_sptprac_enable,
 	.regulator_disable = a6xx_sptprac_disable,
+	.perfcounters = &a6xx_perfcounters,
 	.microcode_read = a6xx_microcode_read,
 	.enable_64bit = a6xx_enable_64bit,
 	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
@@ -1640,5 +2119,7 @@
 	.oob_set = a6xx_oob_set,
 	.oob_clear = a6xx_oob_clear,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
-	.gmu_isidle = a6xx_gmu_isidle,
+	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
+	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
+	.iommu_fault_block = a6xx_iommu_fault_block,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index e82975e..e501a68 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -18,7 +18,7 @@
 #include "adreno_snapshot.h"
 #include "a6xx_reg.h"
 #include "adreno_a6xx.h"
-
+#include "kgsl_gmu.h"
 
 #define A6XX_NUM_CTXTS 2
 
@@ -202,6 +202,11 @@
 	0x3410, 0x3410, 0x3800, 0x3801,
 };
 
+static const unsigned int a6xx_gmu_registers[] = {
+	/* GMU */
+	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
+};
+
 static const struct adreno_vbif_snapshot_registers
 a6xx_vbif_snapshot_registers[] = {
 	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
@@ -337,6 +342,13 @@
 	{ A6XX_DBGBUS_TPL1_3, 0x100, },
 };
 
+static void __iomem *a6xx_cx_dbgc;
+static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
+	{ A6XX_DBGBUS_VBIF, 0x100, },
+	{ A6XX_DBGBUS_GMU, 0x100, },
+	{ A6XX_DBGBUS_CX, 0x100, },
+};
+
 #define A6XX_NUM_SHADER_BANKS 3
 #define A6XX_SHADER_STATETYPE_SHIFT 8
 
@@ -899,6 +911,100 @@
 	return size;
 }
 
+static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
+{
+	void __iomem *reg;
+
+	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
+		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
+		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
+		return;
+
+	reg = a6xx_cx_dbgc +
+		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
+	*value = __raw_readl(reg);
+
+	/*
+	 * ensure this read finishes before the next one.
+	 * i.e. act like normal readl()
+	 */
+	rmb();
+}
+
+static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
+{
+	void __iomem *reg;
+
+	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
+		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
+		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
+		return;
+
+	reg = a6xx_cx_dbgc +
+		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
+
+	/*
+	 * ensure previous writes post before this one,
+	 * i.e. act like normal writel()
+	 */
+	wmb();
+	__raw_writel(value, reg);
+}
+
+/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
+static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
+	unsigned int block_id, unsigned int index, unsigned int *val)
+{
+	unsigned int reg;
+
+	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
+			(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
+
+	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
+	val++;
+	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
+}
+
+/*
+ * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
+ * block from the CX DBGC block
+ */
+static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
+	u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_debugbus *header =
+		(struct kgsl_snapshot_debugbus *)buf;
+	struct adreno_debugbus_block *block = priv;
+	int i;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int dwords;
+	size_t size;
+
+	dwords = block->dwords;
+
+	/* For a6xx each debug bus data unit is 2 DWORDS */
+	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
+
+	if (remain < size) {
+		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
+		return 0;
+	}
+
+	header->id = block->block_id;
+	header->count = dwords * 2;
+
+	for (i = 0; i < dwords; i++)
+		a6xx_cx_debug_bus_read(device, block->block_id, i,
+					&data[i*2]);
+
+	return size;
+}
+
 /* a6xx_snapshot_debugbus() - Capture debug bus data */
 static void a6xx_snapshot_debugbus(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot)
@@ -942,12 +1048,144 @@
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
 
+	a6xx_cx_dbgc = ioremap(device->reg_phys +
+			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
+			(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
+				A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
+
+	if (a6xx_cx_dbgc) {
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
+		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
+		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
+
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
+
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
+			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
+			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
+			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
+			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
+			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
+			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
+			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
+			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
+			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
+			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
+			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
+			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
+			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
+			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
+
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+	} else
+		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
+
 	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
 		kgsl_snapshot_add_section(device,
 			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
 			snapshot, a6xx_snapshot_dbgc_debugbus_block,
 			(void *) &a6xx_dbgc_debugbus_blocks[i]);
 	}
+
+	if (a6xx_cx_dbgc) {
+		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
+				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
+		}
+		iounmap(a6xx_cx_dbgc);
+	}
+}
+
+static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
+		u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	struct kgsl_snapshot_registers *regs = priv;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int count = 0, j, k;
+
+	/* Figure out how many registers we are going to dump */
+	for (j = 0; j < regs->count; j++) {
+		int start = regs->regs[j * 2];
+		int end = regs->regs[j * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	for (j = 0; j < regs->count; j++) {
+		unsigned int start = regs->regs[j * 2];
+		unsigned int end = regs->regs[j * 2 + 1];
+
+		for (k = start; k <= end; k++) {
+			unsigned int val;
+
+			kgsl_gmu_regread(device, k, &val);
+			*data++ = k;
+			*data++ = val;
+		}
+	}
+
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
+static void a6xx_snapshot_gmu(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot)
+{
+	struct kgsl_snapshot_registers gmu_regs = {
+		.regs = a6xx_gmu_registers,
+		.count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
+	};
+
+	if (!kgsl_gmu_isenabled(device))
+		return;
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+			snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
+}
+
+/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
+static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
+		size_t remain, void *priv)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+
+	if (remain < DEBUG_SECTION_SZ(1)) {
+		SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
+		return 0;
+	}
+
+	/* Dump the SQE firmware version */
+	header->type = SNAPSHOT_DEBUG_SQE_VERSION;
+	header->size = 1;
+	*data = fw->version;
+
+	return DEBUG_SECTION_SZ(1);
 }
 
 static void _a6xx_do_crashdump(struct kgsl_device *device)
@@ -1039,6 +1277,10 @@
 		snapshot, adreno_snapshot_cp_roq,
 		&snap_data->sect_sizes->roq);
 
+	/* SQE Firmware */
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
+		snapshot, a6xx_snapshot_sqe, NULL);
+
 	/* Mempool debug data */
 	a6xx_snapshot_mempool(device, snapshot);
 
@@ -1052,6 +1294,9 @@
 	a6xx_snapshot_dbgahb_regs(device, snapshot);
 
 	a6xx_snapshot_debugbus(device, snapshot);
+
+	/* GMU TCM data dumped through AHB */
+	a6xx_snapshot_gmu(device, snapshot);
 }
 
 static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
diff --git a/drivers/gpu/msm/adreno_compat.c b/drivers/gpu/msm/adreno_compat.c
index d86a0c6..5a8d587 100644
--- a/drivers/gpu/msm/adreno_compat.c
+++ b/drivers/gpu/msm/adreno_compat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -113,6 +113,30 @@
 			status = 0;
 		}
 		break;
+	case KGSL_PROP_DEVICE_QTIMER:
+		{
+			struct kgsl_qtimer_prop qtimerprop = {0};
+			struct kgsl_memdesc *qtimer_desc =
+				kgsl_mmu_get_qtimer_global_entry(device);
+
+			if (sizebytes != sizeof(qtimerprop)) {
+				status = -EINVAL;
+				break;
+			}
+
+			if (qtimer_desc) {
+				qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
+				qtimerprop.size = qtimer_desc->size;
+			}
+
+			if (copy_to_user(value, &qtimerprop,
+						sizeof(qtimerprop))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
 	default:
 		/*
 		 * Call the adreno_getproperty to check if the property type
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 3fa38fa..ed5b714 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -208,6 +208,9 @@
 	if (!kgsl_state_is_awake(KGSL_DEVICE(adreno_dev)))
 		goto ret;
 
+	if (adreno_rb_empty(adreno_dev->cur_rb))
+		goto ret;
+
 	/* only check rbbm status to determine if GPU is idle */
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS, &reg_rbbm_status);
 
@@ -2055,12 +2058,25 @@
 		return 0;
 
 	/*
-	 * On A5xx, read RBBM_STATUS3:SMMU_STALLED_ON_FAULT (BIT 24) to
-	 * tell if this function was entered after a pagefault. If so, only
+	 * In the very unlikely case that the power is off, do nothing - the
+	 * state will be reset on power up and everybody will be happy
+	 */
+
+	if (!kgsl_state_is_awake(device) && (fault & ADRENO_SOFT_FAULT)) {
+		/* Clear the existing register values */
+		memset(adreno_ft_regs_val, 0,
+				adreno_ft_regs_num * sizeof(unsigned int));
+		return 0;
+	}
+
+	/*
+	 * On A5xx and A6xx, read RBBM_STATUS3:SMMU_STALLED_ON_FAULT (BIT 24)
+	 * to tell if this function was entered after a pagefault. If so, only
 	 * proceed if the fault handler has already run in the IRQ thread,
 	 * else return early to give the fault handler a chance to run.
 	 */
-	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) && adreno_is_a5xx(adreno_dev)) {
+	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
+		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))) {
 		unsigned int val;
 
 		mutex_lock(&device->mutex);
@@ -2086,7 +2102,7 @@
 	 */
 	if (!(fault & ADRENO_HARD_FAULT)) {
 		adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
-		if (adreno_is_a5xx(adreno_dev))
+		if (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))
 			reg |= 1 | (1 << 1);
 		else
 			reg |= (1 << 27) | (1 << 28);
@@ -2508,7 +2524,7 @@
 	if (!fault_detect_read_compare(adreno_dev)) {
 		adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
 		adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
-	} else {
+	} else if (dispatcher->inflight > 0) {
 		mod_timer(&dispatcher->fault_timer,
 			jiffies + msecs_to_jiffies(_fault_timer_interval));
 	}
@@ -2553,6 +2569,20 @@
 }
 
 /**
+ * adreno_dispatcher_stop_fault_timer() - stop the dispatcher fault timer
+ * @device: pointer to the KGSL device structure
+ *
+ * Stop the dispatcher fault timer
+ */
+void adreno_dispatcher_stop_fault_timer(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+	del_timer_sync(&dispatcher->fault_timer);
+}
+
+/**
  * adreno_dispatcher_close() - close the dispatcher
  * @adreno_dev: pointer to the adreno device structure
  *
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index cb9106f..72545db 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -108,6 +108,7 @@
 int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
 void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
 void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
+void adreno_dispatcher_stop_fault_timer(struct kgsl_device *device);
 
 int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
 		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 78182b7..32175f5 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -796,10 +796,10 @@
 		dwords += 6;
 
 		/*
-		 * REG_TO_MEM packet on A5xx needs another ordinal.
+		 * REG_TO_MEM packet on A5xx and above needs another ordinal.
 		 * Add 2 more dwords since we do profiling before and after.
 		 */
-		if (adreno_is_a5xx(adreno_dev))
+		if (!ADRENO_LEGACY_PM4(adreno_dev))
 			dwords += 2;
 
 		/*
@@ -816,7 +816,7 @@
 	if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
 		kernel_profiling = true;
 		dwords += 6;
-		if (adreno_is_a5xx(adreno_dev))
+		if (!ADRENO_LEGACY_PM4(adreno_dev))
 			dwords += 2;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index b4725c1..bf31c00 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -178,6 +178,7 @@
 		const char *name, struct clk *clk);
 	void (*gpu_model)(struct kgsl_device *device, char *str,
 		size_t bufsz);
+	void (*stop_fault_timer)(struct kgsl_device *device);
 };
 
 struct kgsl_ioctl {
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 49630e6..2e9f108 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1086,7 +1086,7 @@
 
 	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
 
-	gmu->idle_level = GPU_HW_CGC;
+	gmu->idle_level = GPU_HW_ACTIVE;
 
 	return 0;
 
@@ -1312,7 +1312,11 @@
 	if (!test_bit(GMU_CLK_ON, &gmu->flags))
 		return;
 
-	/* TODO: Check for conditions to enter slumber */
+	if (gpudev->wait_for_gmu_idle &&
+		gpudev->wait_for_gmu_idle(adreno_dev)) {
+		dev_err(&gmu->pdev->dev, "Failure to stop GMU\n");
+		return;
+	}
 
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
 
@@ -1345,12 +1349,19 @@
 
 	gmu_stop(device);
 
-	disable_irq(gmu->gmu_interrupt_num);
-	disable_irq(hfi->hfi_interrupt_num);
-	devm_free_irq(&gmu->pdev->dev,
-			gmu->gmu_interrupt_num, gmu);
-	devm_free_irq(&gmu->pdev->dev,
-			hfi->hfi_interrupt_num, gmu);
+	if (gmu->gmu_interrupt_num) {
+		disable_irq(gmu->gmu_interrupt_num);
+		devm_free_irq(&gmu->pdev->dev,
+				gmu->gmu_interrupt_num, gmu);
+		gmu->gmu_interrupt_num = 0;
+	}
+
+	if (hfi->hfi_interrupt_num) {
+		disable_irq(hfi->hfi_interrupt_num);
+		devm_free_irq(&gmu->pdev->dev,
+				hfi->hfi_interrupt_num, gmu);
+		hfi->hfi_interrupt_num = 0;
+	}
 
 	if (gmu->ccl) {
 		msm_bus_scale_unregister_client(gmu->ccl);
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index b5c0c96..ac2c151 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -53,11 +53,9 @@
 				CX_VOTE_ENABLE		| \
 				GFX_VOTE_ENABLE)
 
-/* Bitmask for GMU idle status check */
-#define CXGX_CPUBUSY_IGNAHB_IDLE	BIT(30)
-#define GPUBUSY_IGNAHB_IDLE		BIT(23)
-#define SLUMBER_CHECK_MASK		(CXGX_CPUBUSY_IGNAHB_IDLE  | \
-					GPUBUSY_IGNAHB_IDLE)
+/* Bitmask for GPU idle status check */
+#define GPUBUSYIGNAHB		BIT(23)
+#define CXGXCPUBUSYIGNAHB	BIT(30)
 
 /* Constants for GMU OOBs */
 #define OOB_BOOT_OPTION         0
@@ -143,12 +141,13 @@
 };
 
 enum gpu_idle_level {
-	GPU_HW_ACTIVE,
-	GPU_HW_CGC,
-	GPU_HW_SPTP_PC,
-	GPU_HW_IFPC,
-	GPU_HW_NAP,
-	GPU_HW_MIN_VOLT,
+	GPU_HW_ACTIVE = 0x0,
+	GPU_HW_SPTP_PC = 0x2,
+	GPU_HW_IFPC = 0x3,
+	GPU_HW_NAP = 0x4,
+	GPU_HW_MIN_VOLT = 0x5,
+	GPU_HW_MIN_DDR = 0x6,
+	GPU_HW_SLUMBER = 0xF
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index cfd5cd1..86d4d61 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -113,6 +113,7 @@
 static int global_pt_count;
 uint64_t global_pt_alloc;
 static struct kgsl_memdesc gpu_qdss_desc;
+static struct kgsl_memdesc gpu_qtimer_desc;
 
 void kgsl_print_global_pt_entries(struct seq_file *s)
 {
@@ -272,6 +273,50 @@
 	kgsl_sharedmem_free(&gpu_qdss_desc);
 }
 
+struct kgsl_memdesc *kgsl_iommu_get_qtimer_global_entry(void)
+{
+	return &gpu_qtimer_desc;
+}
+
+static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
+{
+	int result = 0;
+	uint32_t gpu_qtimer_entry[2];
+
+	if (!of_find_property(device->pdev->dev.of_node,
+		"qcom,gpu-qtimer", NULL))
+		return;
+
+	if (of_property_read_u32_array(device->pdev->dev.of_node,
+				"qcom,gpu-qtimer", gpu_qtimer_entry, 2)) {
+		KGSL_CORE_ERR("Failed to read gpu qtimer dts entry\n");
+		return;
+	}
+
+	gpu_qtimer_desc.flags = 0;
+	gpu_qtimer_desc.priv = 0;
+	gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
+	gpu_qtimer_desc.size = gpu_qtimer_entry[1];
+	gpu_qtimer_desc.pagetable = NULL;
+	gpu_qtimer_desc.ops = NULL;
+	gpu_qtimer_desc.dev = device->dev->parent;
+	gpu_qtimer_desc.hostptr = NULL;
+
+	result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
+			gpu_qtimer_desc.size);
+	if (result) {
+		KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
+		return;
+	}
+
+	kgsl_mmu_add_global(device, &gpu_qtimer_desc, "gpu-qtimer");
+}
+
+static inline void kgsl_cleanup_qtimer_desc(struct kgsl_mmu *mmu)
+{
+	kgsl_iommu_remove_global(mmu, &gpu_qtimer_desc);
+	kgsl_sharedmem_free(&gpu_qtimer_desc);
+}
 
 static inline void _iommu_sync_mmu_pc(bool lock)
 {
@@ -752,6 +797,7 @@
 	int write;
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
+	struct adreno_gpudev *gpudev;
 	unsigned int no_page_fault_log = 0;
 	unsigned int curr_context_id = 0;
 	struct kgsl_context *context;
@@ -768,6 +814,7 @@
 	ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
 	device = KGSL_MMU_DEVICE(mmu);
 	adreno_dev = ADRENO_DEVICE(device);
+	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
 	if (pt->name == KGSL_MMU_SECURE_PT)
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
@@ -841,6 +888,16 @@
 			ctx->name, ptbase, contextidr,
 			write ? "write" : "read", fault_type);
 
+		if (gpudev->iommu_fault_block) {
+			unsigned int fsynr1;
+
+			fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
+			KGSL_MEM_CRIT(ctx->kgsldev,
+				"FAULTING BLOCK: %s\n",
+				gpudev->iommu_fault_block(adreno_dev,
+								fsynr1));
+		}
+
 		/* Don't print the debug if this is a permissions fault */
 		if (!(flags & IOMMU_FAULT_PERMISSION)) {
 			_check_if_freed(ctx, addr, ptname);
@@ -1452,6 +1509,7 @@
 	kgsl_iommu_remove_global(mmu, &iommu->setstate);
 	kgsl_sharedmem_free(&iommu->setstate);
 	kgsl_cleanup_qdss_desc(mmu);
+	kgsl_cleanup_qtimer_desc(mmu);
 }
 
 static int _setstate_alloc(struct kgsl_device *device,
@@ -1523,6 +1581,7 @@
 
 	kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
 	kgsl_setup_qdss_desc(device);
+	kgsl_setup_qtimer_desc(device);
 
 done:
 	if (status)
@@ -2671,6 +2730,7 @@
 	.mmu_remove_global = kgsl_iommu_remove_global,
 	.mmu_getpagetable = kgsl_iommu_getpagetable,
 	.mmu_get_qdss_global_entry = kgsl_iommu_get_qdss_global_entry,
+	.mmu_get_qtimer_global_entry = kgsl_iommu_get_qtimer_global_entry,
 	.probe = kgsl_iommu_probe,
 };
 
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 9e516e1..8ea4492 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -617,6 +617,18 @@
 }
 EXPORT_SYMBOL(kgsl_mmu_get_qdss_global_entry);
 
+struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
+		struct kgsl_device *device)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (MMU_OP_VALID(mmu, mmu_get_qtimer_global_entry))
+		return mmu->mmu_ops->mmu_get_qtimer_global_entry();
+
+	return NULL;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_qtimer_global_entry);
+
 /*
  * NOMMU definitions - NOMMU really just means that the MMU is kept in pass
  * through and the GPU directly accesses physical memory. Used in debug mode
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 0f9f486..56bb317 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -83,6 +83,7 @@
 	struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu,
 			unsigned long name);
 	struct kgsl_memdesc* (*mmu_get_qdss_global_entry)(void);
+	struct kgsl_memdesc* (*mmu_get_qtimer_global_entry)(void);
 };
 
 struct kgsl_mmu_pt_ops {
@@ -233,6 +234,9 @@
 
 struct kgsl_memdesc *kgsl_mmu_get_qdss_global_entry(struct kgsl_device *device);
 
+struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
+		struct kgsl_device *device);
+
 int kgsl_mmu_sparse_dummy_map(struct kgsl_pagetable *pagetable,
 		struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size);
 
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index b0e9292..b3e2b6a 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -26,6 +26,7 @@
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
+#include "kgsl_gmu.h"
 #include "kgsl_trace.h"
 
 #define KGSL_PWRFLAGS_POWER_ON 0
@@ -65,7 +66,8 @@
 	"alwayson_clk",
 	"isense_clk",
 	"rbcpr_clk",
-	"iref_clk"
+	"iref_clk",
+	"gmu_clk"
 };
 
 static unsigned int ib_votes[KGSL_MAX_BUSLEVELS];
@@ -214,6 +216,69 @@
 #endif
 
 /**
+ * kgsl_bus_scale_request() - set GPU BW vote
+ * @device: Pointer to the kgsl_device struct
+ * @buslevel: index of bw vector[] table
+ */
+static int kgsl_bus_scale_request(struct kgsl_device *device,
+		unsigned int buslevel)
+{
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret;
+
+	/* GMU scales BW */
+	if (kgsl_gmu_isenabled(device)) {
+		if (!(gmu->flags & GMU_HFI_ON))
+			return 0;
+
+		ret = gmu_dcvs_set(gmu, INVALID_DCVS_IDX, buslevel);
+	} else {
+		/* Linux bus driver scales BW */
+		ret = msm_bus_scale_client_update_request(pwr->pcl, buslevel);
+	}
+
+	if (ret)
+		KGSL_PWR_ERR(device, "GPU BW scaling failure\n");
+
+	return ret;
+}
+
+/**
+ * kgsl_clk_set_rate() - set GPU clock rate
+ * @device: Pointer to the kgsl_device struct
+ * @pwrlevel: power level in pwrlevels[] table
+ */
+static int kgsl_clk_set_rate(struct kgsl_device *device,
+		unsigned int pwrlevel)
+{
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret = 0;
+
+	/* GMU scales GPU freq */
+	if (kgsl_gmu_isenabled(device)) {
+		/* If GMU has not been started, save it */
+		if (!(gmu->flags & GMU_HFI_ON)) {
+			gmu->wakeup_pwrlevel = pwrlevel;
+			return 0;
+		}
+
+		ret = gmu_dcvs_set(gmu, pwrlevel, INVALID_DCVS_IDX);
+	} else {
+		/* Linux clock driver scales GPU freq */
+		struct kgsl_pwrlevel *pl = &pwr->pwrlevels[pwrlevel];
+
+		ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
+	}
+
+	if (ret)
+		KGSL_PWR_ERR(device, "GPU clk freq set failure\n");
+
+	return ret;
+}
+
+/**
  * kgsl_pwrctrl_buslevel_update() - Recalculate the bus vote and send it
  * @device: Pointer to the kgsl_device struct
  * @on: true for setting and active bus vote, false to turn off the vote
@@ -259,7 +324,7 @@
 
 	/* vote for bus if gpubw-dev support is not enabled */
 	if (pwr->pcl)
-		msm_bus_scale_client_update_request(pwr->pcl, buslevel);
+		kgsl_bus_scale_request(device, buslevel);
 
 	kgsl_pwrctrl_vbif_update(ab);
 }
@@ -388,7 +453,7 @@
 	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
 	/* Change register settings if any  BEFORE pwrlevel change*/
 	kgsl_pwrctrl_pwrlevel_change_settings(device, 0);
-	clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
+	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
 	_isense_clk_set_rate(pwr, pwr->active_pwrlevel);
 
 	trace_kgsl_pwrlevel(device,
@@ -1631,9 +1696,8 @@
 				(requested_state != KGSL_STATE_NAP)) {
 				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 					clk_unprepare(pwr->grp_clks[i]);
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
+				kgsl_clk_set_rate(device,
+						pwr->num_pwrlevels - 1);
 				_isense_clk_set_rate(pwr,
 					pwr->num_pwrlevels - 1);
 			}
@@ -1645,9 +1709,8 @@
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 				clk_unprepare(pwr->grp_clks[i]);
 			if ((pwr->pwrlevels[0].gpu_freq > 0)) {
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
+				kgsl_clk_set_rate(device,
+						pwr->num_pwrlevels - 1);
 				_isense_clk_set_rate(pwr,
 					pwr->num_pwrlevels - 1);
 			}
@@ -1660,10 +1723,8 @@
 			/* High latency clock maintenance. */
 			if (device->state != KGSL_STATE_NAP) {
 				if (pwr->pwrlevels[0].gpu_freq > 0) {
-					clk_set_rate(pwr->grp_clks[0],
-						pwr->pwrlevels
-						[pwr->active_pwrlevel].
-						gpu_freq);
+					kgsl_clk_set_rate(device,
+							pwr->active_pwrlevel);
 					_isense_clk_set_rate(pwr,
 						pwr->active_pwrlevel);
 				}
@@ -2101,11 +2162,11 @@
 		if (freq > 0)
 			freq = clk_round_rate(pwr->grp_clks[0], freq);
 
-		pwr->pwrlevels[i].gpu_freq = freq;
+		if (freq >= pwr->pwrlevels[i].gpu_freq)
+			pwr->pwrlevels[i].gpu_freq = freq;
 	}
 
-	clk_set_rate(pwr->grp_clks[0],
-		pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
 	clk_set_rate(pwr->grp_clks[6],
 		clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ));
@@ -2362,8 +2423,12 @@
 	/* In order to touch a register you must hold the device mutex */
 	WARN_ON(!mutex_is_locked(&device->mutex));
 
-	/* A register access without device power will cause a fatal timeout */
-	BUG_ON(!kgsl_pwrctrl_isenabled(device));
+	/*
+	 * A register access without device power will cause a fatal timeout.
+	 * This is not valid for targets with a GMU.
+	 */
+	if (!kgsl_gmu_isenabled(device))
+		WARN_ON(!kgsl_pwrctrl_isenabled(device));
 }
 EXPORT_SYMBOL(kgsl_pre_hwaccess);
 
@@ -2383,6 +2448,9 @@
 
 	kgsl_pwrctrl_pwrlevel_change(device, level);
 
+	if (kgsl_gmu_isenabled(device))
+		return gmu_start(device);
+
 	/* Order pwrrail/clk sequence based upon platform */
 	status = kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
 	if (status)
@@ -2394,6 +2462,9 @@
 
 static void kgsl_pwrctrl_disable(struct kgsl_device *device)
 {
+	if (kgsl_gmu_isenabled(device))
+		return gmu_stop(device);
+
 	/* Order pwrrail/clk sequence based upon platform */
 	device->ftbl->regulator_disable(device);
 	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
@@ -2550,6 +2621,7 @@
 			return -EBUSY;
 		}
 
+		device->ftbl->stop_fault_timer(device);
 		kgsl_pwrscale_midframe_timer_cancel(device);
 
 		/*
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 58f16e8..62ee597 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -25,7 +25,7 @@
 
 #define KGSL_PWR_ON	0xFFFF
 
-#define KGSL_MAX_CLKS 14
+#define KGSL_MAX_CLKS 15
 #define KGSL_MAX_REGULATORS 2
 
 #define KGSL_MAX_PWRLEVELS 10
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index fc5f5a8..07a54d9 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -14,6 +14,7 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/hrtimer.h>
+#include <linux/devfreq_cooling.h>
 
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
@@ -530,7 +531,8 @@
 	struct kgsl_pwrctrl *pwr;
 	struct kgsl_pwrlevel *pwr_level;
 	int level, i;
-	unsigned long cur_freq;
+	unsigned long cur_freq, rec_freq;
+	struct dev_pm_opp *opp;
 
 	if (device == NULL)
 		return -ENODEV;
@@ -549,16 +551,31 @@
 		return 0;
 	}
 
+	/*
+	 * Thermal framework might have disabled/enabled OPP entries
+	 * for mitigation. So find the recommended frequency matching
+	 * the available opp entries
+	 */
+	rcu_read_lock();
+	rec_freq = *freq;
+	opp = devfreq_recommended_opp(dev, &rec_freq, flags);
+	if (IS_ERR(opp)) {
+		rcu_read_unlock();
+		return PTR_ERR(opp);
+	}
+	rec_freq = dev_pm_opp_get_freq(opp);
+	rcu_read_unlock();
+
 	mutex_lock(&device->mutex);
 	cur_freq = kgsl_pwrctrl_active_freq(pwr);
 	level = pwr->active_pwrlevel;
 	pwr_level = &pwr->pwrlevels[level];
 
 	/* If the governor recommends a new frequency, update it here */
-	if (*freq != cur_freq) {
+	if (rec_freq != cur_freq) {
 		level = pwr->max_pwrlevel;
 		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
-			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
+			if (rec_freq <= pwr->pwrlevels[i].gpu_freq) {
 				if (pwr->thermal_cycle == CYCLE_ACTIVE)
 					level = _thermal_adjust(pwr, i);
 				else
@@ -963,6 +980,10 @@
 	}
 
 	pwrscale->devfreqptr = devfreq;
+	pwrscale->cooling_dev = of_devfreq_cooling_register(
+					device->pdev->dev.of_node, devfreq);
+	if (IS_ERR(pwrscale->cooling_dev))
+		pwrscale->cooling_dev = NULL;
 
 	pwrscale->gpu_profile.bus_devfreq = NULL;
 	if (data->bus.num) {
@@ -1025,6 +1046,8 @@
 	pwrscale = &device->pwrscale;
 	if (!pwrscale->devfreqptr)
 		return;
+	if (pwrscale->cooling_dev)
+		devfreq_cooling_unregister(pwrscale->cooling_dev);
 
 	kgsl_pwrscale_midframe_timer_cancel(device);
 	flush_workqueue(pwrscale->devfreq_wq);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
index e3d3dc7..7e906a0 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.h
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -90,6 +90,7 @@
  * @history - History of power events with timestamps and durations
  * @popp_level - Current level of POPP mitigation
  * @popp_state - Control state for POPP, on/off, recently pushed, etc
+ * @cooling_dev - Thermal cooling device handle
  */
 struct kgsl_pwrscale {
 	struct devfreq *devfreqptr;
@@ -111,6 +112,7 @@
 	struct kgsl_pwr_history history[KGSL_PWREVENT_MAX];
 	int popp_level;
 	unsigned long popp_state;
+	struct thermal_cooling_device *cooling_dev;
 };
 
 int kgsl_pwrscale_init(struct device *dev, const char *governor);
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index d2ff8f1..340a7db 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -225,6 +225,7 @@
 #define SNAPSHOT_DEBUG_CP_ROQ     10
 #define SNAPSHOT_DEBUG_SHADER_MEMORY 11
 #define SNAPSHOT_DEBUG_CP_MERCIU 12
+#define SNAPSHOT_DEBUG_SQE_VERSION 14
 
 struct kgsl_snapshot_debug {
 	int type;    /* Type identifier for the attached data */
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index be34547..1606e7f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -506,12 +506,15 @@
 
 	wait_for_completion(&info->waitevent);
 
-	if (channel->rescind) {
-		ret = -ENODEV;
-		goto post_msg_err;
-	}
-
 post_msg_err:
+	/*
+	 * If the channel has been rescinded, we will be awakened by the
+	 * rescind handler; set the error code to zero so we don't leak
+	 * memory.
+	 */
+	if (channel->rescind)
+		ret = 0;
+
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&info->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index cb95315..d8bc4b9 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -779,6 +779,7 @@
 	/* Allocate the channel object and save this offer. */
 	newchannel = alloc_channel();
 	if (!newchannel) {
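+		/* Release the relid so the host can reclaim it */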
+		vmbus_release_relid(offer->child_relid);
 		pr_err("Unable to allocate channel object\n");
 		return;
 	}
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 6f0a51a..d439736 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -218,8 +218,10 @@
 	else
 		intel_th_trace_enable(thdev);
 
-	if (ret)
+	if (ret) {
 		pm_runtime_put(&thdev->dev);
+		module_put(thdrv->driver.owner);
+	}
 
 	return ret;
 }
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 8e38a24..58e8850 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/qcom-geni-se.h>
 
 #define SE_I2C_TX_TRANS_LEN		(0x26C)
@@ -57,6 +58,7 @@
 	struct i2c_adapter adap;
 	struct completion xfer;
 	struct i2c_msg *cur;
+	struct se_geni_rsc i2c_rsc;
 	int cur_wr;
 	int cur_rd;
 };
@@ -153,7 +155,15 @@
 	gi2c->err = 0;
 	gi2c->cur = &msgs[0];
 	reinit_completion(&gi2c->xfer);
-	enable_irq(gi2c->irq);
+	ret = pm_runtime_get_sync(gi2c->dev);
+	if (ret < 0) {
+		dev_err(gi2c->dev, "error turning on SE resources:%d\n", ret);
+		pm_runtime_put_noidle(gi2c->dev);
+		/* Set the device to suspended since resume failed */
+		pm_runtime_set_suspended(gi2c->dev);
+		return ret;
+	}
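+	/* Reinitialize the serial engine in FIFO mode now that resources are on */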
+	geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
 	qcom_geni_i2c_conf(gi2c->base, 0, 2);
 	se_config_packing(gi2c->base, 8, 4, true);
 	dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
@@ -163,7 +173,7 @@
 		u32 m_param = 0;
 		u32 m_cmd = 0;
 
-		m_param |= (stretch ? STOP_STRETCH : ~(STOP_STRETCH));
+		m_param |= (stretch ? STOP_STRETCH : 0);
 		m_param |= ((msgs[i].addr & 0x7F) << SLV_ADDR_SHFT);
 
 		gi2c->cur = &msgs[i];
@@ -206,7 +216,7 @@
 	}
 	if (ret == 0)
 		ret = i;
-	disable_irq(gi2c->irq);
+	pm_runtime_put_sync(gi2c->dev);
 	gi2c->cur = NULL;
 	gi2c->err = 0;
 	dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
@@ -239,10 +249,54 @@
 	if (!res)
 		return -EINVAL;
 
+	gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
+	if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
+		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+		return ret;
+	}
+
+	gi2c->i2c_rsc.m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
+	if (IS_ERR(gi2c->i2c_rsc.m_ahb_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.m_ahb_clk);
+		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
+		return ret;
+	}
+
+	gi2c->i2c_rsc.s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
+	if (IS_ERR(gi2c->i2c_rsc.s_ahb_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.s_ahb_clk);
+		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
+		return ret;
+	}
+
 	gi2c->base = devm_ioremap_resource(gi2c->dev, res);
 	if (IS_ERR(gi2c->base))
 		return PTR_ERR(gi2c->base);
 
+	gi2c->i2c_rsc.geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_pinctrl)) {
+		dev_err(&pdev->dev, "No pinctrl config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_pinctrl);
+		return ret;
+	}
+	gi2c->i2c_rsc.geni_gpio_active =
+		pinctrl_lookup_state(gi2c->i2c_rsc.geni_pinctrl,
+							PINCTRL_DEFAULT);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_gpio_active)) {
+		dev_err(&pdev->dev, "No default config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_gpio_active);
+		return ret;
+	}
+	gi2c->i2c_rsc.geni_gpio_sleep =
+		pinctrl_lookup_state(gi2c->i2c_rsc.geni_pinctrl,
+							PINCTRL_SLEEP);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_gpio_sleep)) {
+		dev_err(&pdev->dev, "No sleep config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_gpio_sleep);
+		return ret;
+	}
+
 	gi2c->irq = platform_get_irq(pdev, 0);
 	if (gi2c->irq < 0) {
 		dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
@@ -266,8 +320,9 @@
 
 	strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
 
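+	/* Start runtime-suspended; resources are voted for on each transfer */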
+	pm_runtime_set_suspended(gi2c->dev);
+	pm_runtime_enable(gi2c->dev);
 	i2c_add_adapter(&gi2c->adap);
-	geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
 
 	return 0;
 }
@@ -276,27 +331,67 @@
 {
 	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
 
-	disable_irq(gi2c->irq);
+	pm_runtime_disable(gi2c->dev);
 	i2c_del_adapter(&gi2c->adap);
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int geni_i2c_suspend(struct device *device)
+static int geni_i2c_resume_noirq(struct device *device)
 {
 	return 0;
 }
 
-static int geni_i2c_resume(struct device *device)
+#ifdef CONFIG_PM
+static int geni_i2c_runtime_suspend(struct device *dev)
+{
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+	disable_irq(gi2c->irq);
+	se_geni_resources_off(&gi2c->i2c_rsc);
+	return 0;
+}
+
+static int geni_i2c_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+	ret = se_geni_resources_on(&gi2c->i2c_rsc);
+	if (ret)
+		return ret;
+
+	enable_irq(gi2c->irq);
+	return 0;
+}
+
+static int geni_i2c_suspend_noirq(struct device *device)
+{
+	if (!pm_runtime_status_suspended(device))
+		return -EBUSY;
+	return 0;
+}
+#else
+static int geni_i2c_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int geni_i2c_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int geni_i2c_suspend_noirq(struct device *device)
 {
 	return 0;
 }
 #endif
 
 static const struct dev_pm_ops geni_i2c_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(
-		geni_i2c_suspend,
-		geni_i2c_resume)
+	.suspend_noirq		= geni_i2c_suspend_noirq,
+	.resume_noirq		= geni_i2c_resume_noirq,
+	.runtime_suspend	= geni_i2c_runtime_suspend,
+	.runtime_resume		= geni_i2c_runtime_resume,
 };
 
 static const struct of_device_id geni_i2c_dt_match[] = {
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 302cf14..e412230 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -163,12 +163,16 @@
 #define FG_ADC_RR_DIE_TEMP_SLOPE		2
 #define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC	25000
 
-#define FAB_ID_GF				0x30
-#define FAB_ID_SMIC				0x11
 #define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV		1303168
 #define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C	3784
 #define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV	1338433
 #define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C	3655
+#define FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV	1309001
+#define FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C	3403
+#define FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV	1295898
+#define FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C	3596
+#define FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV	1314779
+#define FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C	3496
 #define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC	25000
 #define FG_ADC_RR_CHG_THRESHOLD_SCALE		4
 
@@ -388,23 +392,70 @@
 	return 0;
 }
 
+static int rradc_get_660_fab_coeff(struct rradc_chip *chip,
+		int64_t *offset, int64_t *slope)
+{
+	switch (chip->pmic_fab_id->fab_id) {
+	case PM660_FAB_ID_GF:
+		*offset = FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV;
+		*slope = FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C;
+		break;
+	case PM660_FAB_ID_TSMC:
+		*offset = FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV;
+		*slope = FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C;
+		break;
+	default:
+		*offset = FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV;
+		*slope = FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C;
+	}
+
+	return 0;
+}
+
+static int rradc_get_8998_fab_coeff(struct rradc_chip *chip,
+		int64_t *offset, int64_t *slope)
+{
+	switch (chip->pmic_fab_id->fab_id) {
+	case PMI8998_FAB_ID_GF:
+		*offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+		*slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+		break;
+	case PMI8998_FAB_ID_SMIC:
+		*offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+		*slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
 			struct rradc_chan_prop *prop, u16 adc_code,
 			int *result_millidegc)
 {
 	int64_t uv = 0, offset = 0, slope = 0;
+	int rc = 0;
 
 	if (chip->revid_dev_node) {
-		switch (chip->pmic_fab_id->fab_id) {
-		case FAB_ID_GF:
-			offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
-			slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+		switch (chip->pmic_fab_id->pmic_subtype) {
+		case PM660_SUBTYPE:
+			rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
 			break;
-		case FAB_ID_SMIC:
-			offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
-			slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+		case PMI8998_SUBTYPE:
+			rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
 			break;
 		default:
+			pr_err("No PMIC subtype found\n");
 			return -EINVAL;
 		}
 	} else {
@@ -444,18 +495,26 @@
 			int *result_millidegc)
 {
 	int64_t uv = 0, offset = 0, slope = 0;
+	int rc = 0;
 
 	if (chip->revid_dev_node) {
-		switch (chip->pmic_fab_id->fab_id) {
-		case FAB_ID_GF:
-			offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
-			slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+		switch (chip->pmic_fab_id->pmic_subtype) {
+		case PM660_SUBTYPE:
+			rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
 			break;
-		case FAB_ID_SMIC:
-			offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
-			slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+		case PMI8998_SUBTYPE:
+			rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+			if (rc < 0) {
+				pr_err("Unable to get fab id coefficients\n");
+				return -EINVAL;
+			}
 			break;
 		default:
+			pr_err("No PMIC subtype found\n");
 			return -EINVAL;
 		}
 	} else {
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c3cfacc..2de1f52 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -151,7 +151,9 @@
 {
 	struct iio_dev *indio_dev = private;
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
-	unsigned int status, config;
+	unsigned int status, config, adc_fsm;
+	unsigned short count = 0;
+
 	status = tiadc_readl(adc_dev, REG_IRQSTATUS);
 
 	/*
@@ -165,6 +167,15 @@
 		tiadc_writel(adc_dev, REG_CTRL, config);
 		tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
 				| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+
+		/*
+		 * Wait for the idle state: the ADC needs to finish the
+		 * current conversion before the module is disabled.
+		 */
+		do {
+			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
+		} while (adc_fsm != 0x10 && count++ < 100);
+
 		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
 		return IRQ_HANDLED;
 	} else if (status & IRQENB_FIFO1THRES) {
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a3cce3a..ecf592d 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,8 +51,6 @@
 			st->report_state.report_id,
 			st->report_state.index,
 			HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
-
-		poll_value = hid_sensor_read_poll_value(st);
 	} else {
 		int val;
 
@@ -89,7 +87,9 @@
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
-	if (state && poll_value)
+	if (state)
+		poll_value = hid_sensor_read_poll_value(st);
+	if (poll_value > 0)
 		msleep_interruptible(poll_value * 2);
 
 	return 0;
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 2173531..dd3fcd1 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -767,7 +767,7 @@
 	return ret;
 }
 
-static int __exit ak8974_remove(struct i2c_client *i2c)
+static int ak8974_remove(struct i2c_client *i2c)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
 	struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -849,7 +849,7 @@
 		.of_match_table = of_match_ptr(ak8974_of_match),
 	},
 	.probe	  = ak8974_probe,
-	.remove	  = __exit_p(ak8974_remove),
+	.remove	  = ak8974_remove,
 	.id_table = ak8974_id,
 };
 module_i2c_driver(ak8974_driver);
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27..db64adf 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@
 
 	interface = intf->cur_altsetting;
 
+	if (interface->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	epirq = &interface->endpoint[0].desc;
 	epout = &interface->endpoint[1].desc;
 
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9cc6d05..23c191a 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -700,6 +700,10 @@
 	int error = -ENOMEM;
 
 	interface = intf->cur_altsetting;
+
+	if (interface->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	endpoint = &interface->endpoint[0].desc;
 
 	if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36..f4e8fbe 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@
 		return -EINVAL;
 
 	alt = pcu->ctrl_intf->cur_altsetting;
+
+	if (alt->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	pcu->ep_ctrl = &alt->endpoint[0].desc;
 	pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
 
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c..6e7ff95 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@
 	int ret, pipe, i;
 
 	interface = intf->cur_altsetting;
+
+	if (interface->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	endpoint = &interface->endpoint[0].desc;
 	if (!usb_endpoint_is_int_in(endpoint))
 		return -ENODEV;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index b93fe83..518e8a7 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1290,10 +1290,8 @@
 	/* handle buttons */
 	if (pkt_id == SS4_PACKET_ID_STICK) {
 		f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
-		if (!(priv->flags & ALPS_BUTTONPAD)) {
-			f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
-			f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
-		}
+		f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+		f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
 	} else {
 		f->left = !!(SS4_BTN_V2(p) & 0x01);
 		if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2461,14 +2459,34 @@
 	int num_y_electrode;
 	int x_pitch, y_pitch, x_phys, y_phys;
 
-	num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
-	num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+	if (IS_SS4PLUS_DEV(priv->dev_id)) {
+		num_x_electrode =
+			SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
+		num_y_electrode =
+			SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
 
-	priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
-	priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+		priv->x_max =
+			(num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
+		priv->y_max =
+			(num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
 
-	x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
-	y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+		x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+		y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+
+	} else {
+		num_x_electrode =
+			SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
+		num_y_electrode =
+			SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+
+		priv->x_max =
+			(num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+		priv->y_max =
+			(num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+
+		x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
+		y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+	}
 
 	x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
 	y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2484,7 +2502,10 @@
 {
 	unsigned char is_btnless;
 
-	is_btnless = (otp[1][1] >> 3) & 0x01;
+	if (IS_SS4PLUS_DEV(priv->dev_id))
+		is_btnless = (otp[1][0] >> 1) & 0x01;
+	else
+		is_btnless = (otp[1][1] >> 3) & 0x01;
 
 	if (is_btnless)
 		priv->flags |= ALPS_BUTTONPAD;
@@ -2492,6 +2513,21 @@
 	return 0;
 }
 
+static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
+				       struct alps_data *priv)
+{
+	bool is_dual = false;
+
+	if (IS_SS4PLUS_DEV(priv->dev_id))
+		is_dual = (otp[0][0] >> 4) & 0x01;
+
+	if (is_dual)
+		priv->flags |= ALPS_DUALPOINT |
+					ALPS_DUALPOINT_WITH_PRESSURE;
+
+	return 0;
+}
+
 static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
 				    struct alps_data *priv)
 {
@@ -2507,6 +2543,8 @@
 
 	alps_update_btn_info_ss4_v2(otp, priv);
 
+	alps_update_dual_info_ss4_v2(otp, priv);
+
 	return 0;
 }
 
@@ -2752,10 +2790,6 @@
 		if (alps_set_defaults_ss4_v2(psmouse, priv))
 			return -EIO;
 
-		if (priv->fw_ver[1] == 0x1)
-			priv->flags |= ALPS_DUALPOINT |
-					ALPS_DUALPOINT_WITH_PRESSURE;
-
 		break;
 	}
 
@@ -2826,10 +2860,7 @@
 			   ec[2] >= 0x90 && ec[2] <= 0x9d) {
 			protocol = &alps_v3_protocol_data;
 		} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-			   e7[2] == 0x14 && ec[1] == 0x02) {
-			protocol = &alps_v8_protocol_data;
-		} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-			   e7[2] == 0x28 && ec[1] == 0x01) {
+			   (e7[2] == 0x14 || e7[2] == 0x28)) {
 			protocol = &alps_v8_protocol_data;
 		} else {
 			psmouse_dbg(psmouse,
@@ -2839,7 +2870,8 @@
 	}
 
 	if (priv) {
-		/* Save the Firmware version */
+		/* Save Device ID and Firmware version */
+		memcpy(priv->dev_id, e7, 3);
 		memcpy(priv->fw_ver, ec, 3);
 		error = alps_set_protocol(psmouse, priv, protocol);
 		if (error)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index b9417e2..dbfd260 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,6 +54,16 @@
 
 #define SS4_MASK_NORMAL_BUTTONS		0x07
 
+#define SS4PLUS_COUNT_PER_ELECTRODE	128
+#define SS4PLUS_NUMSENSOR_XOFFSET	16
+#define SS4PLUS_NUMSENSOR_YOFFSET	5
+#define SS4PLUS_MIN_PITCH_MM		37
+
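+/* SS4 "plus" devices report an E7 signature of 0x73 0x03 0x28 */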
+#define IS_SS4PLUS_DEV(_b)	(((_b[0]) == 0x73) &&	\
+				 ((_b[1]) == 0x03) &&	\
+				 ((_b[2]) == 0x28)		\
+				)
+
 #define SS4_1F_X_V2(_b)		((_b[0] & 0x0007) |		\
 				 ((_b[1] << 3) & 0x0078) |	\
 				 ((_b[1] << 2) & 0x0380) |	\
@@ -263,6 +273,7 @@
 	int addr_command;
 	u16 proto_version;
 	u8 byte0, mask0;
+	u8 dev_id[3];
 	u8 fw_ver[3];
 	int flags;
 	int x_max;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index ed1935f..da5458d 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@
 
 static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
 {
-	if (data->ic_type != 0x0E)
-		return false;
-
-	switch (data->product_id) {
-	case 0x05 ... 0x07:
-	case 0x09:
-	case 0x13:
+	if (data->ic_type == 0x0E) {
+		switch (data->product_id) {
+		case 0x05 ... 0x07:
+		case 0x09:
+		case 0x13:
+			return true;
+		}
+	} else if (data->ic_type == 0x08 && data->product_id == 0x26) {
+		/* ASUS EeeBook X205TA */
 		return true;
-	default:
-		return false;
 	}
+
+	return false;
 }
 
 static int __elan_initialize(struct elan_tp_data *data)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 0cdd958..25eab45 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@
 		},
 	},
 	{
+		/* Dell Embedded Box PC 3000 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+		},
+	},
+	{
 		/* OQO Model 01 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd85205..df4bea9 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@
 	int error;
 	int i;
 
+	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index e850d7e..4d9d649 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@
 	struct input_dev *input_dev;
 	int error = -ENOMEM;
 
+	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index aefb6e1..4c0eeca 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -527,6 +527,9 @@
 	if (iface_desc->desc.bInterfaceClass != 0xFF)
 		return -ENODEV;
 
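+	/* Endpoint #4 is dereferenced below, so require at least five endpoints */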
+	if (iface_desc->desc.bNumEndpoints < 5)
+		return -ENODEV;
+
 	/* Use endpoint #4 (0x86). */
 	endpoint = &iface_desc->endpoint[4].desc;
 	if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 37dfe0a..34df44c 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -322,9 +322,6 @@
 	void (*device_reset)(struct arm_smmu_device *smmu);
 	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
 					 dma_addr_t iova);
-	void (*iova_to_phys_fault)(struct iommu_domain *domain,
-				dma_addr_t iova, phys_addr_t *phys1,
-				phys_addr_t *phys_post_tlbiall);
 };
 
 struct arm_smmu_impl_def_reg {
@@ -499,6 +496,7 @@
 
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
+	struct device			*dev;
 	struct io_pgtable_ops		*pgtbl_ops;
 	struct io_pgtable_cfg		pgtbl_cfg;
 	spinlock_t			pgtbl_lock;
@@ -1141,32 +1139,21 @@
 					 dma_addr_t iova, u32 fsr)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	phys_addr_t phys;
 	phys_addr_t phys_post_tlbiall;
 
-	smmu = smmu_domain->smmu;
-
-	if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
-		smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
-		&phys_post_tlbiall);
-	} else {
-		phys = arm_smmu_iova_to_phys_hard(domain, iova);
-		arm_smmu_tlb_inv_context(smmu_domain);
-		phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
-	}
+	phys = arm_smmu_iova_to_phys_hard(domain, iova);
+	arm_smmu_tlb_inv_context(smmu_domain);
+	phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
 
 	if (phys != phys_post_tlbiall) {
 		dev_err(smmu->dev,
 			"ATOS results differed across TLBIALL...\n"
 			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
 	}
-	if (!phys_post_tlbiall) {
-		dev_err(smmu->dev,
-			"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
-	}
 
-	return phys_post_tlbiall;
+	return (phys == 0 ? phys_post_tlbiall : phys);
 }
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -1260,8 +1247,11 @@
 				dev_err(smmu->dev,
 					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
 					dev_name(smmu->dev));
-			dev_err(smmu->dev,
-				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
+			if (phys_atos)
+				dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
+					&phys_atos);
+			else
+				dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
 			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
 		}
 		ret = IRQ_NONE;
@@ -1493,7 +1483,8 @@
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
-					struct arm_smmu_device *smmu)
+					struct arm_smmu_device *smmu,
+					struct device *dev)
 {
 	int irq, start, ret = 0;
 	unsigned long ias, oas;
@@ -1642,6 +1633,7 @@
 	};
 
 	smmu_domain->smmu = smmu;
+	smmu_domain->dev = dev;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
 					smmu_domain);
 	if (!pgtbl_ops) {
@@ -2130,7 +2122,7 @@
 		return ret;
 
 	/* Ensure that the domain is finalised */
-	ret = arm_smmu_init_domain_context(domain, smmu);
+	ret = arm_smmu_init_domain_context(domain, smmu, dev);
 	if (ret < 0)
 		goto out_power_off;
 
@@ -2331,9 +2323,11 @@
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
 	if (smmu_domain->smmu->arch_ops &&
-	    smmu_domain->smmu->arch_ops->iova_to_phys_hard)
-		return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
+	    smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
+		ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
 						domain, iova);
+		return ret;
+	}
 
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
@@ -3068,64 +3062,27 @@
 	qsmmuv2_resume(smmu);
 }
 
-static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
-					      dma_addr_t iova, bool halt)
+static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
+				dma_addr_t iova)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	int ret;
 	phys_addr_t phys = 0;
 	unsigned long flags;
+	u32 sctlr, sctlr_orig, fsr;
+	void __iomem *cb_base;
 
 	ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
 	if (ret)
-		return 0;
+		return ret;
 
-	if (halt) {
-		ret = qsmmuv2_halt(smmu);
-		if (ret)
-			goto out_power_off;
-	}
-
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	spin_lock(&smmu->atos_lock);
-	phys = __arm_smmu_iova_to_phys_hard(domain, iova);
-	spin_unlock(&smmu->atos_lock);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-
-	if (halt)
-		qsmmuv2_resume(smmu);
-
-out_power_off:
-	arm_smmu_power_off(smmu_domain->smmu->pwr);
-	return phys;
-}
-
-static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
-					      dma_addr_t iova)
-{
-	return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
-}
-
-static void qsmmuv2_iova_to_phys_fault(
-				struct iommu_domain *domain,
-				dma_addr_t iova, phys_addr_t *phys,
-				phys_addr_t *phys_post_tlbiall)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	struct arm_smmu_device *smmu;
-	void __iomem *cb_base;
-	u64 sctlr, sctlr_orig;
-	u32 fsr;
-
-	smmu = smmu_domain->smmu;
-	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	spin_lock_irqsave(&smmu->atos_lock, flags);
+	cb_base = ARM_SMMU_CB_BASE(smmu) +
+			ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
 
 	qsmmuv2_halt_nowait(smmu);
-
 	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
-
 	qsmmuv2_wait_for_halt(smmu);
 
 	/* clear FSR to allow ATOS to log any faults */
@@ -3137,20 +3094,21 @@
 	sctlr = sctlr_orig & ~SCTLR_CFCFG;
 	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
 
-	*phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
-	arm_smmu_tlb_inv_context(smmu_domain);
-	*phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
+	phys = __arm_smmu_iova_to_phys_hard(domain, iova);
 
 	/* restore SCTLR */
 	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
 
 	qsmmuv2_resume(smmu);
+	spin_unlock_irqrestore(&smmu->atos_lock, flags);
+
+	arm_smmu_power_off(smmu_domain->smmu->pwr);
+	return phys;
 }
 
 struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
 	.device_reset = qsmmuv2_device_reset,
 	.iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
-	.iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
 };
 
 static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
@@ -3988,14 +3946,38 @@
 IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
 IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
 
+#define TCU_HW_VERSION_HLOS1		(0x18)
+
 #define DEBUG_SID_HALT_REG		0x0
 #define DEBUG_SID_HALT_VAL		(0x1 << 16)
+#define DEBUG_SID_HALT_SID_MASK		0x3ff
+
+#define DEBUG_VA_ADDR_REG		0x8
+
+#define DEBUG_TXN_TRIGG_REG		0x18
+#define DEBUG_TXN_AXPROT_SHIFT		6
+#define DEBUG_TXN_AXCACHE_SHIFT		2
+#define DEBUG_TRX_WRITE			(0x1 << 1)
+#define DEBUG_TXN_READ			(0x0 << 1)
+#define DEBUG_TXN_TRIGGER		0x1
 
 #define DEBUG_SR_HALT_ACK_REG		0x20
 #define DEBUG_SR_HALT_ACK_VAL		(0x1 << 1)
+#define DEBUG_SR_ECATS_RUNNING_VAL	(0x1 << 0)
+
+#define DEBUG_PAR_REG			0x28
+#define DEBUG_PAR_PA_MASK		((0x1ULL << 36) - 1)
+#define DEBUG_PAR_PA_SHIFT		12
+#define DEBUG_PAR_FAULT_VAL		0x1
 
 #define TBU_DBG_TIMEOUT_US		30000
 
+struct qsmmuv500_archdata {
+	struct list_head		tbus;
+	void __iomem			*tcu_base;
+	u32				version;
+};
+
 struct qsmmuv500_tbu_device {
 	struct list_head		list;
 	struct device			*dev;
@@ -4004,6 +3986,8 @@
 	void __iomem			*status_reg;
 
 	struct arm_smmu_power_resources *pwr;
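+	/* Stream ID range served by this TBU */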
+	u32				sid_start;
+	u32				num_sids;
 
 	/* Protects halt count */
 	spinlock_t			halt_lock;
@@ -4013,10 +3997,10 @@
 static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 	int ret = 0;
 
-	list_for_each_entry(tbu, list, list) {
+	list_for_each_entry(tbu, &data->tbus, list) {
 		ret = arm_smmu_power_on(tbu->pwr);
 		if (ret)
 			break;
@@ -4024,7 +4008,7 @@
 	if (!ret)
 		return 0;
 
-	list_for_each_entry_continue_reverse(tbu, list, list) {
+	list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
 		arm_smmu_power_off(tbu->pwr);
 	}
 	return ret;
@@ -4033,9 +4017,9 @@
 static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 
-	list_for_each_entry_reverse(tbu, list, list) {
+	list_for_each_entry_reverse(tbu, &data->tbus, list) {
 		arm_smmu_power_off(tbu->pwr);
 	}
 }
@@ -4101,10 +4085,10 @@
 static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 	int ret = 0;
 
-	list_for_each_entry(tbu, list, list) {
+	list_for_each_entry(tbu, &data->tbus, list) {
 		ret = qsmmuv500_tbu_halt(tbu);
 		if (ret)
 			break;
@@ -4113,7 +4097,7 @@
 	if (!ret)
 		return 0;
 
-	list_for_each_entry_continue_reverse(tbu, list, list) {
+	list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
 		qsmmuv500_tbu_resume(tbu);
 	}
 	return ret;
@@ -4122,13 +4106,27 @@
 static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 
-	list_for_each_entry(tbu, list, list) {
+	list_for_each_entry(tbu, &data->tbus, list) {
 		qsmmuv500_tbu_resume(tbu);
 	}
 }
 
+static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
+	struct arm_smmu_device *smmu, u32 sid)
+{
+	struct qsmmuv500_tbu_device *tbu = NULL;
+	struct qsmmuv500_archdata *data = smmu->archdata;
+
+	list_for_each_entry(tbu, &data->tbus, list) {
+		if (tbu->sid_start <= sid &&
+		    sid < tbu->sid_start + tbu->num_sids)
+			break;
+	}
+	return tbu;
+}
+
 static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
 {
 	int i, ret;
@@ -4147,6 +4145,187 @@
 	qsmmuv500_tbu_power_off_all(smmu);
 }
 
+static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
+				struct qsmmuv500_tbu_device *tbu,
+				unsigned long *flags)
+{
+	struct arm_smmu_device *smmu = tbu->smmu;
+	struct qsmmuv500_archdata *data = smmu->archdata;
+	u32 val;
+
+	spin_lock_irqsave(&smmu->atos_lock, *flags);
+	/* The status register is not accessible on version 1.0 */
+	if (data->version == 0x01000000)
+		return 0;
+
+	if (readl_poll_timeout_atomic(tbu->status_reg,
+					val, (val == 0x1), 0,
+					TBU_DBG_TIMEOUT_US)) {
+		dev_err(tbu->dev, "ECATS hw busy!\n");
+		spin_unlock_irqrestore(&smmu->atos_lock, *flags);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
+					struct qsmmuv500_tbu_device *tbu,
+					unsigned long *flags)
+{
+	struct arm_smmu_device *smmu = tbu->smmu;
+	struct qsmmuv500_archdata *data = smmu->archdata;
+
+	/* The status register is not accessible on version 1.0 */
+	if (data->version != 0x01000000)
+		writel_relaxed(0, tbu->status_reg);
+	spin_unlock_irqrestore(&smmu->atos_lock, *flags);
+}
+
+/*
+ * Zero means failure.
+ */
+static phys_addr_t qsmmuv500_iova_to_phys(
+		struct iommu_domain *domain, dma_addr_t iova, u32 sid)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct qsmmuv500_tbu_device *tbu;
+	int ret;
+	phys_addr_t phys = 0;
+	u64 val, fsr;
+	unsigned long flags;
+	void __iomem *cb_base;
+	u32 sctlr_orig, sctlr;
+	int needs_redo = 0;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	tbu = qsmmuv500_find_tbu(smmu, sid);
+	if (!tbu)
+		return 0;
+
+	ret = arm_smmu_power_on(tbu->pwr);
+	if (ret)
+		return 0;
+
+	/*
+	 * Disable client transactions & wait for existing operations to
+	 * complete.
+	 */
+	ret = qsmmuv500_tbu_halt(tbu);
+	if (ret)
+		goto out_power_off;
+
+	/* Only one concurrent atos operation */
+	ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
+	if (ret)
+		goto out_resume;
+
+	/*
+	 * We can be called from an interrupt handler with FSR already set
+	 * so terminate the faulting transaction prior to starting ECATS.
+	 * No new racing faults can occur since we are in the halted state.
+	 * ECATS can trigger the fault interrupt, so disable it temporarily
+	 * and check for an interrupt manually.
+	 */
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+	if (fsr & FSR_FAULT) {
+		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+		writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+	}
+	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+	sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+redo:
+	/* Set address and stream-id */
+	val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+	val |= sid & DEBUG_SID_HALT_SID_MASK;
+	writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+	writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
+
+	/*
+	 * Write-back Read and Write-Allocate
+	 * Privileged, non-secure, data transaction
+	 * Read operation.
+	 */
+	val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
+	val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
+	val |= DEBUG_TXN_TRIGGER;
+	writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
+
+	ret = 0;
+	if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
+				val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
+				0, TBU_DBG_TIMEOUT_US)) {
+		dev_err(tbu->dev, "ECATS translation timed out!\n");
+	}
+
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+	if (fsr & FSR_FAULT) {
+		dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
+			fsr);
+		ret = -EINVAL;
+
+		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+		/*
+		 * Clear pending interrupts
+		 * Barrier required to ensure that the FSR is cleared
+		 * before resuming SMMU operation
+		 */
+		wmb();
+		writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+	}
+
+	val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
+	if (val & DEBUG_PAR_FAULT_VAL) {
+		dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
+			val);
+		ret = -EINVAL;
+	}
+
+	phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
+	if (ret < 0)
+		phys = 0;
+
+	/* Reset hardware */
+	writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
+	writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
+
+	/*
+	 * After a failed translation, the next successful translation will
+	 * incorrectly be reported as a failure.
+	 */
+	if (!phys && needs_redo++ < 2)
+		goto redo;
+
+	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+	qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
+
+out_resume:
+	qsmmuv500_tbu_resume(tbu);
+
+out_power_off:
+	arm_smmu_power_off(tbu->pwr);
+
+	return phys;
+}
+
+static phys_addr_t qsmmuv500_iova_to_phys_hard(
+		struct iommu_domain *domain, dma_addr_t iova)
+{
+	u16 sid;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct iommu_fwspec *fwspec;
+
+	/* Select a sid */
+	fwspec = smmu_domain->dev->iommu_fwspec;
+	sid = (u16)fwspec->ids[0];
+
+	return qsmmuv500_iova_to_phys(domain, iova, sid);
+}
+
 static int qsmmuv500_tbu_register(struct device *dev, void *data)
 {
 	struct arm_smmu_device *smmu = data;
@@ -4168,16 +4347,26 @@
 
 static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
 {
+	struct resource *res;
 	struct device *dev = smmu->dev;
-	struct list_head *list;
+	struct qsmmuv500_archdata *data;
+	struct platform_device *pdev;
 	int ret;
 
-	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
-	if (!list)
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(list);
-	smmu->archdata = list;
+	INIT_LIST_HEAD(&data->tbus);
+
+	pdev = container_of(dev, struct platform_device, dev);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
+	data->tcu_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(data->tcu_base))
+		return PTR_ERR(data->tcu_base);
+
+	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
+	smmu->archdata = data;
 
 	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 	if (ret)
@@ -4194,6 +4383,7 @@
 struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
 	.init = qsmmuv500_arch_init,
 	.device_reset = qsmmuv500_device_reset,
+	.iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
 };
 
 static const struct of_device_id qsmmuv500_tbu_of_match[] = {
@@ -4206,6 +4396,8 @@
 	struct resource *res;
 	struct device *dev = &pdev->dev;
 	struct qsmmuv500_tbu_device *tbu;
+	const __be32 *cell;
+	int len;
 
 	tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
 	if (!tbu)
@@ -4225,6 +4417,13 @@
 	if (IS_ERR(tbu->status_reg))
 		return PTR_ERR(tbu->status_reg);
 
+	cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
+	if (!cell || len < 8)
+		return -EINVAL;
+
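+	/* First cell is the base stream ID, second is the number of SIDs */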
+	tbu->sid_start = of_read_number(cell, 1);
+	tbu->num_sids = of_read_number(cell + 1, 1);
+
 	tbu->pwr = arm_smmu_init_power_resources(pdev);
 	if (IS_ERR(tbu->pwr))
 		return PTR_ERR(tbu->pwr);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 34c7381..aded314 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,8 @@
 #include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
-
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 /* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
 #define FAST_PAGE_SHIFT		12
@@ -633,7 +634,7 @@
 	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
 	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
 	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
-		fast->pgtbl_pmds, ptep - fast->pgtbl_pmds);
+		fast->pgtbl_pmds, bitmap_idx);
 	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
 		       32, 8, fast->bitmap, fast->bitmap_size, false);
 }
@@ -683,7 +684,7 @@
  * fast_smmu_attach_device function.
  */
 static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
-	dma_addr_t base, size_t size)
+	dma_addr_t base, u64 size)
 {
 	struct dma_fast_smmu_mapping *fast;
 
@@ -696,7 +697,11 @@
 	fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
 	fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);
 
-	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL);
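+	/* Try kzalloc first and fall back to vzalloc for large bitmaps */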
+	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+								__GFP_NORETRY);
+	if (!fast->bitmap)
+		fast->bitmap = vzalloc(fast->bitmap_size);
+
 	if (!fast->bitmap)
 		goto err2;
 
@@ -726,7 +731,7 @@
 	int atomic_domain = 1;
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
-	size_t size = mapping->bits << PAGE_SHIFT;
+	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 
 	if (mapping->base + size > (SZ_1G * 4ULL))
 		return -EINVAL;
@@ -780,7 +785,7 @@
 	dev->archdata.mapping = NULL;
 	set_dma_ops(dev, NULL);
 
-	kfree(mapping->fast->bitmap);
+	kvfree(mapping->fast->bitmap);
 	kfree(mapping->fast);
 }
 EXPORT_SYMBOL(fast_smmu_detach_device);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 34be95e..b9e50c1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -915,7 +915,7 @@
 				 * which we used for the IOMMU lookup. Strictly speaking
 				 * we could do this for all PCI devices; we only need to
 				 * get the BDF# from the scope table for ACPI matches. */
-				if (pdev->is_virtfn)
+				if (pdev && pdev->is_virtfn)
 					goto got_pdev;
 
 				*bus = drhd->devices[i].bus;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 393e20c4..f7739ae 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -78,7 +78,7 @@
 
 /* Calculate the block/page mapping size at level l for pagetable in d. */
 #define ARM_LPAE_BLOCK_SIZE(l,d)					\
-	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
+	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
 		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
 
 /* Page table bits */
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 85fe317..9b13fce 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/io-pgtable-fast.h>
 #include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
 
 #include "io-pgtable.h"
 
@@ -268,11 +269,18 @@
 	return size;
 }
 
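+/* Byte offset of the PGD entry for an IOVA (descriptor index * 8) */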
+#if defined(CONFIG_ARM64)
+#define FAST_PGDNDX(va) (((va) & 0x7fc0000000) >> 27)
+#elif defined(CONFIG_ARM)
+#define FAST_PGDNDX(va) (((va) & 0xc0000000) >> 27)
+#endif
+
 static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
 					  unsigned long iova)
 {
 	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
 	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
+	unsigned long pgd;
 	phys_addr_t phys;
 	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
 	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
@@ -282,8 +290,9 @@
 
 	/* TODO: clean up some of these magic numbers... */
 
-	pgdp = (av8l_fast_iopte *)
-		(((unsigned long)data->pgd) | ((iova & 0x7fc0000000) >> 27));
+	pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova);
+	pgdp = (av8l_fast_iopte *)pgd;
+
 	pte = *pgdp;
 	if (((pte >> pts) & ptm) != ptt)
 		return 0;
@@ -345,7 +354,12 @@
 	int i, j, pg = 0;
 	struct page **pages, *page;
 
-	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL);
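+	/* Try kmalloc first and fall back to vmalloc for the page array */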
+	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, __GFP_NOWARN |
+							__GFP_NORETRY);
+
+	if (!pages)
+		pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES);
+
 	if (!pages)
 		return -ENOMEM;
 
@@ -414,7 +428,7 @@
 	for (i = 0; i < pg; ++i)
 		__free_page(pages[i]);
 err_free_pages_arr:
-	kfree(pages);
+	kvfree(pages);
 	return -ENOMEM;
 }
 
@@ -473,6 +487,9 @@
 
 	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
 	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
+#if defined(CONFIG_ARM)
+	reg |= ARM_32_LPAE_TCR_EAE;
+#endif
 	cfg->av8l_fast_cfg.tcr = reg;
 
 	/* MAIRs */
@@ -512,7 +529,7 @@
 	vunmap(data->pmds);
 	for (i = 0; i < NUM_PGTBL_PAGES; ++i)
 		__free_page(data->pages[i]);
-	kfree(data->pages);
+	kvfree(data->pages);
 	kfree(data);
 }
 
@@ -560,7 +577,7 @@
 						 const phys_addr_t phys_start,
 						 const size_t size)
 {
-	unsigned long iova = iova_start;
+	u64 iova = iova_start;
 	phys_addr_t phys = phys_start;
 
 	while (iova < (iova_start + size)) {
@@ -576,11 +593,12 @@
 static int __init av8l_fast_positive_testing(void)
 {
 	int failed = 0;
-	unsigned long iova;
+	u64 iova;
 	struct io_pgtable_ops *ops;
 	struct io_pgtable_cfg cfg;
 	struct av8l_fast_io_pgtable *data;
 	av8l_fast_iopte *pmds;
+	u64 max = SZ_1G * 4ULL - 1;
 
 	cfg = (struct io_pgtable_cfg) {
 		.quirks = 0,
@@ -600,19 +618,18 @@
 	pmds = data->pmds;
 
 	/* map the entire 4GB VA space with 4K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
 			failed++;
 			continue;
 		}
 	}
-
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
 			failed++;
 	}
@@ -621,7 +638,7 @@
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 8K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+	for (iova = 0; iova < max; iova += SZ_8K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -629,11 +646,11 @@
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all with 8K unmap calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+	for (iova = 0; iova < max; iova += SZ_8K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
 			failed++;
 	}
@@ -642,7 +659,7 @@
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 16K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -650,11 +667,11 @@
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
 			failed++;
 	}
@@ -663,7 +680,7 @@
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 64K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_64K) {
+	for (iova = 0; iova < max; iova += SZ_64K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -671,11 +688,11 @@
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all at once */
-	if (WARN_ON(ops->unmap(ops, 0, SZ_1G * 4UL) != SZ_1G * 4UL))
+	if (WARN_ON(ops->unmap(ops, 0, max) != max))
 		failed++;
 
 	free_io_pgtable_ops(ops);
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 45ffb40..5730126 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -822,7 +822,7 @@
 	if (!virt)
 		goto out;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
 	if (!mapping) {
 		seq_puts(s, "fast_smmu_create_mapping failed\n");
 		goto out_kfree;
@@ -922,8 +922,8 @@
 static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
 {
 	int i, ret = 0;
-	unsigned long iova;
-	const unsigned long max = SZ_1G * 4UL;
+	u64 iova;
+	const u64  max = SZ_1G * 4ULL - 1;
 	void *virt;
 	phys_addr_t phys;
 	dma_addr_t dma_addr;
@@ -995,8 +995,8 @@
 	}
 
 	/* we're all full again. unmap everything. */
-	for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
-		dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += SZ_8K)
+		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
 
 out:
 	free_pages((unsigned long)virt, get_order(SZ_8K));
@@ -1029,7 +1029,7 @@
 			   const size_t size)
 {
 	u64 iova;
-	const unsigned long max = SZ_1G * 4UL;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int i, remapped, unmapped, ret = 0;
 	void *virt;
 	dma_addr_t dma_addr, dma_addr2;
@@ -1061,9 +1061,9 @@
 	fib_init(&fib);
 	for (iova = get_next_fib(&fib) * size;
 	     iova < max - size;
-	     iova = get_next_fib(&fib) * size) {
-		dma_addr = iova;
-		dma_addr2 = max - size - iova;
+	     iova = (u64)get_next_fib(&fib) * size) {
+		dma_addr = (dma_addr_t)(iova);
+		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
 		if (dma_addr == dma_addr2) {
 			WARN(1,
 			"%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
@@ -1089,8 +1089,8 @@
 		ret = -EINVAL;
 	}
 
-	for (dma_addr = 0; dma_addr < max; dma_addr += size)
-		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
 
 out:
 	free_pages((unsigned long)virt, get_order(size));
@@ -1118,10 +1118,11 @@
 static int __full_va_sweep(struct device *dev, struct seq_file *s,
 			   const size_t size, struct iommu_domain *domain)
 {
-	unsigned long iova;
+	u64 iova;
 	dma_addr_t dma_addr;
 	void *virt;
 	phys_addr_t phys;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int ret = 0, i;
 
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
@@ -1136,7 +1137,7 @@
 	}
 	phys = virt_to_phys(virt);
 
-	for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
+	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
 		unsigned long expected = iova;
 
 		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
@@ -1184,8 +1185,8 @@
 	}
 
 out:
-	for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
-		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
 
 	free_pages((unsigned long)virt, get_order(size));
 	return ret;
@@ -1374,7 +1375,8 @@
 	int ret = -EINVAL, fast = 1;
 	phys_addr_t pt_phys;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL));
 	if (!mapping)
 		goto out;
 
@@ -1443,7 +1445,9 @@
 	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
 	int ret = -EINVAL;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	/* Make the size equal to 4 GB - 1 (ULONG_MAX on 32-bit) */
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL - 1));
 	if (!mapping)
 		goto out;
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5dee30..acb9d25 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1598,6 +1598,14 @@
 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
 }
 
+static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+	struct its_node *its = data;
+
+	/* On QDF2400, the size of the ITE is 16 bytes */
+	its->ite_size = 16;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
 	{
@@ -1615,6 +1623,14 @@
 		.init	= its_enable_quirk_cavium_23144,
 	},
 #endif
+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
+	{
+		.desc	= "ITS: QDF2400 erratum 0065",
+		.iidr	= 0x00001070, /* QDF2400 ITS rev 1.x */
+		.mask	= 0xffffffff,
+		.init	= its_enable_quirk_qdf2400_e0065,
+	},
+#endif
 	{
 	}
 };
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d..7f1c625 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@
 		return -ENODEV;
 	}
 
+	if (hostif->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	dev_info(&udev->dev,
 		 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 01e553c..b045e3b 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -1077,6 +1077,8 @@
 			pr_err("trigger lmh mitigation failed, rc=%d\n", rc);
 			return rc;
 		}
+		/* Wait for LMH mitigation to take effect */
+		udelay(500);
 	}
 
 	if (led->trigger_chgr) {
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index c31d2e1..3060cfa 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -33,6 +33,7 @@
 
 /* ctrl registers */
 #define QPNP_WLED_FAULT_STATUS(b)	(b + 0x08)
+#define QPNP_WLED_INT_RT_STS(b)		(b + 0x10)
 #define QPNP_WLED_EN_REG(b)		(b + 0x46)
 #define QPNP_WLED_FDBK_OP_REG(b)	(b + 0x48)
 #define QPNP_WLED_VREF_REG(b)		(b + 0x49)
@@ -44,6 +45,7 @@
 #define QPNP_WLED_SOFTSTART_RAMP_DLY(b) (b + 0x53)
 #define QPNP_WLED_VLOOP_COMP_RES_REG(b)	(b + 0x55)
 #define QPNP_WLED_VLOOP_COMP_GM_REG(b)	(b + 0x56)
+#define QPNP_WLED_EN_PSM_REG(b)		(b + 0x5A)
 #define QPNP_WLED_PSM_CTRL_REG(b)	(b + 0x5B)
 #define QPNP_WLED_LCD_AUTO_PFM_REG(b)	(b + 0x5C)
 #define QPNP_WLED_SC_PRO_REG(b)		(b + 0x5E)
@@ -82,12 +84,13 @@
 #define QPNP_WLED_VREF_PSM_MIN_MV			400
 #define QPNP_WLED_VREF_PSM_MAX_MV			750
 #define QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV		450
-#define QPNP_WLED_PSM_CTRL_OVERWRITE			0x80
+#define QPNP_WLED_PSM_OVERWRITE_BIT			BIT(7)
 #define QPNP_WLED_LCD_AUTO_PFM_DFLT_THRESH		1
 #define QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX		0xF
 #define QPNP_WLED_LCD_AUTO_PFM_EN_SHIFT			7
 #define QPNP_WLED_LCD_AUTO_PFM_EN_BIT			BIT(7)
 #define QPNP_WLED_LCD_AUTO_PFM_THRESH_MASK		GENMASK(3, 0)
+#define QPNP_WLED_EN_PSM_BIT				BIT(7)
 
 #define QPNP_WLED_ILIM_MASK		GENMASK(2, 0)
 #define QPNP_WLED_ILIM_OVERWRITE	BIT(7)
@@ -117,6 +120,9 @@
 		QPNP_WLED_TEST4_EN_CLAMP_BIT |		\
 		QPNP_WLED_TEST4_EN_SOFT_START_BIT)
 #define QPNP_WLED_TEST4_EN_IIND_UP	0x1
+#define QPNP_WLED_ILIM_FAULT_BIT	BIT(0)
+#define QPNP_WLED_OVP_FAULT_BIT		BIT(1)
+#define QPNP_WLED_SC_FAULT_BIT		BIT(2)
 
 /* sink registers */
 #define QPNP_WLED_CURR_SINK_REG(b)	(b + 0x46)
@@ -335,6 +341,7 @@
  *  @ lcd_auto_pfm_thresh - the threshold for lcd auto pfm mode
  *  @ loop_auto_gm_en - select if auto gm is enabled
  *  @ lcd_auto_pfm_en - select if auto pfm is enabled in lcd mode
+ *  @ lcd_psm_ctrl - select if psm needs to be controlled in lcd mode
  *  @ avdd_mode_spmi - enable avdd programming via spmi
  *  @ en_9b_dim_res - enable or disable 9bit dimming
  *  @ en_phase_stag - enable or disable phase staggering
@@ -380,6 +387,7 @@
 	u8			lcd_auto_pfm_thresh;
 	bool			loop_auto_gm_en;
 	bool			lcd_auto_pfm_en;
+	bool			lcd_psm_ctrl;
 	bool			avdd_mode_spmi;
 	bool			en_9b_dim_res;
 	bool			en_phase_stag;
@@ -549,6 +557,30 @@
 	return 0;
 }
 
+static int qpnp_wled_psm_config(struct qpnp_wled *wled, bool enable)
+{
+	int rc;
+
+	if (!wled->lcd_psm_ctrl)
+		return 0;
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_EN_PSM_REG(wled->ctrl_base),
+			QPNP_WLED_EN_PSM_BIT,
+			enable ? QPNP_WLED_EN_PSM_BIT : 0);
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_wled_masked_write_reg(wled,
+			QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base),
+			QPNP_WLED_PSM_OVERWRITE_BIT,
+			enable ? QPNP_WLED_PSM_OVERWRITE_BIT : 0);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
 static int qpnp_wled_module_en(struct qpnp_wled *wled,
 				u16 base_addr, bool state)
 {
@@ -561,21 +593,31 @@
 	if (rc < 0)
 		return rc;
 
-	if (wled->ovp_irq > 0) {
-		if (state && wled->ovp_irq_disabled) {
-			/*
-			 * Wait for at least 10ms before enabling OVP fault
-			 * interrupt after enabling the module so that soft
-			 * start is completed. Keep OVP interrupt disabled
-			 * when the module is disabled.
-			 */
-			usleep_range(10000, 11000);
+	/*
+	 * Wait for at least 10ms before enabling OVP fault interrupt after
+	 * enabling the module so that soft start is completed. Also, this
+	 * delay can be used to control PSM during enable when required. Keep
+	 * OVP interrupt disabled when the module is disabled.
+	 */
+	if (state) {
+		usleep_range(10000, 11000);
+		rc = qpnp_wled_psm_config(wled, false);
+		if (rc < 0)
+			return rc;
+
+		if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) {
 			enable_irq(wled->ovp_irq);
 			wled->ovp_irq_disabled = false;
-		} else if (!state && !wled->ovp_irq_disabled) {
+		}
+	} else {
+		if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) {
 			disable_irq(wled->ovp_irq);
 			wled->ovp_irq_disabled = true;
 		}
+
+		rc = qpnp_wled_psm_config(wled, true);
+		if (rc < 0)
+			return rc;
 	}
 
 	return 0;
@@ -990,7 +1032,7 @@
 		reg &= QPNP_WLED_VREF_PSM_MASK;
 		reg |= ((wled->vref_psm_mv - QPNP_WLED_VREF_PSM_MIN_MV)/
 			QPNP_WLED_VREF_PSM_STEP_MV);
-		reg |= QPNP_WLED_PSM_CTRL_OVERWRITE;
+		reg |= QPNP_WLED_PSM_OVERWRITE_BIT;
 		rc = qpnp_wled_write_reg(wled,
 				QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base), reg);
 		if (rc)
@@ -1053,16 +1095,25 @@
 {
 	struct qpnp_wled *wled = _wled;
 	int rc;
-	u8 val;
+	u8 fault_sts, int_sts;
 
 	rc = qpnp_wled_read_reg(wled,
-			QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &val);
+			QPNP_WLED_INT_RT_STS(wled->ctrl_base), &int_sts);
+	if (rc < 0) {
+		pr_err("Error in reading WLED_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = qpnp_wled_read_reg(wled,
+			QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &fault_sts);
 	if (rc < 0) {
 		pr_err("Error in reading WLED_FAULT_STATUS rc=%d\n", rc);
 		return IRQ_HANDLED;
 	}
 
-	pr_err("WLED OVP fault detected, fault_status= %x\n", val);
+	if (fault_sts & (QPNP_WLED_OVP_FAULT_BIT | QPNP_WLED_ILIM_FAULT_BIT))
+		pr_err("WLED OVP fault detected, int_sts=%x fault_sts= %x\n",
+			int_sts, fault_sts);
 	return IRQ_HANDLED;
 }
 
@@ -1677,6 +1728,8 @@
 				wled->ovp_irq, rc);
 			return rc;
 		}
+		disable_irq(wled->ovp_irq);
+		wled->ovp_irq_disabled = true;
 	}
 
 	if (wled->sc_irq >= 0) {
@@ -2063,6 +2116,8 @@
 	wled->en_ext_pfet_sc_pro = of_property_read_bool(pdev->dev.of_node,
 					"qcom,en-ext-pfet-sc-pro");
 
+	wled->lcd_psm_ctrl = of_property_read_bool(pdev->dev.of_node,
+				"qcom,lcd-psm-ctrl");
 	return 0;
 }
 
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index 85a6be8..817dfa3 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -897,9 +897,10 @@
 			}
 		}
 
-		if (led->mpp_cfg->pwm_mode != MANUAL_MODE)
+		if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
 			pwm_enable(led->mpp_cfg->pwm_cfg->pwm_dev);
-		else {
+			led->mpp_cfg->pwm_cfg->pwm_enabled = 1;
+		} else {
 			if (led->cdev.brightness < LED_MPP_CURRENT_MIN)
 				led->cdev.brightness = LED_MPP_CURRENT_MIN;
 			else {
@@ -950,6 +951,7 @@
 			led->mpp_cfg->pwm_mode =
 				led->mpp_cfg->pwm_cfg->default_mode;
 			pwm_disable(led->mpp_cfg->pwm_cfg->pwm_dev);
+			led->mpp_cfg->pwm_cfg->pwm_enabled = 0;
 		}
 		rc = qpnp_led_masked_write(led,
 					LED_MPP_MODE_CTRL(led->base),
@@ -1606,7 +1608,7 @@
 			dev_err(&led->pdev->dev, "pwm enable failed\n");
 			return rc;
 		}
-
+		led->kpdbl_cfg->pwm_cfg->pwm_enabled = 1;
 		set_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
 
 		/* is_kpdbl_master_turn_on will be set to true when GPLED1
@@ -1642,6 +1644,7 @@
 						"pwm enable failed\n");
 					return rc;
 				}
+				led->kpdbl_cfg->pwm_cfg->pwm_enabled = 1;
 			} else {
 				if (kpdbl_master) {
 					pwm_disable(kpdbl_master);
@@ -1660,6 +1663,7 @@
 			is_kpdbl_master_turn_on = false;
 		} else {
 			pwm_disable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+			led->kpdbl_cfg->pwm_cfg->pwm_enabled = 0;
 			clear_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
 			if (bitmap_weight(kpdbl_leds_in_use,
 				NUM_KPDBL_LEDS) == 1 && kpdbl_master &&
@@ -1727,20 +1731,17 @@
 				"Failed to write led enable reg\n");
 			return rc;
 		}
-
+		if (!led->rgb_cfg->pwm_cfg->pwm_enabled) {
+			pwm_enable(led->rgb_cfg->pwm_cfg->pwm_dev);
+			led->rgb_cfg->pwm_cfg->pwm_enabled = 1;
+		}
+	} else {
+		led->rgb_cfg->pwm_cfg->mode =
+			led->rgb_cfg->pwm_cfg->default_mode;
 		if (led->rgb_cfg->pwm_cfg->pwm_enabled) {
 			pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
 			led->rgb_cfg->pwm_cfg->pwm_enabled = 0;
 		}
-
-		rc = pwm_enable(led->rgb_cfg->pwm_cfg->pwm_dev);
-		if (!rc)
-			led->rgb_cfg->pwm_cfg->pwm_enabled = 1;
-	} else {
-		led->rgb_cfg->pwm_cfg->mode =
-			led->rgb_cfg->pwm_cfg->default_mode;
-		pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
-		led->rgb_cfg->pwm_cfg->pwm_enabled = 0;
 		rc = qpnp_led_masked_write(led,
 			RGB_LED_EN_CTL(led->base),
 			led->rgb_cfg->enable, RGB_LED_DISABLE);
@@ -2183,11 +2184,17 @@
 	previous_pwm_us = pwm_cfg->pwm_period_us;
 
 	pwm_cfg->pwm_period_us = pwm_us;
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
 	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	if (ret) {
 		pwm_cfg->pwm_period_us = previous_pwm_us;
-		pwm_free(pwm_cfg->pwm_dev);
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
 		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 		qpnp_led_set(&led->cdev, led->cdev.brightness);
 		dev_err(&led->pdev->dev,
@@ -2237,12 +2244,18 @@
 
 	previous_pause_lo = pwm_cfg->lut_params.lut_pause_lo;
 
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
 	pwm_cfg->lut_params.lut_pause_lo = pause_lo;
 	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	if (ret) {
 		pwm_cfg->lut_params.lut_pause_lo = previous_pause_lo;
-		pwm_free(pwm_cfg->pwm_dev);
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
 		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 		qpnp_led_set(&led->cdev, led->cdev.brightness);
 		dev_err(&led->pdev->dev,
@@ -2292,12 +2305,18 @@
 
 	previous_pause_hi = pwm_cfg->lut_params.lut_pause_hi;
 
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
 	pwm_cfg->lut_params.lut_pause_hi = pause_hi;
 	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	if (ret) {
 		pwm_cfg->lut_params.lut_pause_hi = previous_pause_hi;
-		pwm_free(pwm_cfg->pwm_dev);
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
 		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 		qpnp_led_set(&led->cdev, led->cdev.brightness);
 		dev_err(&led->pdev->dev,
@@ -2348,12 +2367,18 @@
 	previous_start_idx = pwm_cfg->duty_cycles->start_idx;
 	pwm_cfg->duty_cycles->start_idx = start_idx;
 	pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
 	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	if (ret) {
 		pwm_cfg->duty_cycles->start_idx = previous_start_idx;
 		pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
-		pwm_free(pwm_cfg->pwm_dev);
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
 		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 		qpnp_led_set(&led->cdev, led->cdev.brightness);
 		dev_err(&led->pdev->dev,
@@ -2403,12 +2428,18 @@
 
 	previous_ramp_step_ms = pwm_cfg->lut_params.ramp_step_ms;
 
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
 	pwm_cfg->lut_params.ramp_step_ms = ramp_step_ms;
 	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	if (ret) {
 		pwm_cfg->lut_params.ramp_step_ms = previous_ramp_step_ms;
-		pwm_free(pwm_cfg->pwm_dev);
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
 		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 		qpnp_led_set(&led->cdev, led->cdev.brightness);
 		dev_err(&led->pdev->dev,
@@ -2458,12 +2489,18 @@
 
 	previous_lut_flags = pwm_cfg->lut_params.flags;
 
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
 	pwm_cfg->lut_params.flags = lut_flags;
 	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	if (ret) {
 		pwm_cfg->lut_params.flags = previous_lut_flags;
-		pwm_free(pwm_cfg->pwm_dev);
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
 		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 		qpnp_led_set(&led->cdev, led->cdev.brightness);
 		dev_err(&led->pdev->dev,
@@ -2543,7 +2580,11 @@
 	pwm_cfg->old_duty_pcts = previous_duty_pcts;
 	pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
 
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
+
 	ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	if (ret)
 		goto restore;
@@ -2558,7 +2599,10 @@
 	pwm_cfg->old_duty_pcts = pwm_cfg->duty_cycles->duty_pcts;
 	pwm_cfg->duty_cycles->duty_pcts = previous_duty_pcts;
 	pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
-	pwm_free(pwm_cfg->pwm_dev);
+	if (pwm_cfg->pwm_enabled) {
+		pwm_disable(pwm_cfg->pwm_dev);
+		pwm_cfg->pwm_enabled = 0;
+	}
 	qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 	qpnp_led_set(&led->cdev, led->cdev.brightness);
 	return ret;
@@ -2588,7 +2632,10 @@
 				led->kpdbl_cfg->pwm_mode =
 						pwm_cfg->default_mode;
 		}
-		pwm_free(pwm_cfg->pwm_dev);
+		if (pwm_cfg->pwm_enabled) {
+			pwm_disable(pwm_cfg->pwm_dev);
+			pwm_cfg->pwm_enabled = 0;
+		}
 		qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
 		if (led->id == QPNP_ID_RGB_RED || led->id == QPNP_ID_RGB_GREEN
 				|| led->id == QPNP_ID_RGB_BLUE) {
@@ -3541,8 +3588,11 @@
 	}
 
 	rc = qpnp_get_config_pwm(led->kpdbl_cfg->pwm_cfg, led->pdev,  node);
-	if (rc < 0)
+	if (rc < 0) {
+		if (led->kpdbl_cfg->pwm_cfg->pwm_dev)
+			pwm_put(led->kpdbl_cfg->pwm_cfg->pwm_dev);
 		return rc;
+	}
 
 	rc = of_property_read_u32(node, "qcom,row-id", &val);
 	if (!rc)
@@ -3605,8 +3655,11 @@
 	}
 
 	rc = qpnp_get_config_pwm(led->rgb_cfg->pwm_cfg, led->pdev, node);
-	if (rc < 0)
+	if (rc < 0) {
+		if (led->rgb_cfg->pwm_cfg->pwm_dev)
+			pwm_put(led->rgb_cfg->pwm_cfg->pwm_dev);
 		return rc;
+	}
 
 	return 0;
 }
@@ -3729,8 +3782,11 @@
 	}
 
 	rc = qpnp_get_config_pwm(led->mpp_cfg->pwm_cfg, led->pdev, node);
-	if (rc < 0)
+	if (rc < 0) {
+		if (led->mpp_cfg->pwm_cfg && led->mpp_cfg->pwm_cfg->pwm_dev)
+			pwm_put(led->mpp_cfg->pwm_cfg->pwm_dev);
 		goto err_config_mpp;
+	}
 
 	return 0;
 
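Every sysfs store path in the leds-qpnp.c hunks above replaces pwm_free() with the same guard: disable the PWM only if the driver previously enabled it, then clear the flag. A hedged sketch of how that repeated pattern could be factored into a helper is shown here; the helper name is invented, it assumes the driver's PWM config type (referred to as struct pwm_config_data) with the pwm_dev and pwm_enabled members used above, and the patch itself keeps the open-coded form to match the surrounding driver style.

#include <linux/pwm.h>

/* Assumes the driver's PWM config type and its pwm_dev/pwm_enabled members */
static void qpnp_led_pwm_disable_if_enabled(struct pwm_config_data *pwm_cfg)
{
	if (pwm_cfg->pwm_enabled) {
		pwm_disable(pwm_cfg->pwm_dev);
		pwm_cfg->pwm_enabled = 0;
	}
}
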
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c041db6..0db8a6d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -151,4 +151,11 @@
 	  Support for communication with the hardened-RPM blocks in
 	  Qualcomm Technologies Inc (QTI) SoCs using TCS hardware mailbox.
 
+config MSM_QMP
+	bool "QTI Mailbox Protocol(QMP)"
+	depends on MSM_SMEM
+	help
+	  QMP is a lightweight communication protocol for sending messages to
+	  a remote processor. This protocol fits into the Generic Mailbox
+	  Framework. QMP uses a mailbox located in shared memory.
 endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 0a01d79..3c811d3 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -31,3 +31,5 @@
 obj-$(CONFIG_BCM_PDC_MBOX)	+= bcm-pdc-mailbox.o
 
 obj-$(CONFIG_QTI_RPMH_MBOX)	+= qti-tcs.o
+
+obj-$(CONFIG_MSM_QMP)	+= msm_qmp.o
diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c
new file mode 100644
index 0000000..dd022d3
--- /dev/null
+++ b/drivers/mailbox/msm_qmp.c
@@ -0,0 +1,811 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/mailbox/qmp.h>
+
+#define QMP_MAGIC	0x4d41494c	/* MAIL */
+#define QMP_VERSION	0x1
+#define QMP_FEATURES	0x0
+#define QMP_NUM_CHANS	0x1
+#define QMP_TOUT_MS	5000
+#define QMP_TX_TOUT_MS	2000
+
+#define QMP_MBOX_LINK_DOWN		0xFFFF0000
+#define QMP_MBOX_LINK_UP		0x0000FFFF
+#define QMP_MBOX_CH_DISCONNECTED	0xFFFF0000
+#define QMP_MBOX_CH_CONNECTED		0x0000FFFF
+
+#define MSG_RAM_ALIGN_BYTES 3
+
+/**
+ * enum qmp_local_state - definition of the local state machine
+ * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
+ * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
+ * @LINK_CONNECTED:		Link state up, channel not connected
+ * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
+ * @LOCAL_CONNECTED:		Channel opened locally
+ * @CHANNEL_CONNECTED:		Channel fully opened
+ * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
+ */
+enum qmp_local_state {
+	LINK_DISCONNECTED,
+	LINK_NEGOTIATION,
+	LINK_CONNECTED,
+	LOCAL_CONNECTING,
+	LOCAL_CONNECTED,
+	CHANNEL_CONNECTED,
+	LOCAL_DISCONNECTING,
+};
+
+/**
+ * struct channel_desc - description of a core's link, channel and mailbox state
+ * @link_state:		Current link state of core
+ * @link_state_ack:	Ack for other core to use when link state changes
+ * @ch_state:		Current channel state of core
+ * @ch_state_ack:	Ack for other core to use when channel state changes
+ * @mailbox_size:	Size of this core's mailbox
+ * @mailbox_offset:	Location of core's mailbox from a base smem location
+ */
+struct channel_desc {
+	u32 link_state;
+	u32 link_state_ack;
+	u32 ch_state;
+	u32 ch_state_ack;
+	u32 mailbox_size;
+	u32 mailbox_offset;
+};
+
+/**
+ * struct mbox_desc - description of the protocol's mailbox state
+ * @magic:	Magic number field to be set by ucore
+ * @version:	Version field to be set by ucore
+ * @features:	Features field to be set by ucore
+ * @ucore:	Channel descriptor to hold state of ucore
+ * @mcore:	Channel descriptor to hold state of mcore
+ * @reserved:	Reserved in case of future use
+ *
+ * This structure resides in SMEM and contains the control information for the
+ * mailbox channel. Each core in the link will have one channel descriptor
+ */
+struct mbox_desc {
+	u32 magic;
+	u32 version;
+	u32 features;
+	struct channel_desc ucore;
+	struct channel_desc mcore;
+	u32 reserved;
+};
+
+/**
+ * struct qmp_core_version - local structure to hold version and features
+ * @version:	Version field to indicate what version the ucore supports
+ * @features:	Features field to indicate what features the ucore supports
+ */
+struct qmp_core_version {
+	u32 version;
+	u32 features;
+};
+
+/**
+ * struct qmp_device - local information for managing a single mailbox
+ * @dev:		The device that corresponds to this mailbox
+ * @mbox:		The mbox controller for this mailbox
+ * @name:		The name of this mailbox
+ * @local_state:	Current state of the mailbox protocol
+ * @link_complete:	Used to block until link negotiation with the remote
+ *			proc is complete
+ * @ch_complete:	Used to block until the channel is fully opened
+ * @tx_sent:		True if tx is sent and remote proc has not sent ack
+ * @ch_in_use:		True if this mailbox's channel is owned by a client
+ * @rx_pkt:		Buffer to pass to client, holds copied data from mailbox
+ * @version:		Version and features received during link negotiation
+ * @mcore_mbox_offset:	Offset of mcore mbox from the msgram start
+ * @mcore_mbox_size:	Size of the mcore mbox
+ * @desc:		Reference to the mailbox descriptor in SMEM
+ * @msgram:		Reference to the start of msgram
+ * @irq_mask:		Mask written to @tx_irq_reg to trigger irq
+ * @tx_irq_reg:		Reference to the register to send an irq to remote proc
+ * @rx_reset_reg:	Reference to the register to reset the rx irq, if
+ *			applicable
+ * @rx_irq_line:	The incoming interrupt line
+ * @tx_irq_count:	Number of tx interrupts triggered
+ * @rx_irq_count:	Number of rx interrupts received
+ * @kwork:		Work to be executed when an irq is received
+ * @kworker:		Handle to entity to process incoming data
+ * @task:		Handle to task context used to run @kworker
+ * @state_lock:		Serialize mailbox state changes
+ * @dwork:		Delayed work to detect timed out tx
+ * @tx_lock:		Serialize access for writes to mailbox
+ */
+struct qmp_device {
+	struct device *dev;
+	struct mbox_controller *mbox;
+	const char *name;
+	enum qmp_local_state local_state;
+	struct completion link_complete;
+	struct completion ch_complete;
+	bool tx_sent;
+	bool ch_in_use;
+	struct qmp_pkt rx_pkt;
+	struct qmp_core_version version;
+	u32 mcore_mbox_offset;
+	u32 mcore_mbox_size;
+	void __iomem *desc;
+	void __iomem *msgram;
+	u32 irq_mask;
+	void __iomem *tx_irq_reg;
+	void __iomem *rx_reset_reg;
+	u32 rx_irq_line;
+	u32 tx_irq_count;
+	u32 rx_irq_count;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct mutex state_lock;
+	struct delayed_work dwork;
+	spinlock_t tx_lock;
+};
+
+/**
+ * send_irq() - send an irq to a remote entity as an event signal.
+ * @mdev:	The mailbox device whose remote entity should receive the irq.
+ */
+static void send_irq(struct qmp_device *mdev)
+{
+	/*
+	 * Any data associated with this event must be visible to the remote
+	 * before the interrupt is triggered
+	 */
+	wmb();
+	writel_relaxed(mdev->irq_mask, mdev->tx_irq_reg);
+	mdev->tx_irq_count++;
+}
+
+/**
+ * qmp_irq_handler() - handle irq from remote entity.
+ * @irq:	irq number for the triggered interrupt.
+ * @priv:	private pointer to qmp mbox device.
+ */
+irqreturn_t qmp_irq_handler(int irq, void *priv)
+{
+	struct qmp_device *mdev = (struct qmp_device *)priv;
+
+	if (mdev->rx_reset_reg)
+		writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);
+
+	kthread_queue_work(&mdev->kworker, &mdev->kwork);
+	mdev->rx_irq_count++;
+
+	return IRQ_HANDLED;
+}
+
+static void memcpy32_toio(void *dest, void *src, size_t size)
+{
+	u32 *dest_local = (u32 *)dest;
+	u32 *src_local = (u32 *)src;
+
+	WARN_ON(size & MSG_RAM_ALIGN_BYTES);
+	size /= sizeof(u32);
+	while (size--)
+		iowrite32(*src_local++, dest_local++);
+}
+
+static void memcpy32_fromio(void *dest, void *src, size_t size)
+{
+	u32 *dest_local = (u32 *)dest;
+	u32 *src_local = (u32 *)src;
+
+	WARN_ON(size & MSG_RAM_ALIGN_BYTES);
+	size /= sizeof(u32);
+	while (size--)
+		*dest_local++ = ioread32(src_local++);
+}
+
+/**
+ * set_ucore_link_ack() - set the link ack in the ucore channel desc.
+ * @mdev:	the mailbox for the field that is being set.
+ * @state:	the value to set the ack field to.
+ */
+static void set_ucore_link_ack(struct qmp_device *mdev, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, ucore);
+	offset += offsetof(struct channel_desc, link_state_ack);
+	iowrite32(state, mdev->desc + offset);
+}
+
+/**
+ * set_ucore_ch_ack() - set the channel ack in the ucore channel desc.
+ * @mdev:	the mailbox for the field that is being set.
+ * @state:	the value to set the ack field to.
+ */
+static void set_ucore_ch_ack(struct qmp_device *mdev, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, ucore);
+	offset += offsetof(struct channel_desc, ch_state_ack);
+	iowrite32(state, mdev->desc + offset);
+}
+
+/**
+ * set_mcore_ch() - set the channel state in the mcore channel desc.
+ * @mdev:	the mailbox for the field that is being set.
+ * @state:	the value to set the channel field to.
+ */
+static void set_mcore_ch(struct qmp_device *mdev, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, mcore);
+	offset += offsetof(struct channel_desc, ch_state);
+	iowrite32(state, mdev->desc + offset);
+}
+
+/**
+ * qmp_notify_timeout() - Notify client of tx timeout with -EIO
+ * @work:	Structure for work that was scheduled.
+ */
+static void qmp_notify_timeout(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
+	struct mbox_chan *chan = &mdev->mbox->chans[0];
+	int err = -EIO;
+
+	pr_err("%s: qmp tx timeout for %s\n", __func__, mdev->name);
+	mbox_chan_txdone(chan, err);
+}
+
+/**
+ * qmp_startup() - Start the qmp mailbox channel for communication. Waits
+ *			for the remote subsystem to acknowledge the channel
+ *			open, or until a timeout expires.
+ * @chan:	mailbox channel that is being opened.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_startup(struct mbox_chan *chan)
+{
+	struct qmp_device *mdev = chan->con_priv;
+
+	if (!mdev)
+		return -EINVAL;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->local_state == CHANNEL_CONNECTED) {
+		mutex_unlock(&mdev->state_lock);
+		return -EINVAL;
+	}
+	if (!completion_done(&mdev->link_complete)) {
+		mutex_unlock(&mdev->state_lock);
+		return -EAGAIN;
+	}
+
+	set_mcore_ch(mdev, QMP_MBOX_CH_CONNECTED);
+	mdev->local_state = LOCAL_CONNECTING;
+	mutex_unlock(&mdev->state_lock);
+
+	send_irq(mdev);
+	wait_for_completion_interruptible_timeout(&mdev->ch_complete,
+					msecs_to_jiffies(QMP_TOUT_MS));
+	return 0;
+}
+
+static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
+{
+	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS));
+}
+
+/**
+ * qmp_send_data() - Copy the data to the channel's mailbox and notify
+ *				remote subsystem of new data. This function will
+ *				return an error if the previous message sent has
+ *				not been read. Cannot sleep.
+ * @chan:	mailbox channel that data is to be sent over.
+ * @data:	Data to be sent to remote processor, should be in the format of
+ *		a qmp_pkt.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_send_data(struct mbox_chan *chan, void *data)
+{
+	struct qmp_device *mdev = chan->con_priv;
+	struct qmp_pkt *pkt = (struct qmp_pkt *)data;
+	void __iomem *addr;
+	unsigned long flags;
+
+	if (!mdev || !data || mdev->local_state != CHANNEL_CONNECTED)
+		return -EINVAL;
+
+	spin_lock_irqsave(&mdev->tx_lock, flags);
+	addr = mdev->msgram + mdev->mcore_mbox_offset;
+	if (ioread32(addr)) {
+		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+		return -EBUSY;
+	}
+
+	if (pkt->size + sizeof(pkt->size) > mdev->mcore_mbox_size) {
+		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+		return -EINVAL;
+	}
+	memcpy32_toio(addr + sizeof(pkt->size), pkt->data, pkt->size);
+	iowrite32(pkt->size, addr);
+	mdev->tx_sent = true;
+	send_irq(mdev);
+	qmp_schedule_tx_timeout(mdev);
+	spin_unlock_irqrestore(&mdev->tx_lock, flags);
+	return 0;
+}
+
+/**
+ * qmp_shutdown() - Disconnect this mailbox channel so the client does not
+ *				receive any more data and can relinquish
+ *				control of the channel.
+ * @chan:	mailbox channel to be shutdown.
+ */
+static void qmp_shutdown(struct mbox_chan *chan)
+{
+	struct qmp_device *mdev = chan->con_priv;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->local_state != LINK_DISCONNECTED) {
+		mdev->local_state = LOCAL_DISCONNECTING;
+		set_mcore_ch(mdev, QMP_MBOX_CH_DISCONNECTED);
+		send_irq(mdev);
+	}
+	mdev->ch_in_use = false;
+	mutex_unlock(&mdev->state_lock);
+}
+
+/**
+ * qmp_last_tx_done() - qmp does not support polling operations; print an
+ *				error about the unexpected usage and return
+ *				true so operation can resume.
+ * @chan:	Corresponding mailbox channel for requested last tx.
+ *
+ * Return: true
+ */
+static bool qmp_last_tx_done(struct mbox_chan *chan)
+{
+	pr_err("In %s, unexpected usage of last_tx_done\n", __func__);
+	return true;
+}
+
+/**
+ * qmp_recv_data() - received notification that data is available in the
+ *			mailbox. Copy data from mailbox and pass to client.
+ * @mdev:	mailbox device that received the notification.
+ * @mbox_of:	offset of mailbox from msgram start.
+ */
+static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
+{
+	void __iomem *addr;
+	struct qmp_pkt *pkt;
+
+	addr = mdev->msgram + mbox_of;
+	pkt = &mdev->rx_pkt;
+	pkt->size = ioread32(addr);
+
+	if (pkt->size > mdev->mcore_mbox_size)
+		pr_err("%s: Invalid mailbox packet\n", __func__);
+	else {
+		memcpy32_fromio(pkt->data, addr + sizeof(pkt->size), pkt->size);
+		mbox_chan_received_data(&mdev->mbox->chans[0], pkt);
+	}
+	iowrite32(0, addr);
+	send_irq(mdev);
+}
+
+/**
+ * init_mcore_state() - initialize the mcore state of a mailbox.
+ * @mdev:	mailbox device to be initialized.
+ */
+static void init_mcore_state(struct qmp_device *mdev)
+{
+	struct channel_desc mcore;
+	u32 offset = offsetof(struct mbox_desc, mcore);
+
+	mcore.link_state = QMP_MBOX_LINK_UP;
+	mcore.link_state_ack = QMP_MBOX_LINK_DOWN;
+	mcore.ch_state = QMP_MBOX_CH_DISCONNECTED;
+	mcore.ch_state_ack = QMP_MBOX_CH_DISCONNECTED;
+	mcore.mailbox_size = mdev->mcore_mbox_size;
+	mcore.mailbox_offset = mdev->mcore_mbox_offset;
+	memcpy32_toio(mdev->desc + offset, &mcore, sizeof(mcore));
+}
+
+/**
+ * __qmp_rx_worker() - Handle incoming messages from remote processor.
+ * @mdev:	mailbox device that received notification.
+ */
+static void __qmp_rx_worker(struct qmp_device *mdev)
+{
+	u32 msg_len;
+	struct mbox_desc desc;
+
+	memcpy_fromio(&desc, mdev->desc, sizeof(desc));
+	if (desc.magic != QMP_MAGIC)
+		return;
+
+	mutex_lock(&mdev->state_lock);
+	switch (mdev->local_state) {
+	case LINK_DISCONNECTED:
+		mdev->version.version = desc.version;
+		mdev->version.features = desc.features;
+		set_ucore_link_ack(mdev, desc.ucore.link_state);
+		if (desc.mcore.mailbox_size) {
+			mdev->mcore_mbox_size = desc.mcore.mailbox_size;
+			mdev->mcore_mbox_offset = desc.mcore.mailbox_offset;
+		}
+		init_mcore_state(mdev);
+		mdev->local_state = LINK_NEGOTIATION;
+		mdev->rx_pkt.data = devm_kzalloc(mdev->dev,
+						 desc.ucore.mailbox_size,
+						 GFP_KERNEL);
+		if (!mdev->rx_pkt.data) {
+			pr_err("In %s: failed to allocate rx pkt\n", __func__);
+			break;
+		}
+		send_irq(mdev);
+		break;
+	case LINK_NEGOTIATION:
+		if (desc.mcore.link_state_ack != QMP_MBOX_LINK_UP ||
+				desc.mcore.link_state != QMP_MBOX_LINK_UP) {
+			pr_err("In %s: rx interrupt without negotiation ack\n",
+					__func__);
+			break;
+		}
+		mdev->local_state = LINK_CONNECTED;
+		complete_all(&mdev->link_complete);
+		break;
+	case LINK_CONNECTED:
+		if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
+			pr_err("In %s: rx interrupt without channel open\n",
+					__func__);
+			break;
+		}
+		set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+		send_irq(mdev);
+		break;
+	case LOCAL_CONNECTING:
+		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_CONNECTED &&
+				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED)
+			mdev->local_state = LOCAL_CONNECTED;
+
+		if (desc.ucore.ch_state != desc.ucore.ch_state_ack) {
+			set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+			send_irq(mdev);
+		}
+		if (mdev->local_state == LOCAL_CONNECTED &&
+				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED &&
+				desc.ucore.ch_state == QMP_MBOX_CH_CONNECTED) {
+			mdev->local_state = CHANNEL_CONNECTED;
+			complete_all(&mdev->ch_complete);
+		}
+		break;
+	case LOCAL_CONNECTED:
+		if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
+			pr_err("In %s: rx interrupt without remote channel open\n",
+					__func__);
+			break;
+		}
+		set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+		mdev->local_state = CHANNEL_CONNECTED;
+		send_irq(mdev);
+		complete_all(&mdev->ch_complete);
+		break;
+	case CHANNEL_CONNECTED:
+		if (desc.ucore.ch_state == QMP_MBOX_CH_DISCONNECTED) {
+			set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+			mdev->local_state = LOCAL_CONNECTED;
+			send_irq(mdev);
+		}
+
+		msg_len = ioread32(mdev->msgram + desc.ucore.mailbox_offset);
+		if (msg_len)
+			qmp_recv_data(mdev, desc.ucore.mailbox_offset);
+
+		if (mdev->tx_sent) {
+			msg_len = ioread32(mdev->msgram +
+						mdev->mcore_mbox_offset);
+			if (msg_len == 0) {
+				mdev->tx_sent = false;
+				cancel_delayed_work(&mdev->dwork);
+				mbox_chan_txdone(&mdev->mbox->chans[0], 0);
+			}
+		}
+		break;
+	case LOCAL_DISCONNECTING:
+		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_DISCONNECTED &&
+				desc.mcore.ch_state == desc.mcore.ch_state_ack)
+			mdev->local_state = LINK_CONNECTED;
+		reinit_completion(&mdev->ch_complete);
+		break;
+	default:
+		pr_err("In %s: Local Channel State corrupted\n", __func__);
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void rx_worker(struct kthread_work *work)
+{
+	struct qmp_device *mdev;
+
+	mdev = container_of(work, struct qmp_device, kwork);
+	__qmp_rx_worker(mdev);
+}
+
+/**
+ * qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
+ *			device. Make sure the channel is not already in use.
+ * @mbox:	Mailbox device that controls the requested channel.
+ * @spec:	Device tree arguments to specify which channel is requested.
+ */
+static struct mbox_chan *qmp_mbox_of_xlate(struct mbox_controller *mbox,
+		const struct of_phandle_args *spec)
+{
+	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
+	unsigned int channel = spec->args[0];
+
+	if (!mdev || channel >= mbox->num_chans)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->ch_in_use) {
+		pr_err("%s, mbox channel already in use %s\n", __func__,
+								mdev->name);
+		mutex_unlock(&mdev->state_lock);
+		return ERR_PTR(-EBUSY);
+	}
+	mdev->ch_in_use = true;
+	mutex_unlock(&mdev->state_lock);
+	return &mbox->chans[0];
+}
+
+/**
+ * qmp_parse_devicetree() - Parse the device tree information for QMP, map
+ *			I/O memory and register for needed interrupts
+ * @pdev:	platform device for this driver.
+ * @mdev:	mailbox device to hold the device tree configuration.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_parse_devicetree(struct platform_device *pdev,
+					struct qmp_device *mdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	char *key;
+	int rc;
+	const char *subsys_name;
+	u32 rx_irq_line, tx_irq_mask;
+	u32 desc_of = 0;
+	u32 mbox_of = 0;
+	u32 mbox_size = 0;
+	struct resource *msgram_r, *tx_irq_reg_r;
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "msgram";
+	msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!msgram_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "irq-reg-base";
+	tx_irq_reg_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!tx_irq_reg_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &tx_irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "interrupts";
+	rx_irq_line = irq_of_parse_and_map(node, 0);
+	if (!rx_irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "mbox-desc-offset";
+	rc = of_property_read_u32(node, key, &desc_of);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "mbox-offset";
+	rc = of_property_read_u32(node, key, &mbox_of);
+	if (!rc)
+		mdev->mcore_mbox_offset = mbox_of;
+
+	key = "mbox-size";
+	rc = of_property_read_u32(node, key, &mbox_size);
+	if (!rc)
+		mdev->mcore_mbox_size = mbox_size;
+
+	mdev->name = subsys_name;
+	mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start,
+						resource_size(msgram_r));
+	if (!mdev->msgram)
+		return -ENOMEM;
+
+	mdev->desc = mdev->msgram + desc_of;
+	if (!mdev->desc)
+		return -ENOMEM;
+
+	mdev->irq_mask = tx_irq_mask;
+	mdev->tx_irq_reg = devm_ioremap_nocache(&pdev->dev, tx_irq_reg_r->start,
+						resource_size(tx_irq_reg_r));
+	if (!mdev->tx_irq_reg)
+		return -ENOMEM;
+
+	mdev->rx_irq_line = rx_irq_line;
+	return 0;
+}
+
+/**
+ * cleanup_workqueue() - Flush all work and stop the thread for this mailbox.
+ * @mdev:	mailbox device to cleanup.
+ */
+static void cleanup_workqueue(struct qmp_device *mdev)
+{
+	kthread_flush_worker(&mdev->kworker);
+	kthread_stop(mdev->task);
+	mdev->task = NULL;
+}
+
+static struct mbox_chan_ops qmp_mbox_ops = {
+	.startup = qmp_startup,
+	.shutdown = qmp_shutdown,
+	.send_data = qmp_send_data,
+	.last_tx_done = qmp_last_tx_done,
+};
+
+static const struct of_device_id qmp_mbox_match_table[] = {
+	{ .compatible = "qcom,qmp-mbox" },
+	{},
+};
+
+static int qmp_mbox_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct mbox_controller *mbox;
+	struct qmp_device *mdev;
+	struct mbox_chan *chans;
+	int ret = 0;
+
+	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, mdev);
+
+	ret = qmp_parse_devicetree(pdev, mdev);
+	if (ret)
+		return ret;
+
+	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	chans = devm_kzalloc(&pdev->dev, sizeof(*chans) * QMP_NUM_CHANS,
+								GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	mbox->dev = &pdev->dev;
+	mbox->ops = &qmp_mbox_ops;
+	mbox->chans = chans;
+	mbox->chans[0].con_priv = mdev;
+	mbox->num_chans = QMP_NUM_CHANS;
+	mbox->txdone_irq = true;
+	mbox->txdone_poll = false;
+	mbox->of_xlate = qmp_mbox_of_xlate;
+
+	mdev->dev = &pdev->dev;
+	mdev->mbox = mbox;
+	spin_lock_init(&mdev->tx_lock);
+	mutex_init(&mdev->state_lock);
+	mdev->local_state = LINK_DISCONNECTED;
+	kthread_init_work(&mdev->kwork, rx_worker);
+	kthread_init_worker(&mdev->kworker);
+	mdev->task = kthread_run(kthread_worker_fn, &mdev->kworker, "qmp_%s",
+								mdev->name);
+	init_completion(&mdev->link_complete);
+	init_completion(&mdev->ch_complete);
+	mdev->tx_sent = false;
+	mdev->ch_in_use = false;
+	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
+
+	ret = mbox_controller_register(mbox);
+	if (ret) {
+		cleanup_workqueue(mdev);
+		pr_err("%s: failed to register mbox controller %d\n", __func__,
+									ret);
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, mdev->rx_irq_line, qmp_irq_handler,
+		IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+		node->name, mdev);
+	if (ret < 0) {
+		cleanup_workqueue(mdev);
+		mbox_controller_unregister(mdev->mbox);
+		pr_err("%s: request irq on %d failed: %d\n", __func__,
+							mdev->rx_irq_line, ret);
+		return ret;
+	}
+	ret = enable_irq_wake(mdev->rx_irq_line);
+	if (ret < 0)
+		pr_err("%s: enable_irq_wake on %d failed: %d\n", __func__,
+							mdev->rx_irq_line, ret);
+
+	qmp_irq_handler(0, mdev);
+	return 0;
+}
+
+static int qmp_mbox_remove(struct platform_device *pdev)
+{
+	struct qmp_device *mdev = platform_get_drvdata(pdev);
+
+	cleanup_workqueue(mdev);
+	mbox_controller_unregister(mdev->mbox);
+	return 0;
+}
+
+static struct platform_driver qmp_mbox_driver = {
+	.probe = qmp_mbox_probe,
+	.remove = qmp_mbox_remove,
+	.driver = {
+		.name = "qmp_mbox",
+		.owner = THIS_MODULE,
+		.of_match_table = qmp_mbox_match_table,
+	},
+};
+
+static int __init qmp_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&qmp_mbox_driver);
+	if (rc)
+		pr_err("%s: qmp_mbox_driver reg failed %d\n", __func__, rc);
+	return rc;
+}
+arch_initcall(qmp_init);
+
+MODULE_DESCRIPTION("MSM QTI Mailbox Protocol");
+MODULE_LICENSE("GPL v2");
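
For reference, a hedged sketch of how a kernel client might use the QMP mailbox added above through the generic mailbox framework. It assumes the qmp_pkt layout used by qmp_send_data() (a 32-bit size plus a data pointer), that the client's device-tree node references this controller through an mboxes property, and placeholder message contents; none of this is part of the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/qmp.h>

static int example_qmp_send(struct device *dev)
{
	struct mbox_client cl = {
		.dev		= dev,
		.tx_block	= true,
		.tx_tout	= 1000,		/* ms */
		.knows_txdone	= false,
	};
	struct mbox_chan *chan;
	/* 32-byte buffer so the size stays 32-bit aligned for memcpy32_toio() */
	char msg[32] = "{class: example}";
	struct qmp_pkt pkt = {
		.size = sizeof(msg),
		.data = msg,
	};
	int ret;

	chan = mbox_request_channel(&cl, 0);	/* first mboxes entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &pkt);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}
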
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 5b114cb..1c73c5a2 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -121,6 +121,7 @@
 
 /* One per MBOX controller */
 struct tcs_drv {
+	const char *name;
 	void *base; /* start address of the RSC's registers */
 	void *reg_base; /* start address for DRV specific register */
 	int drv_id;
@@ -333,6 +334,7 @@
 	u32 irq_status, sts;
 	struct tcs_mbox *tcs;
 	struct tcs_response *resp;
+	struct tcs_cmd *cmd;
 	u32 irq_clear = 0;
 	u32 data;
 
@@ -352,28 +354,20 @@
 
 		cancel_delayed_work(&resp->dwork);
 
-		/* Clear the AMC mode for non-ACTIVE TCSes */
 		tcs = get_tcs_from_index(drv, m);
 		if (!tcs) {
 			pr_err("TCS-%d doesn't exist in DRV\n", m);
 			continue;
 		}
-		if (tcs->type != ACTIVE_TCS) {
-			data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
-			data &= ~TCS_AMC_MODE_ENABLE;
-			write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
-		} else {
-			/* Clear the enable bit for the commands */
-			write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
-		}
 
 		/* Check if all commands were completed */
 		resp->err = 0;
 		for (i = 0; i < resp->msg->num_payload; i++) {
+			cmd = &resp->msg->payload[i];
 			sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
-			if (!(sts & CMD_STATUS_ISSUED) ||
-				(resp->msg->is_complete &&
-					!(sts & CMD_STATUS_COMPL)))
+			if ((!(sts & CMD_STATUS_ISSUED)) ||
+				((resp->msg->is_complete || cmd->complete) &&
+				(!(sts & CMD_STATUS_COMPL))))
 				resp->err = -EIO;
 		}
 
@@ -385,7 +379,18 @@
 			mbox_chan_received_data(resp->chan, resp->msg);
 		}
 
-		trace_rpmh_notify_irq(m, resp->msg->payload[0].addr, resp->err);
+		trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
+						resp->err);
+
+		/* Clear the AMC mode for non-ACTIVE TCSes */
+		if (tcs->type != ACTIVE_TCS) {
+			data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+			data &= ~TCS_AMC_MODE_ENABLE;
+			write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
+		} else {
+			/* Clear the enable bit for the commands */
+			write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
+		}
 
 		/* Notify the client that this request is completed. */
 		send_tcs_response(resp);
@@ -401,7 +406,9 @@
 static inline void mbox_notify_tx_done(struct mbox_chan *chan,
 				struct tcs_mbox_msg *msg, int m, int err)
 {
-	trace_rpmh_notify(m, msg->payload[0].addr, err);
+	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+
+	trace_rpmh_notify(drv->name, m, msg->payload[0].addr, err);
 	mbox_chan_txdone(chan, err);
 }
 
@@ -467,15 +474,16 @@
 	mbox_notify_tx_done(chan, msg, -1, err);
 }
 
-static void __tcs_buffer_write(void __iomem *base, int d, int m, int n,
+static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
 			struct tcs_mbox_msg *msg, bool trigger)
 {
-	u32 cmd_msgid = 0;
+	u32 msgid, cmd_msgid = 0;
 	u32 cmd_enable = 0;
 	u32 cmd_complete;
 	u32 enable = TCS_AMC_MODE_ENABLE;
 	struct tcs_cmd *cmd;
 	int i;
+	void __iomem *base = drv->reg_base;
 
 	/* We have homologous command set i.e pure read or write, not a mix */
 	cmd_msgid = CMD_MSGID_LEN;
@@ -489,11 +497,13 @@
 		cmd = &msg->payload[i];
 		cmd_enable |= BIT(n + i);
 		cmd_complete |= cmd->complete << (n + i);
-		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, cmd_msgid);
+		msgid = cmd_msgid;
+		msgid |= (cmd->complete) ? CMD_MSGID_RESP_REQ : 0;
+		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, msgid);
 		write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
 		write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
-		trace_rpmh_send_msg(base, m, n + i,
-				cmd_msgid, cmd->addr, cmd->data, cmd->complete);
+		trace_rpmh_send_msg(drv->name, m, n + i, msgid, cmd->addr,
+					cmd->data, cmd->complete, trigger);
 	}
 
 	/* Write the send-after-prev completion bits for the batch */
@@ -716,7 +726,7 @@
 	}
 
 	/* Write to the TCS or AMC */
-	__tcs_buffer_write(drv->reg_base, d, m, n, msg, trigger);
+	__tcs_buffer_write(drv, d, m, n, msg, trigger);
 
 	/* Schedule a timeout response, incase there is no actual response */
 	if (trigger)
@@ -727,6 +737,41 @@
 	return 0;
 }
 
+static void __tcs_buffer_invalidate(void __iomem *base, int m)
+{
+	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
+}
+
+static int tcs_mbox_invalidate(struct mbox_chan *chan)
+{
+	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+	struct tcs_mbox *tcs;
+	int m, i;
+	int inv_types[] = { WAKE_TCS, SLEEP_TCS };
+	int type = 0;
+
+	do {
+		tcs = get_tcs_of_type(drv, inv_types[type]);
+		if (IS_ERR(tcs))
+			return PTR_ERR(tcs);
+
+		spin_lock(&tcs->tcs_lock);
+		for (i = 0; i < tcs->num_tcs; i++) {
+			m = i + tcs->tcs_offset;
+			spin_lock(&tcs->tcs_m_lock[i]);
+			while (!tcs_is_free(drv->reg_base, m))
+				cpu_relax();
+			__tcs_buffer_invalidate(drv->reg_base, m);
+			spin_unlock(&tcs->tcs_m_lock[i]);
+		}
+		/* Mark the TCS as free */
+		bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+		spin_unlock(&tcs->tcs_lock);
+	} while (++type < ARRAY_SIZE(inv_types));
+
+	return 0;
+}
+
 /**
  * chan_tcs_write: Validate the incoming message and write to the
  * appropriate TCS block.
@@ -771,6 +816,13 @@
 		goto tx_fail;
 	}
 
+	/*
+	 * Since we are re-purposing the wake TCS, invalidate previous
+	 * contents to avoid confusion.
+	 */
+	if (msg->state == RPMH_AWAKE_STATE)
+		tcs_mbox_invalidate(chan);
+
 	/* Post the message to the TCS and trigger */
 	ret = tcs_mbox_write(chan, msg, true);
 
@@ -791,50 +843,16 @@
 	return 0;
 }
 
-static void __tcs_buffer_invalidate(void __iomem *base, int m)
-{
-	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
-}
-
-static int tcs_mbox_invalidate(struct mbox_chan *chan)
-{
-	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
-	struct tcs_mbox *tcs;
-	int m, i;
-	int inv_types[] = { WAKE_TCS, SLEEP_TCS };
-	int type = 0;
-
-	do {
-		tcs = get_tcs_of_type(drv, inv_types[type]);
-		if (IS_ERR(tcs))
-			return PTR_ERR(tcs);
-
-		spin_lock(&tcs->tcs_lock);
-		for (i = 0; i < tcs->num_tcs; i++) {
-			m = i + tcs->tcs_offset;
-			spin_lock(&tcs->tcs_m_lock[i]);
-			while (!tcs_is_free(drv->reg_base, m))
-				cpu_relax();
-			__tcs_buffer_invalidate(drv->reg_base, m);
-			spin_unlock(&tcs->tcs_m_lock[i]);
-		}
-		/* Mark the TCS as free */
-		bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
-		spin_unlock(&tcs->tcs_lock);
-	} while (++type < ARRAY_SIZE(inv_types));
-
-	return 0;
-}
-
-static void __tcs_write_hidden(void *base, int d, struct tcs_mbox_msg *msg)
+static void __tcs_write_hidden(struct tcs_drv *drv, int d,
+					struct tcs_mbox_msg *msg)
 {
 	int i;
-	void __iomem *addr = base + TCS_HIDDEN_CMD0_DRV_DATA;
+	void __iomem *addr = drv->base + TCS_HIDDEN_CMD0_DRV_DATA;
 
 	for (i = 0; i < msg->num_payload; i++) {
 		/* Only data is write capable */
 		writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
-		trace_rpmh_control_msg(addr, msg->payload[i].data);
+		trace_rpmh_control_msg(drv->name, msg->payload[i].data);
 		addr += TCS_HIDDEN_CMD_SHIFT;
 	}
 }
@@ -855,7 +873,7 @@
 	}
 
 	spin_lock(&tcs->tcs_lock);
-	__tcs_write_hidden(tcs->drv->base, drv->drv_id, msg);
+	__tcs_write_hidden(tcs->drv, drv->drv_id, msg);
 	spin_unlock(&tcs->tcs_lock);
 
 	return 0;
@@ -949,6 +967,7 @@
 	u32 config, max_tcs, ncpt;
 	int tcs_type_count[TCS_TYPE_NR] = { 0 };
 	struct resource *res;
+	u32 irq_mask;
 
 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
@@ -1073,6 +1092,10 @@
 	drv->num_tcs = st;
 	drv->pdev = pdev;
 
+	drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!drv->name)
+		drv->name = dev_name(&pdev->dev);
+
 	ret = tcs_response_pool_init(drv);
 	if (ret)
 		return ret;
@@ -1088,9 +1111,14 @@
 	if (ret)
 		return ret;
 
-	/* Enable interrupts for AMC TCS */
-	write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0,
-					drv->tcs[ACTIVE_TCS].tcs_mask);
+	/*
+	 * Enable interrupts for the AMC TCSes;
+	 * if there are no AMC TCSes, use the wake TCSes.
+	 */
+	irq_mask = (drv->tcs[ACTIVE_TCS].num_tcs) ?
+				drv->tcs[ACTIVE_TCS].tcs_mask :
+				drv->tcs[WAKE_TCS].tcs_mask;
+	write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0, irq_mask);
 
 	ret = mbox_controller_register(&drv->mbox);
 	if (ret)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 1e66909..3b53f34 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -516,15 +516,15 @@
 	  If unsure, say N.
 
 config DM_ANDROID_VERITY
-	tristate "Android verity target support"
-	depends on DM_VERITY
+	bool "Android verity target support"
+	depends on DM_VERITY=y
 	depends on X509_CERTIFICATE_PARSER
 	depends on SYSTEM_TRUSTED_KEYRING
 	depends on PUBLIC_KEY_ALGO_RSA
 	depends on KEYS
 	depends on ASYMMETRIC_KEY_TYPE
 	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
-	depends on MD_LINEAR
+	depends on MD_LINEAR=y
 	select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
 	---help---
 	  This device-mapper target is virtually a VERITY target. This
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda..55b5e0e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1470,7 +1470,25 @@
 			split = bio;
 		}
 
+		/*
+		 * If a bio is split, the first part of the bio passes the
+		 * barrier but the remainder is queued in current->bio_list
+		 * (see generic_make_request). If raise_barrier() were called
+		 * here, the second part could not pass the barrier; since
+		 * the first part has not been dispatched to the underlying
+		 * disks yet, the barrier is never released, so
+		 * raise_barrier() would wait forever: a deadlock.
+		 * Note, this only happens in the read path. In the write
+		 * path, the first part of the bio is dispatched in a
+		 * schedule() call (because of the blk plug) or offloaded to
+		 * raid10d. Quitting the function immediately changes the
+		 * order of bios queued in bio_list and avoids the deadlock.
+		 */
 		__make_request(mddev, split);
+		if (split != bio && bio_data_dir(bio) == READ) {
+			generic_make_request(bio);
+			break;
+		}
 	} while (split != bio);
 
 	/* In case raid10d snuck in to freeze_array */
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index bd925f4..c897669 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -1,3 +1,4 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
diff --git a/drivers/media/platform/msm/camera/cam_core/Makefile b/drivers/media/platform/msm/camera/cam_core/Makefile
new file mode 100644
index 0000000..417de13
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
new file mode 100644
index 0000000..56b34f5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "cam_context.h"
+
+static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = (struct cam_context *)context;
+
+	if (!ctx || !ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].irq_ops)
+		rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
+			evt_data);
+	else
+		pr_debug("%s: No function to handle event %d in dev %d, state %d\n",
+				__func__, evt_id, ctx->dev_hdl, ctx->state);
+	return rc;
+}
+
+int cam_context_handle_get_dev_info(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *info)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n'", __func__);
+		return -EINVAL;
+	}
+
+	if (!info) {
+		pr_err("%s: Invalid get device info payload.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.get_dev_info) {
+		rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
+			ctx, info);
+	} else {
+		pr_err("%s: No get device info in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_link(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!link) {
+		pr_err("%s: Invalid link payload.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.link) {
+		rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
+	} else {
+		pr_err("%s: No crm link in dev %d, state %d\n", __func__,
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_unlink(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready!\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!unlink) {
+		pr_err("%s: Invalid unlink payload.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.unlink) {
+		rc = ctx->state_machine[ctx->state].crm_ops.unlink(
+			ctx, unlink);
+	} else {
+		pr_err("%s: No crm unlink in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n'", __func__);
+		return -EINVAL;
+	}
+
+	if (!apply) {
+		pr_err("%s: Invalid apply request payload.\n'", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.apply_req) {
+		rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
+			apply);
+	} else {
+		pr_err("%s: No crm apply req in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid acquire device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
+			ctx, cmd);
+	} else {
+		pr_err("%s: No acquire device in dev %d, state %d\n",
+			__func__, cmd->dev_handle, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_release_dev(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid release device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.release_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
+			ctx, cmd);
+	} else {
+		pr_err("%s: No release device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_config_dev(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: context is not ready\n'", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid config device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.config_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
+			ctx, cmd);
+	} else {
+		pr_err("%s: No config device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_start_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid start device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.start_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev(
+			ctx, cmd);
+	else
+		/* start device can be optional for some driver */
+		pr_debug("%s: No start device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n'", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid stop device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
+			ctx, cmd);
+	else
+		/* stop device can be optional for some driver */
+		pr_warn("%s: No stop device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_init(struct cam_context *ctx,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_ctx_request *req_list,
+	uint32_t req_size)
+{
+	int i;
+
+	/* crm_node_intf is optional */
+	if (!ctx || !hw_mgr_intf || !req_list) {
+		pr_err("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	INIT_LIST_HEAD(&ctx->list);
+	mutex_init(&ctx->ctx_mutex);
+	spin_lock_init(&ctx->lock);
+
+	ctx->ctx_crm_intf = NULL;
+	ctx->crm_ctx_intf = crm_node_intf;
+	ctx->hw_mgr_intf = hw_mgr_intf;
+	ctx->irq_cb_intf = cam_context_handle_hw_event;
+
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	INIT_LIST_HEAD(&ctx->wait_req_list);
+	INIT_LIST_HEAD(&ctx->pending_req_list);
+	INIT_LIST_HEAD(&ctx->free_req_list);
+	ctx->req_list = req_list;
+	ctx->req_size = req_size;
+	for (i = 0; i < req_size; i++) {
+		INIT_LIST_HEAD(&ctx->req_list[i].list);
+		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+	}
+	ctx->state = CAM_CTX_AVAILABLE;
+	ctx->state_machine = NULL;
+	ctx->ctx_priv = NULL;
+
+	return 0;
+}
+
+int cam_context_deinit(struct cam_context *ctx)
+{
+	if (!ctx)
+		return -EINVAL;
+
+	/*
+	 * This is called from platform device remove.
+	 * Everything should have been released at this point,
+	 * so we just clear the context memory.
+	 */
+	if (ctx->state != CAM_CTX_AVAILABLE)
+		pr_err("%s: Device did not shutdown cleanly.\n", __func__);
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
new file mode 100644
index 0000000..c7329cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -0,0 +1,302 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CONTEXT_H_
+#define _CAM_CONTEXT_H_
+
+#include <linux/spinlock.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_hw_mgr_intf.h"
+
+/* Forward declarations */
+struct cam_context;
+
+/* max request number */
+#define CAM_CTX_REQ_MAX              20
+
+/**
+ * enum cam_context_state - Context top level states
+ *
+ */
+enum cam_context_state {
+	CAM_CTX_UNINIT               = 0,
+	CAM_CTX_AVAILABLE            = 1,
+	CAM_CTX_ACQUIRED             = 2,
+	CAM_CTX_READY                = 3,
+	CAM_CTX_ACTIVATED            = 4,
+	CAM_CTX_STATE_MAX            = 5,
+};
+
+/**
+ * struct cam_ctx_request - Common request structure for the context
+ *
+ * @list:                  Link list entry
+ * @status:                Request status
+ * @request_id:            Request id
+ * @req_priv:              Derived request object
+ *
+ */
+struct cam_ctx_request {
+	struct list_head   list;
+	uint32_t           status;
+	uint64_t           request_id;
+	void              *req_priv;
+};
+
+/**
+ * struct cam_ctx_ioctl_ops - Function table for handling IOCTL calls
+ *
+ * @acquire_dev:           Function pointer for acquire device
+ * @release_dev:           Function pointer for release device
+ * @config_dev:            Function pointer for config device
+ * @start_dev:             Function pointer for start device
+ * @stop_dev:              Function pointer for stop device
+ *
+ */
+struct cam_ctx_ioctl_ops {
+	int (*acquire_dev)(struct cam_context *ctx,
+			struct cam_acquire_dev_cmd *cmd);
+	int (*release_dev)(struct cam_context *ctx,
+			struct cam_release_dev_cmd *cmd);
+	int (*config_dev)(struct cam_context *ctx,
+			struct cam_config_dev_cmd *cmd);
+	int (*start_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+	int (*stop_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+};
+
+/**
+ * struct cam_ctx_crm_ops -  Function table for handling CRM to context calls
+ *
+ * @get_dev_info:          Get device information
+ * @link:                  Link the context
+ * @unlink:                Unlink the context
+ * @apply_req:             Apply setting for the context
+ *
+ */
+struct cam_ctx_crm_ops {
+	int (*get_dev_info)(struct cam_context *ctx,
+			struct cam_req_mgr_device_info *);
+	int (*link)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *link);
+	int (*unlink)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *unlink);
+	int (*apply_req)(struct cam_context *ctx,
+			struct cam_req_mgr_apply_request *apply);
+};
+
+
+/**
+ * struct cam_ctx_ops - Collection of the interface function tables
+ *
+ * @ioctl_ops:             Ioctl function table
+ * @crm_ops:               CRM to context interface function table
+ * @irq_ops:               Hardware event handle function
+ *
+ */
+struct cam_ctx_ops {
+	struct cam_ctx_ioctl_ops     ioctl_ops;
+	struct cam_ctx_crm_ops       crm_ops;
+	cam_hw_event_cb_func         irq_ops;
+};
+
+/**
+ * struct cam_context - camera context object for the subdevice node
+ *
+ * @list:                  Link list entry
+ * @session_hdl:           Session handle
+ * @dev_hdl:               Device handle
+ * @link_hdl:              Link handle
+ * @ctx_mutex:             Mutex for ioctl calls
+ * @lock:                  Spin lock
+ * @active_req_list:       Requests pending for done event
+ * @pending_req_list:      Requests pending for reg upd event
+ * @wait_req_list:         Requests waiting for apply
+ * @free_req_list:         Requests that are free
+ * @req_list:              Reference to the request storage
+ * @req_size:              Size of the request storage
+ * @hw_mgr_intf:           Context to HW interface
+ * @ctx_crm_intf:          Context to CRM interface
+ * @crm_ctx_intf:          CRM to context interface
+ * @irq_cb_intf:           HW to context callback interface
+ * @state:                 Current state for top level state machine
+ * @state_machine:         Top level state machine
+ * @ctx_priv:              Private context pointer
+ *
+ */
+struct cam_context {
+	struct list_head             list;
+	int32_t                      session_hdl;
+	int32_t                      dev_hdl;
+	int32_t                      link_hdl;
+
+	struct mutex                 ctx_mutex;
+	spinlock_t                   lock;
+
+	struct list_head             active_req_list;
+	struct list_head             pending_req_list;
+	struct list_head             wait_req_list;
+	struct list_head             free_req_list;
+	struct cam_ctx_request      *req_list;
+	uint32_t                     req_size;
+
+	struct cam_hw_mgr_intf      *hw_mgr_intf;
+	struct cam_req_mgr_crm_cb   *ctx_crm_intf;
+	struct cam_req_mgr_kmd_ops  *crm_ctx_intf;
+	cam_hw_event_cb_func         irq_cb_intf;
+
+	enum cam_context_state       state;
+	struct cam_ctx_ops          *state_machine;
+
+	void                        *ctx_priv;
+};
+
+/**
+ * cam_context_handle_get_dev_info()
+ *
+ * @brief:        Handle get device information command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @info:                  Device information returned
+ *
+ */
+int cam_context_handle_get_dev_info(struct cam_context *ctx,
+		struct cam_req_mgr_device_info *info);
+
+/**
+ * cam_context_handle_link()
+ *
+ * @brief:        Handle link command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @link:                  Link command payload
+ *
+ */
+int cam_context_handle_link(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * cam_context_handle_unlink()
+ *
+ * @brief:        Handle unlink command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @unlink:                Unlink command payload
+ *
+ */
+int cam_context_handle_unlink(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *unlink);
+
+/**
+ * cam_context_handle_apply_req()
+ *
+ * @brief:        Handle apply request command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @apply:                 Apply request command payload
+ *
+ */
+int cam_context_handle_apply_req(struct cam_context *ctx,
+		struct cam_req_mgr_apply_request *apply);
+
+
+/**
+ * cam_context_handle_acquire_dev()
+ *
+ * @brief:        Handle acquire device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Acquire device command payload
+ *
+ */
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+		struct cam_acquire_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_release_dev()
+ *
+ * @brief:        Handle release device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Release device command payload
+ *
+ */
+int cam_context_handle_release_dev(struct cam_context *ctx,
+		struct cam_release_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_config_dev()
+ *
+ * @brief:        Handle config device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Config device command payload
+ *
+ */
+int cam_context_handle_config_dev(struct cam_context *ctx,
+		struct cam_config_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_start_dev()
+ *
+ * @brief:        Handle start device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Start device command payload
+ *
+ */
+int cam_context_handle_start_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_stop_dev()
+ *
+ * @brief:        Handle stop device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Stop device command payload
+ *
+ */
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_deinit()
+ *
+ * @brief:        Camera context deinitialize function
+ *
+ * @ctx:                   Object pointer for cam_context
+ *
+ */
+int cam_context_deinit(struct cam_context *ctx);
+
+/**
+ * cam_context_init()
+ *
+ * @brief:        Camera context initialize function
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @crm_node_intf:         Function table for crm to context interface
+ * @hw_mgr_intf:           Function table for context to hw interface
+ * @req_list:              Requests storage
+ * @req_size:              Size of the request storage
+ *
+ */
+int cam_context_init(struct cam_context *ctx,
+		struct cam_req_mgr_kmd_ops *crm_node_intf,
+		struct cam_hw_mgr_intf *hw_mgr_intf,
+		struct cam_ctx_request *req_list,
+		uint32_t req_size);
+
+
+#endif  /* _CAM_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw.h b/drivers/media/platform/msm/camera/cam_core/cam_hw.h
new file mode 100644
index 0000000..d01a84a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_H_
+#define _CAM_HW_H_
+
+#include "cam_soc_util.h"
+
+/*
+ * This file declares Enums, Structures and APIs to be used as template
+ * when writing any HW driver in the camera subsystem.
+ */
+
+/* Hardware state enum */
+enum cam_hw_state {
+	CAM_HW_STATE_POWER_DOWN,
+	CAM_HW_STATE_POWER_UP,
+};
+
+/**
+ * struct cam_hw_info - Common hardware information
+ *
+ * @hw_mutex:              Hardware mutex
+ * @hw_lock:               Hardware spinlock
+ * @hw_complete:           Hardware Completion
+ * @open_count:            Count to track the HW enable from the client
+ * @hw_state:              Hardware state
+ * @soc_info:              Platform SOC properties for hardware
+ * @node_info:             Private HW data related to nodes
+ * @core_info:             Private HW data related to core logic
+ *
+ */
+struct cam_hw_info {
+	struct mutex                    hw_mutex;
+	spinlock_t                      hw_lock;
+	struct completion               hw_complete;
+	uint32_t                        open_count;
+	enum cam_hw_state               hw_state;
+	struct cam_hw_soc_info          soc_info;
+	void                           *node_info;
+	void                           *core_info;
+};
+
+#endif /* _CAM_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
new file mode 100644
index 0000000..3a997ae
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_INTF_H_
+#define _CAM_HW_INTF_H_
+
+#include <linux/types.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW driver and HW Manager.
+ */
+
+/**
+ * struct cam_hw_ops - Hardware layer interface functions
+ *
+ * @get_hw_caps:           Function pointer for get hw caps
+ * @init:                  Function pointer for initialize hardware
+ * @deinit:                Function pointer for deinitialize hardware
+ * @reset:                 Function pointer for reset hardware
+ * @reserve:               Function pointer for reserve hardware
+ * @release:               Function pointer for release hardware
+ * @start:                 Function pointer for start hardware
+ * @stop:                  Function pointer for stop hardware
+ * @read:                  Function pointer for read hardware registers
+ * @write:                 Function pointer for write hardware registers
+ * @process_cmd:           Function pointer for additional hardware controls
+ *
+ */
+struct cam_hw_ops {
+	int (*get_hw_caps)(void *hw_priv,
+		void *get_hw_cap_args, uint32_t arg_size);
+	int (*init)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*deinit)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*reset)(void *hw_priv,
+		void *reset_core_args, uint32_t arg_size);
+	int (*reserve)(void *hw_priv,
+		void *reserve_args, uint32_t arg_size);
+	int (*release)(void *hw_priv,
+		void *release_args, uint32_t arg_size);
+	int (*start)(void *hw_priv,
+		void *start_args, uint32_t arg_size);
+	int (*stop)(void *hw_priv,
+		void *stop_args, uint32_t arg_size);
+	int (*read)(void *hw_priv,
+		void *read_args, uint32_t arg_size);
+	int (*write)(void *hw_priv,
+		void *write_args, uint32_t arg_size);
+	int (*process_cmd)(void *hw_priv,
+		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+};
+
+/**
+ * struct cam_hw_intf - Common hardware node
+ *
+ * @hw_type:               Hardware type
+ * @hw_idx:                Hardware ID
+ * @hw_ops:                Hardware interface function table
+ * @hw_priv:               Private hardware node pointer
+ *
+ */
+struct cam_hw_intf {
+	uint32_t                     hw_type;
+	uint32_t                     hw_idx;
+	struct cam_hw_ops            hw_ops;
+	void                        *hw_priv;
+};
+
+#endif /* _CAM_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
new file mode 100644
index 0000000..db605e7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_MGR_INTF_H_
+#define _CAM_HW_MGR_INTF_H_
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW Manager and Context.
+ */
+
+
+/* maximum number of contexts */
+#define CAM_CTX_MAX                         8
+
+/* maximum buf done irqs */
+#define CAM_NUM_OUT_PER_COMP_IRQ_MAX        12
+
+/* hardware event callback function type */
+typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
+	void *evt_data);
+
+/**
+ * struct cam_hw_update_entry - Entry for hardware config
+ *
+ * @handle:                Memory handle for the configuration
+ * @offset:                Memory offset
+ * @len:                   Size of the configuration
+ * @flags:                 Flags for the config entry (e.g. DMI)
+ *
+ */
+struct cam_hw_update_entry {
+	int                handle;
+	uint32_t           offset;
+	uint32_t           len;
+	uint32_t           flags;
+};
+
+/**
+ * struct cam_hw_fence_map_entry - Entry for the resource to sync id map
+ *
+ * @resource_handle:       Resource port id for the buffer
+ * @sync_id:               Sync id
+ *
+ */
+struct cam_hw_fence_map_entry {
+	uint32_t           resource_handle;
+	int32_t            sync_id;
+};
+
+/**
+ * struct cam_hw_done_event_data - Payload for hw done event
+ *
+ * @num_handles:           number of handles in the event
+ * @resource_handle:       List of the resource handles
+ * @timestamp:             time stamp
+ *
+ */
+struct cam_hw_done_event_data {
+	uint32_t           num_handles;
+	uint32_t           resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	struct timeval     timestamp;
+};
+
+/**
+ * struct cam_hw_acquire_args - Payload for acquire command
+ *
+ * @context_data:          Context data pointer for the callback function
+ * @event_cb:              Callback function array
+ * @num_acq:               Total number of acquire in the payload
+ * @acquire_info:          Acquired resource array pointer
+ * @ctxt_to_hw_map:        HW context (returned)
+ *
+ */
+struct cam_hw_acquire_args {
+	void                        *context_data;
+	cam_hw_event_cb_func         event_cb;
+	uint32_t                     num_acq;
+	uint64_t                     acquire_info;
+	void                        *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_release_args - Payload for release command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ *
+ */
+struct cam_hw_release_args {
+	void              *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_start_args - Payload for start command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of Hardware configuration
+ * @hw_update_entries:     Hardware configuration list
+ *
+ */
+struct cam_hw_start_args {
+	void                        *ctxt_to_hw_map;
+	uint32_t                     num_hw_update_entries;
+	struct cam_hw_update_entry  *hw_update_entries;
+};
+
+/**
+ * struct cam_hw_stop_args - Payload for stop command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ *
+ */
+struct cam_hw_stop_args {
+	void              *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_prepare_update_args - Payload for prepare command
+ *
+ * @packet:                CSL packet from user mode driver
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @max_hw_update_entries: Maximum hardware update entries supported
+ * @hw_update_entries:     Actual hardware update configuration (returned)
+ * @num_hw_update_entries: Number of actual hardware update entries (returned)
+ * @max_out_map_entries:   Maximum output fence mapping supported
+ * @out_map_entries:       Actual output fence mapping list (returned)
+ * @num_out_map_entries:   Number of actual output fence mapping (returned)
+ * @max_in_map_entries:    Maximum input fence mapping supported
+ * @in_map_entries:        Actual input fence mapping list (returned)
+ * @num_in_map_entries:    Number of actual input fence mappings (returned)
+ *
+ */
+struct cam_hw_prepare_update_args {
+	struct cam_packet              *packet;
+	void                           *ctxt_to_hw_map;
+	uint32_t                        max_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	uint32_t                        num_hw_update_entries;
+	uint32_t                        max_out_map_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	uint32_t                        max_in_map_entries;
+	struct cam_hw_fence_map_entry  *in_map_entries;
+	uint32_t                        num_in_map_entries;
+};
+
+/**
+ * struct cam_hw_config_args - Payload for config command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of hardware update entries
+ * @hw_update_entries:     Hardware update list
+ *
+ */
+struct cam_hw_config_args {
+	void                        *ctxt_to_hw_map;
+	uint32_t                     num_hw_update_entries;
+	struct cam_hw_update_entry  *hw_update_entries;
+};
+
+/**
+ * struct cam_hw_mgr_intf - HW manager interface
+ *
+ * @hw_mgr_priv:           HW manager object
+ * @hw_get_caps:           Function pointer for get hw caps
+ *                               args = cam_query_cap_cmd
+ * @hw_acquire:            Function pointer for acquire hw resources
+ *                               args = cam_hw_acquire_args
+ * @hw_release:            Function pointer for release hw device resource
+ *                               args = cam_hw_release_args
+ * @hw_start:              Function pointer for start hw devices
+ *                               args = cam_hw_start_args
+ * @hw_stop:               Function pointer for stop hw devices
+ *                               args = cam_hw_stop_args
+ * @hw_prepare_update:     Function pointer for prepare hw update for hw devices
+ *                               args = cam_hw_prepare_update_args
+ * @hw_config:             Function pointer for configure hw devices
+ *                               args = cam_hw_config_args
+ * @hw_read:               Function pointer for read hardware registers
+ * @hw_write:              Function pointer for write hardware registers
+ * @hw_cmd:                Function pointer for any customized commands for the
+ *                         hardware manager
+ *
+ */
+struct cam_hw_mgr_intf {
+	void *hw_mgr_priv;
+
+	int (*hw_get_caps)(void *hw_priv, void *hw_caps_args);
+	int (*hw_acquire)(void *hw_priv, void *hw_acquire_args);
+	int (*hw_release)(void *hw_priv, void *hw_release_args);
+	int (*hw_start)(void *hw_priv, void *hw_start_args);
+	int (*hw_stop)(void *hw_priv, void *hw_stop_args);
+	int (*hw_prepare_update)(void *hw_priv, void *hw_prepare_update_args);
+	int (*hw_config)(void *hw_priv, void *hw_config_args);
+	int (*hw_read)(void *hw_priv, void *read_args);
+	int (*hw_write)(void *hw_priv, void *write_args);
+	int (*hw_cmd)(void *hw_priv, void *write_args);
+};
+
+#endif /* _CAM_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
new file mode 100644
index 0000000..ef60822
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -0,0 +1,413 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+
+#include "cam_node.h"
+
+static int __cam_node_handle_query_cap(struct cam_node *node,
+	struct cam_query_cap_cmd *query)
+{
+	int rc = -EFAULT;
+
+	if (!query)
+		return -EINVAL;
+
+	if (node->hw_mgr_intf.hw_get_caps) {
+		rc = node->hw_mgr_intf.hw_get_caps(
+			node->hw_mgr_intf.hw_mgr_priv, query);
+	}
+	return rc;
+}
+
+static int __cam_node_handle_acquire_dev(struct cam_node *node,
+	struct cam_acquire_dev_cmd *acquire)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!acquire)
+		return -EINVAL;
+
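+	/* Take a context from the node's free pool for this acquire */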
+	mutex_lock(&node->list_mutex);
+	if (!list_empty(&node->free_ctx_list)) {
+		ctx = list_first_entry(&node->free_ctx_list,
+			struct cam_context, list);
+		list_del_init(&ctx->list);
+	}
+	mutex_unlock(&node->list_mutex);
+
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = cam_context_handle_acquire_dev(ctx, acquire);
+	if (rc) {
+		pr_err("%s: Acquire device failed\n", __func__);
+		goto free_ctx;
+	}
+
+	return 0;
+free_ctx:
+	mutex_lock(&node->list_mutex);
+	list_add_tail(&ctx->list, &node->free_ctx_list);
+	mutex_unlock(&node->list_mutex);
+err:
+	return rc;
+}
+
+static int __cam_node_handle_start_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *start)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!start)
+		return -EINVAL;
+
+	if (start->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (start->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, start->dev_handle);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_start_dev(ctx, start);
+}
+
+static int __cam_node_handle_stop_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *stop)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!stop)
+		return -EINVAL;
+
+	if (stop->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (stop->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, stop->dev_handle);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_stop_dev(ctx, stop);
+}
+
+static int __cam_node_handle_config_dev(struct cam_node *node,
+	struct cam_config_dev_cmd *config)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!config)
+		return -EINVAL;
+
+	if (config->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (config->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, config->dev_handle);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_config_dev(ctx, config);
+}
+
+static int __cam_node_handle_release_dev(struct cam_node *node,
+	struct cam_release_dev_cmd *release)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, release->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_release_dev(ctx, release);
+	if (rc)
+		pr_err("%s: context release failed\n", __func__);
+
+	rc = cam_destroy_device_hdl(release->dev_handle);
+	if (rc)
+		pr_err("%s: failed to destroy device handle\n", __func__);
+
+	mutex_lock(&node->list_mutex);
+	list_add_tail(&ctx->list, &node->free_ctx_list);
+	mutex_unlock(&node->list_mutex);
+	return rc;
+}
+
+static int __cam_node_get_dev_info(struct cam_req_mgr_device_info *info)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, info->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_get_dev_info(ctx, info);
+}
+
+static int __cam_node_link_setup(struct cam_req_mgr_core_dev_link_setup *setup)
+{
+	int rc;
+	struct cam_context *ctx = NULL;
+
+	if (!setup)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, setup->dev_hdl);
+		return -EINVAL;
+	}
+
+	if (setup->link_enable)
+		rc = cam_context_handle_link(ctx, setup);
+	else
+		rc = cam_context_handle_unlink(ctx, setup);
+
+	return rc;
+}
+
+static int __cam_node_apply_req(struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, apply->dev_hdl);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_apply_req(ctx, apply);
+}
+
+int cam_node_deinit(struct cam_node *node)
+{
+	if (node)
+		memset(node, 0, sizeof(*node));
+
+	pr_debug("%s: deinit complete!\n", __func__);
+	return 0;
+}
+
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name)
+{
+	int rc = 0;
+	int i;
+
+	if (!node || !hw_mgr_intf ||
+		sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)) {
+		return -EINVAL;
+	}
+
+	memset(node, 0, sizeof(*node));
+
+	strlcpy(node->name, name, sizeof(node->name));
+
+	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
+
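+	/*
+	 * Wire up the CRM callbacks; each one resolves its target context
+	 * from the device handle before forwarding the call.
+	 */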
+	node->crm_node_intf.apply_req = __cam_node_apply_req;
+	node->crm_node_intf.get_dev_info = __cam_node_get_dev_info;
+	node->crm_node_intf.link_setup = __cam_node_link_setup;
+
+	mutex_init(&node->list_mutex);
+	INIT_LIST_HEAD(&node->free_ctx_list);
+	node->ctx_list = ctx_list;
+	node->ctx_size = ctx_size;
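+	/*
+	 * Every context handed to the node must already be initialized
+	 * (i.e. have a state machine); add each one to the free pool.
+	 */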
+	for (i = 0; i < ctx_size; i++) {
+		if (!ctx_list[i].state_machine) {
+			pr_err("%s: camera context %d is not initialized!\n",
+				__func__, i);
+			rc = -EINVAL;
+			goto err;
+		}
+		INIT_LIST_HEAD(&ctx_list[i].list);
+		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
+	}
+
+	node->state = CAM_NODE_STATE_INIT;
+err:
+	pr_debug("%s: Exit. (rc = %d)\n", __func__, rc);
+	return rc;
+}
+
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	pr_debug("%s: handle cmd %d\n", __func__, cmd->op_code);
+
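+	/*
+	 * Copy the ioctl payload in from user space, dispatch it to the
+	 * matching handler, and copy results back for commands that
+	 * return data (query cap, acquire).
+	 */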
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_query_cap_cmd query;
+
+		if (copy_from_user(&query, (void __user *)cmd->handle,
+			sizeof(query))) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = __cam_node_handle_query_cap(node, &query);
+		if (rc) {
+			pr_err("%s: query cap failed (rc = %d)\n",
+				__func__, rc);
+			break;
+		}
+		if (copy_to_user((void __user *)cmd->handle, &query,
+			sizeof(query)))
+			rc = -EFAULT;
+		break;
+	}
+	case CAM_ACQUIRE_DEV: {
+		struct cam_acquire_dev_cmd acquire;
+
+		if (copy_from_user(&acquire, (void __user *)cmd->handle,
+			sizeof(acquire))) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = __cam_node_handle_acquire_dev(node, &acquire);
+		if (rc) {
+			pr_err("%s: acquire device failed (rc = %d)\n",
+				__func__, rc);
+			break;
+		}
+		if (copy_to_user((void __user *)cmd->handle, &acquire,
+			sizeof(acquire)))
+			rc = -EFAULT;
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_start_stop_dev_cmd start;
+
+		if (copy_from_user(&start, (void __user *)cmd->handle,
+			sizeof(start)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_start_dev(node, &start);
+			if (rc)
+				pr_err("%s: start device failed (rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	case CAM_STOP_DEV: {
+		struct cam_start_stop_dev_cmd stop;
+
+		if (copy_from_user(&stop, (void __user *)cmd->handle,
+			sizeof(stop)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_stop_dev(node, &stop);
+			if (rc)
+				pr_err("%s: stop device failed (rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config, (void __user *)cmd->handle,
+			sizeof(config)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_config_dev(node, &config);
+			if (rc)
+				pr_err("%s: config device failed (rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (copy_from_user(&release, (void __user *)cmd->handle,
+			sizeof(release)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_release_dev(node, &release);
+			if (rc)
+				pr_err("%s: release device failed (rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	default:
+		pr_err("Unknown op code %d\n", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.h b/drivers/media/platform/msm/camera/cam_core/cam_node.h
new file mode 100644
index 0000000..6e4a641
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_NODE_H_
+#define _CAM_NODE_H_
+
+#include "cam_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+#define CAM_NODE_NAME_LENGTH_MAX        256
+
+#define CAM_NODE_STATE_UNINIT           0
+#define CAM_NODE_STATE_INIT             1
+
+/**
+ * struct cam_node - Singleton Node for camera HW devices
+ *
+ * @name:                  Name for struct cam_node
+ * @state:                 Node state:
+ *                            0 = uninitialized, 1 = initialized
+ * @list_mutex:            Mutex for the context pool
+ * @free_ctx_list:         Free context pool list
+ * @ctx_list:              Context list
+ * @ctx_size:              Context list size
+ * @hw_mgr_intf:           Interface for cam_node to HW
+ * @crm_node_intf:         Interface for the CRM to cam_node
+ *
+ */
+struct cam_node {
+	char                         name[CAM_NODE_NAME_LENGTH_MAX];
+	uint32_t                     state;
+
+	/* context pool */
+	struct mutex                 list_mutex;
+	struct list_head             free_ctx_list;
+	struct cam_context          *ctx_list;
+	uint32_t                     ctx_size;
+
+	/* interfaces */
+	struct cam_hw_mgr_intf       hw_mgr_intf;
+	struct cam_req_mgr_kmd_ops   crm_node_intf;
+};
+
+/**
+ * cam_node_handle_ioctl()
+ *
+ * @brief:       Handle ioctl commands
+ *
+ * @node:                  Node handle
+ * @cmd:                   IOCTL command
+ *
+ */
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd);
+
+/**
+ * cam_node_deinit()
+ *
+ * @brief:       Deinitialization function for the Node interface
+ *
+ * @node:                  Node handle
+ *
+ */
+int cam_node_deinit(struct cam_node *node);
+
+/**
+ * cam_node_init()
+ *
+ * @brief:       Initialization function for the Node interface.
+ *
+ * @node:                  Cam_node pointer
+ * @hw_mgr_intf:           HW manager interface blob
+ * @ctx_list:              List of cam_contexts to be added
+ * @ctx_size:              Number of contexts in ctx_list
+ * @name:                  Name for the node
+ *
+ */
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name);
+
+#endif /* _CAM_NODE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
new file mode 100644
index 0000000..03b18cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -0,0 +1,143 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+
+/**
+ * cam_subdev_subscribe_event()
+ *
+ * @brief: function to subscribe to v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_subscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
+}
+
+/**
+ * cam_subdev_unsubscribe_event()
+ *
+ * @brief: function to unsubscribe from v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_unsubscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static long cam_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd,
+	void *arg)
+{
+	long rc;
+	struct cam_node *node =
+		(struct cam_node *) v4l2_get_subdevdata(sd);
+
+	if (!node || node->state == CAM_NODE_STATE_UNINIT) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_node_handle_ioctl(node,
+			(struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for %s!\n", cmd,
+			node->name);
+		rc = -EINVAL;
+	}
+end:
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	return cam_subdev_ioctl(sd, cmd, compat_ptr(arg));
+}
+#endif
+
+const struct v4l2_subdev_core_ops cam_subdev_core_ops = {
+	.ioctl = cam_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_subdev_compat_ioctl,
+#endif
+	.subscribe_event = cam_subdev_subscribe_event,
+	.unsubscribe_event = cam_subdev_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops cam_subdev_ops = {
+	.core = &cam_subdev_core_ops,
+};
+
+int cam_subdev_remove(struct cam_subdev *sd)
+{
+	if (!sd)
+		return -EINVAL;
+
+	cam_unregister_subdev(sd);
+	cam_node_deinit((struct cam_node *)sd->token);
+	kfree(sd->token);
+
+	return 0;
+}
+
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type)
+{
+	int rc;
+	struct cam_node *node = NULL;
+
+	if (!sd || !pdev || !name) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/* Setup camera v4l2 subdevice */
+	sd->pdev = pdev;
+	sd->name = name;
+	sd->ops = &cam_subdev_ops;
+	sd->token = node;
+	sd->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	sd->ent_function = dev_type;
+	rc = cam_register_subdev(sd);
+	if (rc) {
+		pr_err("%s: cam_register_subdev() failed for dev: %s!\n",
+			__func__, sd->name);
+		goto err;
+	}
+	platform_set_drvdata(sd->pdev, sd);
+	return rc;
+err:
+	kfree(node);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_dev_mgr_util.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_dev_mgr_util.h
deleted file mode 100644
index 69970b5..0000000
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_dev_mgr_util.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _CAM_DEV_MGR_UTIL_H_
-#define _CAM_DEV_MGR_UTIL_H_
-
-#define CAM_SUBDEVICE_EVENT_MAX 30
-
-#include <linux/types.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-ioctl.h>
-
-/**
- * struct cam_subdev - describes a camera sub-device
- *
- * @sd: struct v4l2_subdev
- * @ops: struct v4l2_subdev_ops
- * @internal_ops: struct v4l2_subdev_internal_ops
- * @name: Name of the sub-device. Please notice that the name must be unique.
- * @sd_flags: subdev flags. Can be:
- *   %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if this subdev needs a
- *   device node;
- *   %V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if this subdev generates
- *   events.
- * @token: pointer to cookie of the client driver
- * @ent_function: media entity function type. Can be:
- *   %CAM_IFE_DEVICE_TYPE - identifies as IFE device;
- *   %CAM_ICP_DEVICE_TYPE - identifies as ICP device.
- * Each instance of a subdev driver should create this struct, either
- * stand-alone or embedded in a larger struct.
- *
- * This structure should be initialized/registered by cam_register_subdev
- */
-struct cam_subdev {
-	struct v4l2_subdev sd;
-	const struct v4l2_subdev_ops *ops;
-	const struct v4l2_subdev_internal_ops *internal_ops;
-	char *name;
-	u32 sd_flags;
-	void *token;
-	u32 ent_function;
-};
-
-/**
- * cam_register_subdev()
- *
- * @brief:  Registration function for camera subdevice
- *
- * @sd: pointer to struct cam_subdev.
- */
-int cam_register_subdev(struct cam_subdev *sd);
-
-/**
- * cam_unregister_subdev()
- *
- * @brief:  Unregistration function for camera subdevice
- *
- * @sd: pointer to struct cam_subdev.
- */
-int cam_unregister_subdev(struct cam_subdev *sd);
-
-/**
- * cam_send_event()
- *
- * @brief: Inline function to sent event to user space
- *
- * @csd: pointer to struct cam_subdev.
- * @ev: pointer to struct v4l2_event.
- */
-static inline int cam_send_event(struct cam_subdev *csd,
-	const struct v4l2_event *ev)
-{
-	if (!csd || !ev)
-		return -EINVAL;
-
-	v4l2_event_queue(csd->sd.devnode, ev);
-
-	return 0;
-}
-
-/**
- * cam_get_subdev_data()
- *
- * @brief: Inline function to retrieve the private data
- *
- * @csd: pointer to struct cam_subdev.
- */
-static inline void *cam_get_subdev_data(struct cam_subdev *csd)
-{
-	if (!csd)
-		return ERR_PTR(-EINVAL);
-
-	return v4l2_get_subdevdata(&csd->sd);
-}
-
-/**
- * cam_sd_subscribe_event()
- *
- * @brief: Inline function to subscribe to v4l2 events
- *
- * @sd: pointer to struct v4l2_subdev.
- * @fh: pointer to struct v4l2_fh.
- * @sub: pointer to struct v4l2_event_subscription.
- */
-static inline int cam_sd_subscribe_event(struct v4l2_subdev *sd,
-	struct v4l2_fh *fh,
-	struct v4l2_event_subscription *sub)
-{
-	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
-}
-
-/**
- * cam_sd_unsubscribe_event()
- *
- * @brief: Inline function to unsubscribe from v4l2 events
- *
- * @sd: pointer to struct v4l2_subdev.
- * @fh: pointer to struct v4l2_fh.
- * @sub: pointer to struct v4l2_event_subscription.
- */
-static inline int cam_sd_unsubscribe_event(struct v4l2_subdev *sd,
-	struct v4l2_fh *fh,
-	struct v4l2_event_subscription *sub)
-{
-	return v4l2_event_unsubscribe(fh, sub);
-}
-#endif /* _CAM_DEV_MGR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 2dba2c8..f3af1bd 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -23,7 +23,7 @@
 #include "cam_req_mgr_dev.h"
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
-#include "cam_dev_mgr_util.h"
+#include "cam_subdev.h"
 
 #define CAM_REQ_MGR_EVENT_MAX 30
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
new file mode 100644
index 0000000..78f2223
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SUBDEV_H_
+#define _CAM_SUBDEV_H_
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#define CAM_SUBDEVICE_EVENT_MAX 30
+
+/**
+ * struct cam_subdev - describes a camera sub-device
+ *
+ * @pdev:                  Pointer to the platform device
+ * @sd:                    V4l2 subdevice
+ * @ops:                   V4l2 subdevice operations
+ * @internal_ops:          V4l2 subdevice internal operations
+ * @name:                  Name of the sub-device. Please notice that the name
+ *                             must be unique.
+ * @sd_flags:              Subdev flags. Can be:
+ *                             %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if
+ *                                 this subdev needs a device node.
+ *                             %V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if
+ *                                 this subdev generates events.
+ * @token:                 Pointer to cookie of the client driver
+ * @ent_function:          Media entity function type. Can be:
+ *                             %CAM_IFE_DEVICE_TYPE - identifies as IFE device.
+ *                             %CAM_ICP_DEVICE_TYPE - identifies as ICP device.
+ *
+ * Each instance of a subdev driver should create this struct, either
+ * stand-alone or embedded in a larger struct. This structure should be
+ * initialized/registered by cam_register_subdev
+ *
+ */
+struct cam_subdev {
+	struct platform_device                *pdev;
+	struct v4l2_subdev                     sd;
+	const struct v4l2_subdev_ops          *ops;
+	const struct v4l2_subdev_internal_ops *internal_ops;
+	char                                  *name;
+	u32                                    sd_flags;
+	void                                  *token;
+	u32                                    ent_function;
+};
+
+/**
+ * cam_subdev_probe()
+ *
+ * @brief:      Camera Subdevice node probe function for v4l2 setup
+ *
+ * @sd:                    Camera subdevice object
+ * @name:                  Name of the subdevice node
+ * @dev_type:              Subdevice node type
+ *
+ */
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type);
+
+/**
+ * cam_subdev_remove()
+ *
+ * @brief:      Called when subdevice node is unloaded
+ *
+ * @sd:                    Camera subdevice node object
+ *
+ */
+int cam_subdev_remove(struct cam_subdev *sd);
+
+/**
+ * cam_register_subdev()
+ *
+ * @brief:   This is the common utility function to be called by each camera
+ *           subdevice node when it tries to register itself to the camera
+ *           request manager
+ *
+ * @sd:                    Pointer to struct cam_subdev.
+ */
+int cam_register_subdev(struct cam_subdev *sd);
+
+/**
+ * cam_unregister_subdev()
+ *
+ * @brief:    This is the common utility function to be called by each camera
+ *            subdevice node when it tries to unregister itself from the
+ *            camera request manager
+ *
+ * @sd:                    Pointer to struct cam_subdev.
+ */
+int cam_unregister_subdev(struct cam_subdev *sd);
+
+#endif /* _CAM_SUBDEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
new file mode 100644
index 0000000..e3012cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
new file mode 100644
index 0000000..a736148
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -0,0 +1,1024 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SYNC %s:%d " fmt, __func__, __LINE__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/irqflags.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include "cam_sync_util.h"
+
+struct sync_device *sync_dev;
+
+int cam_sync_create(int32_t *sync_obj, const char *name)
+{
+	int rc;
+	long idx;
+
+	do {
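+	/*
+	 * Pick a free slot from the bitmap and claim it only if its row
+	 * spinlock can be taken right away; otherwise rescan.
+	 */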
+		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+		if (idx >= CAM_SYNC_MAX_OBJS)
+			return -ENOMEM;
+	} while (!spin_trylock_bh(&sync_dev->row_spinlocks[idx]));
+
+	rc = cam_sync_init_object(sync_dev->sync_table, idx, name);
+	if (rc) {
+		pr_err("Error: Unable to init row at idx = %ld\n", idx);
+		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+		return -EINVAL;
+	}
+
+	set_bit(idx, sync_dev->bitmap);
+	*sync_obj = idx;
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	return rc;
+}
+
+int cam_sync_register_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	struct sync_callback_info *sync_cb;
+	struct sync_callback_info *cb_info;
+	struct sync_callback_info *temp_cb;
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		pr_err("Error: accessing an uninitialized sync obj %d\n",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
+	if (!sync_cb) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -ENOMEM;
+	}
+
+	/* Trigger callback if sync object is already in SIGNALED state */
+	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
+		sync_cb->callback_func = cb_func;
+		sync_cb->cb_data = userdata;
+		sync_cb->sync_obj = sync_obj;
+		INIT_WORK(&sync_cb->cb_dispatch_work,
+			cam_sync_util_cb_dispatch);
+
+		sync_cb->status = row->state;
+		queue_work(sync_dev->work_queue,
+			&sync_cb->cb_dispatch_work);
+
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return 0;
+	}
+
+	/* Don't register if callback was registered earlier */
+	list_for_each_entry_safe(cb_info, temp_cb, &row->callback_list, list) {
+		if (cb_info->callback_func == cb_func &&
+			cb_info->cb_data == userdata) {
+			kfree(sync_cb);
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+			return -EALREADY;
+		}
+	}
+
+	sync_cb->callback_func = cb_func;
+	sync_cb->cb_data = userdata;
+	sync_cb->sync_obj = sync_obj;
+	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
+	list_add_tail(&sync_cb->list, &row->callback_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+	return 0;
+}
+
+int cam_sync_deregister_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+	struct sync_callback_info *sync_cb, *temp;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
+		if (sync_cb->callback_func == cb_func &&
+			sync_cb->cb_data == userdata) {
+			list_del_init(&sync_cb->list);
+			kfree(sync_cb);
+		}
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return 0;
+}
+
+int cam_sync_signal(int32_t sync_obj, uint32_t status)
+{
+	int rc;
+	struct sync_table_row *row = NULL;
+	struct sync_table_row *parent_row = NULL;
+	struct sync_callback_info *sync_cb;
+	struct sync_user_payload *payload_info;
+	struct sync_parent_info *parent_info;
+	struct list_head sync_list;
+	struct cam_signalable_info *list_info = NULL;
+	struct cam_signalable_info *temp_list_info = NULL;
+
+	/* Objects to be signaled will be added into this list */
+	INIT_LIST_HEAD(&sync_list);
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	if (row->type == CAM_SYNC_TYPE_GROUP) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		pr_err("Error: Signaling a GROUP sync object = %d\n",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	if (row->state != CAM_SYNC_STATE_ACTIVE) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		pr_err("Error: Sync object already signaled, sync_obj = %d\n",
+			sync_obj);
+		return -EALREADY;
+	}
+
+	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
+		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		pr_err("Error: signaling with undefined status = %d\n",
+			status);
+		return -EINVAL;
+	}
+
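+	/* Record the final state and queue this object for dispatch */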
+	row->state = status;
+	rc = cam_sync_util_add_to_signalable_list(sync_obj, status, &sync_list);
+	if (rc < 0) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return rc;
+	}
+
+	/*
+	 * Now iterate over all parents of this object and if they too need to
+	 * be signaled add them to the list
+	 */
+	list_for_each_entry(parent_info,
+		&row->parents_list,
+		list) {
+		parent_row = sync_dev->sync_table + parent_info->sync_id;
+		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+		parent_row->remaining--;
+
+		parent_row->state = cam_sync_util_get_state(
+			parent_row->state,
+			status);
+
+		if (!parent_row->remaining) {
+			rc = cam_sync_util_add_to_signalable_list
+				(parent_info->sync_id,
+					parent_row->state,
+					&sync_list);
+			if (rc < 0) {
+				spin_unlock_bh(
+					&sync_dev->row_spinlocks[
+						parent_info->sync_id]);
+				return rc;
+			}
+		}
+		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+	}
+
+	/*
+	 * Now dispatch the various sync objects collected so far, in our
+	 * list
+	 */
+	list_for_each_entry_safe(list_info,
+		temp_list_info,
+		&sync_list,
+		list) {
+		struct sync_table_row *signalable_row = NULL;
+		struct sync_callback_info *temp_sync_cb;
+		struct sync_user_payload *temp_payload_info;
+
+		signalable_row = sync_dev->sync_table + list_info->sync_obj;
+		/* Dispatch kernel callbacks if any were registered earlier */
+		list_for_each_entry_safe(sync_cb,
+		temp_sync_cb, &signalable_row->callback_list, list) {
+			sync_cb->status = list_info->status;
+			queue_work(sync_dev->work_queue,
+				&sync_cb->cb_dispatch_work);
+			list_del_init(&sync_cb->list);
+		}
+
+		/* Dispatch user payloads if any were registered earlier */
+		list_for_each_entry_safe(payload_info, temp_payload_info,
+		&signalable_row->user_payload_list, list) {
+			spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+			if (!sync_dev->cam_sync_eventq) {
+				spin_unlock_bh(
+				&sync_dev->cam_sync_eventq_lock);
+				break;
+			}
+			spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+			cam_sync_util_send_v4l2_event(
+				CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+				list_info->sync_obj,
+				list_info->status,
+				payload_info->payload_data,
+				CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+			list_del_init(&payload_info->list);
+			/*
+			 * We can free the list node here because
+			 * sending V4L event will make a deep copy
+			 * anyway
+			 */
+			kfree(payload_info);
+		}
+
+		/*
+		 * This needs to be done because we want to unblock anyone
+		 * who might be blocked and waiting on this sync object
+		 */
+		complete_all(&signalable_row->signaled);
+
+		list_del_init(&list_info->list);
+		kfree(list_info);
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+	return rc;
+}
+
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
+{
+	int rc;
+	long idx = 0;
+
+	rc = cam_sync_util_validate_merge(sync_obj,
+		num_objs);
+	if (rc < 0) {
+		pr_err("Validation failed, Merge not allowed\n");
+		return -EINVAL;
+	}
+
+	rc = cam_sync_util_find_and_set_empty_row(sync_dev, &idx);
+	if (rc < 0) {
+		pr_err("Error: Unable to find empty row, table full\n");
+		return -EINVAL;
+	}
+
+	if (idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) {
+		pr_err("Error: Invalid empty row index returned = %ld\n", idx);
+		return -EINVAL;
+	}
+
+	rc = cam_sync_init_group_object(sync_dev->sync_table,
+		idx, sync_obj,
+		num_objs);
+
+	if (rc < 0) {
+		pr_err("Error: Unable to init row at idx = %ld\n", idx);
+		return -EINVAL;
+	}
+
+	*merged_obj = idx;
+
+	return 0;
+}
+
+int cam_sync_destroy(int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		pr_err("Error: accessing an uninitialized sync obj: idx = %d\n",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
+	return 0;
+}
+
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
+{
+	unsigned long timeleft;
+	int rc = -EINVAL;
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	timeleft = wait_for_completion_timeout(&row->signaled,
+		msecs_to_jiffies(timeout_ms));
+
+	if (!timeleft) {
+		pr_err("Error: cam_sync_wait() timed out for sync obj = %d\n",
+			sync_obj);
+		rc = -ETIMEDOUT;
+	} else {
+		switch (row->state) {
+		case CAM_SYNC_STATE_INVALID:
+		case CAM_SYNC_STATE_ACTIVE:
+		case CAM_SYNC_STATE_SIGNALED_ERROR:
+			pr_err("Error: Wait on invalid state = %d, obj = %d\n",
+				row->state, sync_obj);
+			rc = -EINVAL;
+			break;
+		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+			rc = 0;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_info sync_create;
+	int result;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_create,
+		(void *)k_ioctl->ioctl_ptr,
+		k_ioctl->size))
+		return -EFAULT;
+
+	result = cam_sync_create(&sync_create.sync_obj,
+		sync_create.name);
+
+	if (!result)
+		if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+			&sync_create,
+			k_ioctl->size))
+			return -EFAULT;
+
+	return result;
+}
+
+static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_signal sync_signal;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_signal))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_signal,
+		(void *)k_ioctl->ioctl_ptr,
+		k_ioctl->size))
+		return -EFAULT;
+
+	return cam_sync_signal(sync_signal.sync_obj,
+		sync_signal.sync_state);
+}
+
+static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_merge sync_merge;
+	uint32_t *sync_objs;
+	uint32_t num_objs;
+	uint32_t size;
+	int result;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_merge))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_merge,
+		(void *)k_ioctl->ioctl_ptr,
+		k_ioctl->size))
+		return -EFAULT;
+
+	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
+		return -EINVAL;
+
+	size = sizeof(uint32_t) * sync_merge.num_objs;
+	sync_objs = kzalloc(size, GFP_ATOMIC);
+
+	if (!sync_objs)
+		return -ENOMEM;
+
+	if (copy_from_user(sync_objs,
+	(void *)sync_merge.sync_objs,
+	sizeof(uint32_t) * sync_merge.num_objs)) {
+		kfree(sync_objs);
+		return -EFAULT;
+	}
+
+	num_objs = sync_merge.num_objs;
+
+	result = cam_sync_merge(sync_objs,
+		num_objs,
+		&sync_merge.merged);
+
+	if (!result)
+		if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+			&sync_merge,
+			k_ioctl->size)) {
+			kfree(sync_objs);
+			return -EFAULT;
+		}
+
+	kfree(sync_objs);
+
+	return result;
+}
+
+static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_wait sync_wait;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_wait))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_wait,
+		(void *)k_ioctl->ioctl_ptr,
+		k_ioctl->size))
+		return -EFAULT;
+
+	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
+		sync_wait.timeout_ms);
+
+	return 0;
+}
+
+static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_info sync_create;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_create,
+		(void *)k_ioctl->ioctl_ptr,
+		k_ioctl->size))
+		return -EFAULT;
+
+	return cam_sync_destroy(sync_create.sync_obj);
+}
+
+static int cam_sync_handle_register_user_payload(
+	struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_userpayload_info userpayload_info;
+	struct sync_user_payload *user_payload_kernel;
+	struct sync_user_payload *user_payload_iter;
+	struct sync_user_payload *temp_upayload_kernel;
+	uint32_t sync_obj;
+	struct sync_table_row *row = NULL;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&userpayload_info,
+		(void *)k_ioctl->ioctl_ptr,
+		k_ioctl->size))
+		return -EFAULT;
+
+	sync_obj = userpayload_info.sync_obj;
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
+	if (!user_payload_kernel)
+		return -ENOMEM;
+
+	memcpy(user_payload_kernel->payload_data,
+		userpayload_info.payload,
+		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row =  sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		kfree(user_payload_kernel);
+		return -EINVAL;
+	}
+
+	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
+
+		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+			sync_obj,
+			row->state,
+			user_payload_kernel->payload_data,
+			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));
+
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		kfree(user_payload_kernel);
+		return 0;
+	}
+
+	list_for_each_entry_safe(user_payload_iter,
+		temp_upayload_kernel,
+		&row->user_payload_list,
+		list) {
+		if (user_payload_iter->payload_data[0] ==
+				user_payload_kernel->payload_data[0] &&
+			user_payload_iter->payload_data[1] ==
+				user_payload_kernel->payload_data[1]) {
+
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+			kfree(user_payload_kernel);
+			return -EALREADY;
+		}
+	}
+
+	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return 0;
+}
+
+static int cam_sync_handle_deregister_user_payload(
+	struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_userpayload_info userpayload_info;
+	struct sync_user_payload *user_payload_kernel, *temp;
+	uint32_t sync_obj;
+	struct sync_table_row *row = NULL;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
+		CDBG("Incorrect ioctl size\n");
+		return -EINVAL;
+	}
+
+	if (!k_ioctl->ioctl_ptr) {
+		CDBG("Invalid embedded ioctl ptr\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&userpayload_info,
+		(void *)k_ioctl->ioctl_ptr,
+		k_ioctl->size))
+		return -EFAULT;
+
+	sync_obj = userpayload_info.sync_obj;
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row =  sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(user_payload_kernel, temp,
+				&row->user_payload_list, list) {
+		if (user_payload_kernel->payload_data[0] ==
+				userpayload_info.payload[0] &&
+				user_payload_kernel->payload_data[1] ==
+				userpayload_info.payload[1]) {
+			list_del_init(&user_payload_kernel->list);
+			kfree(user_payload_kernel);
+		}
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return 0;
+}
+
+static long cam_sync_dev_ioctl(struct file *filep, void *fh,
+		bool valid_prio, unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct sync_device *sync_dev = video_drvdata(filep);
+	struct cam_private_ioctl_arg k_ioctl;
+
+	if (!sync_dev) {
+		pr_err("%s sync_dev NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!arg)
+		return -EINVAL;
+
+	if (cmd != CAM_PRIVATE_IOCTL_CMD)
+		return -ENOIOCTLCMD;
+
+	k_ioctl = *(struct cam_private_ioctl_arg *)arg;
+
+	switch (k_ioctl.id) {
+	case CAM_SYNC_CREATE:
+		rc = cam_sync_handle_create(&k_ioctl);
+		break;
+	case CAM_SYNC_DESTROY:
+		rc = cam_sync_handle_destroy(&k_ioctl);
+		break;
+	case CAM_SYNC_REGISTER_PAYLOAD:
+		rc = cam_sync_handle_register_user_payload(
+			&k_ioctl);
+		break;
+	case CAM_SYNC_DEREGISTER_PAYLOAD:
+		rc = cam_sync_handle_deregister_user_payload(
+			&k_ioctl);
+		break;
+	case CAM_SYNC_SIGNAL:
+		rc = cam_sync_handle_signal(&k_ioctl);
+		break;
+	case CAM_SYNC_MERGE:
+		rc = cam_sync_handle_merge(&k_ioctl);
+		break;
+	case CAM_SYNC_WAIT:
+		rc = cam_sync_handle_wait(&k_ioctl);
+		((struct cam_private_ioctl_arg *)arg)->result =
+			k_ioctl.result;
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+static unsigned int cam_sync_poll(struct file *f,
+	struct poll_table_struct *pll_table)
+{
+	int rc = 0;
+	struct v4l2_fh *eventq = f->private_data;
+
+	if (!eventq)
+		return -EINVAL;
+
+	poll_wait(f, &eventq->wait, pll_table);
+
+	if (v4l2_event_pending(eventq))
+		rc = POLLPRI;
+
+	return rc;
+}
+
+static int cam_sync_open(struct file *filep)
+{
+	int rc;
+	struct sync_device *sync_dev = video_drvdata(filep);
+
+	if (!sync_dev) {
+		pr_err("%s Sync device NULL\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&sync_dev->table_lock);
+	if (sync_dev->open_cnt >= 1) {
+		mutex_unlock(&sync_dev->table_lock);
+		return -EALREADY;
+	}
+
+	rc = v4l2_fh_open(filep);
+	if (!rc) {
+		sync_dev->open_cnt++;
+		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+		sync_dev->cam_sync_eventq = filep->private_data;
+		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+	} else {
+		pr_err("v4l2_fh_open failed : %d\n", rc);
+	}
+	mutex_unlock(&sync_dev->table_lock);
+
+	return rc;
+}
+
+static int cam_sync_close(struct file *filep)
+{
+	int rc = 0;
+	int i;
+	struct sync_device *sync_dev = video_drvdata(filep);
+
+	if (!sync_dev) {
+		pr_err("%s Sync device NULL\n", __func__);
+		rc = -ENODEV;
+		return rc;
+	}
+	mutex_lock(&sync_dev->table_lock);
+	sync_dev->open_cnt--;
+	if (!sync_dev->open_cnt) {
+		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
+			struct sync_table_row *row =
+			sync_dev->sync_table + i;
+			if (row->state == CAM_SYNC_STATE_INVALID)
+				continue;
+
+			/* Signal all remaining objects as ERR, but we don't care
+			 * about the return status here apart from logging it
+			 */
+			rc = cam_sync_signal(i, CAM_SYNC_STATE_SIGNALED_ERROR);
+			if (rc < 0)
+				pr_err("Cleanup signal failed: idx = %d\n", i);
+
+			rc = cam_sync_destroy(i);
+			if (rc < 0)
+				pr_err("Cleanup destroy failed: idx = %d\n", i);
+		}
+	}
+	mutex_unlock(&sync_dev->table_lock);
+	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+	sync_dev->cam_sync_eventq = NULL;
+	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+	v4l2_fh_release(filep);
+
+	return rc;
+}
+
+int cam_sync_subscribe_event(struct v4l2_fh *fh,
+		const struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS, NULL);
+}
+
+int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
+		const struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
+	.vidioc_subscribe_event = cam_sync_subscribe_event,
+	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
+	.vidioc_default = cam_sync_dev_ioctl,
+};
+
+static struct v4l2_file_operations cam_sync_v4l2_fops = {
+	.owner = THIS_MODULE,
+	.open  = cam_sync_open,
+	.release = cam_sync_close,
+	.poll = cam_sync_poll,
+	.unlocked_ioctl   = video_ioctl2,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = video_ioctl2,
+#endif
+};
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+static int cam_sync_media_controller_init(struct sync_device *sync_dev,
+	struct platform_device *pdev)
+{
+	int rc;
+
+	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
+		GFP_KERNEL);
+	if (!sync_dev->v4l2_dev.mdev)
+		return -ENOMEM;
+
+	media_device_init(sync_dev->v4l2_dev.mdev);
+	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
+			sizeof(sync_dev->v4l2_dev.mdev->model));
+	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
+
+	rc = media_device_register(sync_dev->v4l2_dev.mdev);
+	if (rc < 0)
+		goto register_fail;
+
+	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
+	if (rc < 0)
+		goto entity_fail;
+
+	return 0;
+
+entity_fail:
+	media_device_unregister(sync_dev->v4l2_dev.mdev);
+register_fail:
+	media_device_cleanup(sync_dev->v4l2_dev.mdev);
+	return rc;
+}
+
+static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
+{
+	media_entity_cleanup(&sync_dev->vdev->entity);
+	media_device_unregister(sync_dev->v4l2_dev.mdev);
+	media_device_cleanup(sync_dev->v4l2_dev.mdev);
+	kfree(sync_dev->v4l2_dev.mdev);
+}
+
+static void cam_sync_init_entity(struct sync_device *sync_dev)
+{
+	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
+	sync_dev->vdev->entity.name =
+				video_device_node_name(sync_dev->vdev);
+}
+#else
+static int cam_sync_media_controller_init(struct sync_device *sync_dev,
+	struct platform_device *pdev)
+{
+	return 0;
+}
+
+static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
+{
+}
+
+static void cam_sync_init_entity(struct sync_device *sync_dev)
+{
+}
+#endif
+
+static int cam_sync_probe(struct platform_device *pdev)
+{
+	int rc;
+	int idx;
+
+	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
+	if (!sync_dev)
+		return -ENOMEM;
+
+	mutex_init(&sync_dev->table_lock);
+	spin_lock_init(&sync_dev->cam_sync_eventq_lock);
+
+	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
+		spin_lock_init(&sync_dev->row_spinlocks[idx]);
+
+	sync_dev->vdev = video_device_alloc();
+	if (!sync_dev->vdev) {
+		rc = -ENOMEM;
+		goto vdev_fail;
+	}
+
+	rc = cam_sync_media_controller_init(sync_dev, pdev);
+	if (rc < 0)
+		goto mcinit_fail;
+
+	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;
+
+	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
+	if (rc < 0)
+		goto register_fail;
+
+	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
+				sizeof(sync_dev->vdev->name));
+	sync_dev->vdev->release  = video_device_release;
+	sync_dev->vdev->fops     = &cam_sync_v4l2_fops;
+	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
+	sync_dev->vdev->minor     = -1;
+	sync_dev->vdev->vfl_type  = VFL_TYPE_GRABBER;
+	rc = video_register_device(sync_dev->vdev,
+		VFL_TYPE_GRABBER, -1);
+	if (rc < 0)
+		goto v4l2_fail;
+
+	cam_sync_init_entity(sync_dev);
+	video_set_drvdata(sync_dev->vdev, sync_dev);
+	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
+	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
+	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+
+	/*
+	 * We treat zero as invalid handle, so we will keep the 0th bit set
+	 * always
+	 */
+	set_bit(0, sync_dev->bitmap);
+
+	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
+		WQ_HIGHPRI | WQ_UNBOUND, 0);
+
+	if (!sync_dev->work_queue) {
+		pr_err("Error: high priority work queue creation failed!\n");
+		rc = -ENOMEM;
+		goto v4l2_fail;
+	}
+
+	return rc;
+
+v4l2_fail:
+	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
+register_fail:
+	cam_sync_media_controller_cleanup(sync_dev);
+mcinit_fail:
+	video_device_release(sync_dev->vdev);
+vdev_fail:
+	mutex_destroy(&sync_dev->table_lock);
+	kfree(sync_dev);
+	return rc;
+}
+
+static int cam_sync_remove(struct platform_device *pdev)
+{
+	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
+	cam_sync_media_controller_cleanup(sync_dev);
+	video_device_release(sync_dev->vdev);
+	kfree(sync_dev);
+	sync_dev = NULL;
+
+	return 0;
+}
+
+static struct platform_device cam_sync_device = {
+	.name = "cam_sync",
+	.id = -1,
+};
+
+static struct platform_driver cam_sync_driver = {
+	.probe = cam_sync_probe,
+	.remove = cam_sync_remove,
+	.driver = {
+		.name = "cam_sync",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init cam_sync_init(void)
+{
+	int rc;
+
+	rc = platform_device_register(&cam_sync_device);
+	if (rc)
+		return -ENODEV;
+
+	return platform_driver_register(&cam_sync_driver);
+}
+
+static void __exit cam_sync_exit(void)
+{
+	int idx;
+
+	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
+		spin_lock_init(&sync_dev->row_spinlocks[idx]);
+	platform_driver_unregister(&cam_sync_driver);
+	platform_device_unregister(&cam_sync_device);
+	kfree(sync_dev);
+}
+
+module_init(cam_sync_init);
+module_exit(cam_sync_exit);
+MODULE_DESCRIPTION("Camera sync driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h
new file mode 100644
index 0000000..9646887
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_API_H__
+#define __CAM_SYNC_API_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/videodev2.h>
+#include <uapi/media/cam_sync.h>
+
+#define SYNC_DEBUG_NAME_LEN 63
+typedef void (*sync_callback)(int32_t sync_obj, int status, void *data);
+
+/* Kernel APIs */
+
+/**
+ * @brief: Creates a sync object
+ *
+ *  The newly created sync object is assigned to sync_obj.
+ *
+ * @param sync_obj   : Pointer to int referencing the sync object.
+ * @param name : Optional parameter associating a name with the sync object for
+ * debug purposes. Only the first SYNC_DEBUG_NAME_LEN bytes are accepted;
+ * the rest will be ignored.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if sync_obj is an invalid pointer.
+ * -ENOMEM will be returned if the kernel can't allocate space for
+ * sync object.
+ */
+int cam_sync_create(int32_t *sync_obj, const char *name);
+
+/**
+ * @brief: Registers a callback with a sync object
+ *
+ * @param cb_func:  Pointer to callback to be registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if userdata is invalid.
+ * -ENOMEM will be returned if cb_func is invalid.
+ *
+ */
+int cam_sync_register_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: De-registers a callback with a sync object
+ *
+ * @param cb_func:  Pointer to callback to be de-registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if userdata is invalid.
+ * -ENOMEM will be returned if cb_func is invalid.
+ */
+int cam_sync_deregister_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: Signals a sync object with the status argument.
+ *
+ * This function will signal the sync object referenced by the sync_obj
+ * parameter and when doing so, will trigger callbacks in both user space and
+ * kernel. Callbacks will be triggered asynchronously and their order of execution
+ * is not guaranteed. The status parameter will indicate whether the entity
+ * performing the signaling wants to convey an error case or a success case.
+ *
+ * @param sync_obj: int referencing the sync object.
+ * @param status: Status of the signaling. Can be either
+ * CAM_SYNC_STATE_SIGNALED_ERROR or CAM_SYNC_STATE_SIGNALED_SUCCESS.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_signal(int32_t sync_obj, uint32_t status);
+
+/**
+ * @brief: Merges multiple sync objects
+ *
+ * This function will merge multiple sync objects into a sync group.
+ *
+ * @param sync_obj: Pointer to an array of sync object handles to be merged
+ * @param num_objs: Number of handles in the array
+ * @param merged_obj: Pointer to an int that receives the merged sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj);
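+
+/*
+ * Illustrative merge sketch (obj_a and obj_b are hypothetical handles,
+ * shown only as an example): group two existing sync objects so that a
+ * single wait or callback covers both.
+ *
+ *	int32_t objs[2] = { obj_a, obj_b };
+ *	int32_t grouped;
+ *
+ *	rc = cam_sync_merge(objs, 2, &grouped);
+ *	// grouped is signaled only after every child has been signaled
+ */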
+
+/**
+ * @brief: Destroys a sync object
+ *
+ * @param sync_obj: int referencing the sync object to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_destroy(int32_t sync_obj);
+
+/**
+ * @brief: Waits for a sync object synchronously
+ *
+ * Does a wait on the sync object identified by sync_obj for a maximum
+ * of timeout_ms milliseconds. Must not be called from interrupt context as
+ * this API can sleep. Should be called from process context only.
+ *
+ * @param sync_obj: int referencing the sync object to be waited upon
+ * @param timeout_ms: Timeout in milliseconds.
+ *
+ * @return 0 upon success, -EINVAL if sync object is in bad state or arguments
+ * are invalid, -ETIMEDOUT if wait times out.
+ */
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms);
+
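+/*
+ * Illustrative usage sketch (not part of this interface; shown only as an
+ * example of how a kernel client might chain the calls above, my_done_cb
+ * and ctx are hypothetical names):
+ *
+ *	static void my_done_cb(int32_t sync_obj, int status, void *data)
+ *	{
+ *		// status is CAM_SYNC_STATE_SIGNALED_SUCCESS or _ERROR
+ *	}
+ *
+ *	int32_t obj;
+ *	int rc = cam_sync_create(&obj, "frame_done");
+ *
+ *	if (!rc)
+ *		rc = cam_sync_register_callback(my_done_cb, ctx, obj);
+ *
+ *	// Producer side, once the work is finished:
+ *	cam_sync_signal(obj, CAM_SYNC_STATE_SIGNALED_SUCCESS);
+ *
+ *	// Or block (process context only) for up to 100 ms:
+ *	rc = cam_sync_wait(obj, 100);
+ *
+ *	cam_sync_destroy(obj);
+ */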
+
+#endif /* __CAM_SYNC_API_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
new file mode 100644
index 0000000..ba9bef4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
@@ -0,0 +1,186 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_PRIVATE_H__
+#define __CAM_SYNC_PRIVATE_H__
+
+#include <linux/bitmap.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#ifdef CONFIG_CAM_SYNC_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CAM_SYNC_OBJ_NAME_LEN           64
+#define CAM_SYNC_MAX_OBJS               1024
+#define CAM_SYNC_MAX_V4L2_EVENTS        50
+#define CAM_SYNC_DEBUG_FILENAME         "cam_debug"
+#define CAM_SYNC_DEBUG_BASEDIR          "cam"
+#define CAM_SYNC_DEBUG_BUF_SIZE         32
+#define CAM_SYNC_PAYLOAD_WORDS          2
+#define CAM_SYNC_NAME                   "cam_sync"
+#define CAM_SYNC_WORKQUEUE_NAME         "HIPRIO_SYNC_WORK_QUEUE"
+
+#define CAM_SYNC_TYPE_INDV              0
+#define CAM_SYNC_TYPE_GROUP             1
+
+/**
+ * enum sync_type - Enum to indicate the type of sync object,
+ * i.e. individual or group.
+ *
+ * @SYNC_TYPE_INDV  : Object is an individual sync object
+ * @SYNC_TYPE_GROUP : Object is a group sync object
+ */
+enum sync_type {
+	SYNC_TYPE_INDV,
+	SYNC_TYPE_GROUP
+};
+
+/**
+ * struct sync_parent_info - Single node of information about a parent
+ * of a sync object, usually part of the parents linked list
+ *
+ * @sync_id  : Sync object id of parent
+ * @list     : List member used to append this node to a linked list
+ */
+struct sync_parent_info {
+	int32_t sync_id;
+	struct list_head list;
+};
+
+/**
+ * struct sync_child_info - Single node of information about a child
+ * of a sync object, usually part of the children linked list
+ *
+ * @sync_id  : Sync object id of child
+ * @list     : List member used to append this node to a linked list
+ */
+struct sync_child_info {
+	int32_t sync_id;
+	struct list_head list;
+};
+
+
+/**
+ * struct sync_callback_info - Single node of information about a kernel
+ * callback registered on a sync object
+ *
+ * @callback_func    : Callback function, registered by client driver
+ * @cb_data          : Callback data, registered by client driver
+ * @status           : Status with which callback will be invoked in client
+ * @sync_obj         : Sync id of the object for which callback is registered
+ * @cb_dispatch_work : Work representing the call dispatch
+ * @list             : List member used to append this node to a linked list
+ */
+struct sync_callback_info {
+	sync_callback callback_func;
+	void *cb_data;
+	int status;
+	int32_t sync_obj;
+	struct work_struct cb_dispatch_work;
+	struct list_head list;
+};
+
+/**
+ * struct sync_user_payload - Single node of information about a user space
+ * payload registered from user space
+ *
+ * @payload_data    : Payload data, opaque to kernel
+ * @list            : List member used to append this node to a linked list
+ */
+struct sync_user_payload {
+	uint64_t payload_data[CAM_SYNC_PAYLOAD_WORDS];
+	struct list_head list;
+};
+
+/**
+ * struct sync_table_row - Single row of information about a sync object, used
+ * for internal bookkeeping in the sync driver
+ *
+ * @name              : Optional string representation of the sync object
+ * @type              : Type of the sync object (individual or group)
+ * @sync_id           : Integer id representing this sync object
+ * @parents_list      : Linked list of parents of this sync object
+ * @children_list     : Linked list of children of this sync object
+ * @state             : State (INVALID, ACTIVE, SIGNALED_SUCCESS or
+ *                      SIGNALED_ERROR)
+ * @remaining         : Count of remaining children that have not been signaled
+ * @signaled          : Completion variable on which block calls will wait
+ * @callback_list     : Linked list of kernel callbacks registered
+ * @user_payload_list : Linked list of user space payloads registered
+ */
+struct sync_table_row {
+	char name[CAM_SYNC_OBJ_NAME_LEN];
+	enum sync_type type;
+	int32_t sync_id;
+	/* List of parents, which are merged objects */
+	struct list_head parents_list;
+	/* List of children, which constitute the merged object */
+	struct list_head children_list;
+	uint32_t state;
+	uint32_t remaining;
+	struct completion signaled;
+	struct list_head callback_list;
+	struct list_head user_payload_list;
+};
+
+/**
+ * struct cam_signalable_info - Information for a single sync object that is
+ * ready to be signaled
+ *
+ * @sync_obj : Sync object id of signalable object
+ * @status   : Status with which to signal
+ * @list     : List member used to append this node to a linked list
+ */
+struct cam_signalable_info {
+	int32_t sync_obj;
+	uint32_t status;
+	struct list_head list;
+};
+
+/**
+ * struct sync_device - Internal struct for bookkeeping of sync driver details
+ *
+ * @vdev            : Video device
+ * @v4l2_dev        : V4L2 device
+ * @sync_table      : Table of all sync objects
+ * @row_spinlocks   : Spinlock array, one for each row in the table
+ * @table_lock      : Mutex used to lock the table
+ * @open_cnt        : Count of file open calls made on the sync driver
+ * @work_queue      : Work queue used for dispatching kernel callbacks
+ * @cam_sync_eventq : Event queue used to dispatch user payloads to user space
+ * @bitmap          : Bitmap representation of all sync objects
+ */
+struct sync_device {
+	struct video_device *vdev;
+	struct v4l2_device v4l2_dev;
+	struct sync_table_row sync_table[CAM_SYNC_MAX_OBJS];
+	spinlock_t row_spinlocks[CAM_SYNC_MAX_OBJS];
+	struct mutex table_lock;
+	int open_cnt;
+	struct workqueue_struct *work_queue;
+	struct v4l2_fh *cam_sync_eventq;
+	spinlock_t cam_sync_eventq_lock;
+	DECLARE_BITMAP(bitmap, CAM_SYNC_MAX_OBJS);
+};
+
+
+#endif /* __CAM_SYNC_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
new file mode 100644
index 0000000..4f5bf87
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -0,0 +1,296 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SYNC-UTIL %s:%d " fmt, __func__, __LINE__
+
+#include "cam_sync_util.h"
+
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+	long *idx)
+{
+	int rc = 0;
+
+	mutex_lock(&sync_dev->table_lock);
+
+	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+
+	if (*idx < CAM_SYNC_MAX_OBJS)
+		set_bit(*idx, sync_dev->bitmap);
+	else
+		rc = -1;
+
+	mutex_unlock(&sync_dev->table_lock);
+
+	return rc;
+}
+
+int cam_sync_init_object(struct sync_table_row *table,
+	uint32_t idx,
+	const char *name)
+{
+	struct sync_table_row *row = table + idx;
+
+	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+		return -EINVAL;
+
+	if (name)
+		strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
+	INIT_LIST_HEAD(&row->parents_list);
+	INIT_LIST_HEAD(&row->children_list);
+	row->type = CAM_SYNC_TYPE_INDV;
+	row->sync_id = idx;
+	row->state = CAM_SYNC_STATE_ACTIVE;
+	row->remaining = 0;
+	init_completion(&row->signaled);
+	INIT_LIST_HEAD(&row->callback_list);
+	INIT_LIST_HEAD(&row->user_payload_list);
+
+	return 0;
+}
+
+int cam_sync_init_group_object(struct sync_table_row *table,
+	uint32_t idx,
+	uint32_t *sync_objs,
+	uint32_t num_objs)
+{
+	int i;
+	struct sync_child_info *child_info;
+	struct sync_parent_info *parent_info;
+	struct sync_table_row *row = table + idx;
+	struct sync_table_row *child_row = NULL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	INIT_LIST_HEAD(&row->parents_list);
+
+	INIT_LIST_HEAD(&row->children_list);
+
+	/*
+	 * While traversing parents and children, we allocate in a loop and in
+	 * case allocation fails, we call the clean up function which frees up
+	 * all memory allocation thus far
+	 */
+	for (i = 0; i < num_objs; i++) {
+		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
+
+		if (!child_info) {
+			cam_sync_util_cleanup_children_list(
+				&row->children_list);
+			spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+			return -ENOMEM;
+		}
+
+		child_info->sync_id = sync_objs[i];
+		list_add_tail(&child_info->list, &row->children_list);
+	}
+
+	for (i = 0; i < num_objs; i++) {
+		/* This gets us the row corresponding to the sync object */
+		child_row = table + sync_objs[i];
+		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
+		if (!parent_info) {
+			cam_sync_util_cleanup_parents_list(
+				&child_row->parents_list);
+			cam_sync_util_cleanup_children_list(
+				&row->children_list);
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+			spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+			return -ENOMEM;
+		}
+		parent_info->sync_id = idx;
+		list_add_tail(&parent_info->list, &child_row->parents_list);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+	}
+
+	row->type = CAM_SYNC_TYPE_GROUP;
+	row->sync_id = idx;
+	row->state = CAM_SYNC_STATE_ACTIVE;
+	row->remaining = num_objs;
+	init_completion(&row->signaled);
+	INIT_LIST_HEAD(&row->callback_list);
+	INIT_LIST_HEAD(&row->user_payload_list);
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+	return 0;
+}
+
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
+{
+	struct sync_table_row *row = table + idx;
+	struct sync_child_info *child_info, *temp_child;
+	struct sync_callback_info *sync_cb, *temp_cb;
+	struct sync_parent_info *parent_info, *temp_parent;
+	struct sync_user_payload *upayload_info, *temp_upayload;
+
+	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	clear_bit(idx, sync_dev->bitmap);
+	list_for_each_entry_safe(child_info, temp_child,
+				&row->children_list, list) {
+		list_del_init(&child_info->list);
+		kfree(child_info);
+	}
+
+	list_for_each_entry_safe(parent_info, temp_parent,
+				&row->parents_list, list) {
+		list_del_init(&parent_info->list);
+		kfree(parent_info);
+	}
+
+	list_for_each_entry_safe(upayload_info, temp_upayload,
+				&row->user_payload_list, list) {
+		list_del_init(&upayload_info->list);
+		kfree(upayload_info);
+	}
+
+	list_for_each_entry_safe(sync_cb, temp_cb,
+				&row->callback_list, list) {
+		list_del_init(&sync_cb->list);
+		kfree(sync_cb);
+	}
+
+	row->state = CAM_SYNC_STATE_INVALID;
+	memset(row, 0, sizeof(*row));
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	return 0;
+}
+
+void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
+{
+	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
+		struct sync_callback_info,
+		cb_dispatch_work);
+
+	cb_info->callback_func(cb_info->sync_obj,
+		cb_info->status,
+		cb_info->cb_data);
+
+	kfree(cb_info);
+}
+
+void cam_sync_util_send_v4l2_event(uint32_t id,
+	uint32_t sync_obj,
+	int status,
+	void *payload,
+	int len)
+{
+	struct v4l2_event event;
+	__u64 *payload_data = NULL;
+	struct cam_sync_ev_header *ev_header = NULL;
+
+	event.id = id;
+	event.type = CAM_SYNC_V4L_EVENT;
+
+	ev_header = CAM_SYNC_GET_HEADER_PTR(event);
+	ev_header->sync_obj = sync_obj;
+	ev_header->status = status;
+
+	payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
+	memcpy(payload_data, payload, len);
+
+	v4l2_event_queue(sync_dev->vdev, &event);
+}
+
+int cam_sync_util_validate_merge(uint32_t *sync_obj, uint32_t num_objs)
+{
+	int i;
+	struct sync_table_row *row = NULL;
+
+	for (i = 0; i < num_objs; i++) {
+		row = sync_dev->sync_table + sync_obj[i];
+		spin_lock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
+		if (row->type == CAM_SYNC_TYPE_GROUP ||
+			row->state == CAM_SYNC_STATE_INVALID) {
+			pr_err("Group obj %d can't be merged or obj UNINIT\n",
+				sync_obj[i]);
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
+			return -EINVAL;
+		}
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
+	}
+	return 0;
+}
+
+int cam_sync_util_add_to_signalable_list(int32_t sync_obj,
+	uint32_t status,
+	struct list_head *sync_list)
+{
+	struct cam_signalable_info *signalable_info = NULL;
+
+	signalable_info = kzalloc(sizeof(*signalable_info), GFP_ATOMIC);
+	if (!signalable_info)
+		return -ENOMEM;
+
+	signalable_info->sync_obj = sync_obj;
+	signalable_info->status = status;
+
+	list_add_tail(&signalable_info->list, sync_list);
+
+	return 0;
+}
+
+int cam_sync_util_get_state(int current_state,
+	int new_state)
+{
+	int result = CAM_SYNC_STATE_SIGNALED_ERROR;
+
+	if (new_state != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
+		new_state != CAM_SYNC_STATE_SIGNALED_ERROR)
+		return CAM_SYNC_STATE_SIGNALED_ERROR;
+
+	switch (current_state) {
+	case CAM_SYNC_STATE_INVALID:
+		result =  CAM_SYNC_STATE_SIGNALED_ERROR;
+		break;
+
+	case CAM_SYNC_STATE_ACTIVE:
+	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+		if (new_state == CAM_SYNC_STATE_SIGNALED_ERROR)
+			result = CAM_SYNC_STATE_SIGNALED_ERROR;
+		else if (new_state == CAM_SYNC_STATE_SIGNALED_SUCCESS)
+			result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+		break;
+
+	case CAM_SYNC_STATE_SIGNALED_ERROR:
+		result = CAM_SYNC_STATE_SIGNALED_ERROR;
+		break;
+	}
+
+	return result;
+}
+
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean)
+{
+	struct sync_child_info *child_info = NULL;
+	struct sync_child_info *temp_child_info = NULL;
+
+	list_for_each_entry_safe(child_info,
+			temp_child_info, list_to_clean, list) {
+		list_del_init(&child_info->list);
+		kfree(child_info);
+	}
+}
+
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean)
+{
+	struct sync_parent_info *parent_info = NULL;
+	struct sync_parent_info *temp_parent_info = NULL;
+
+	list_for_each_entry_safe(parent_info,
+			temp_parent_info, list_to_clean, list) {
+		list_del_init(&parent_info->list);
+		kfree(parent_info);
+	}
+}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
new file mode 100644
index 0000000..9dedd14
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_UTIL_H__
+#define __CAM_SYNC_UTIL_H__
+
+
+#include <cam_sync_api.h>
+#include "cam_sync_private.h"
+
+extern struct sync_device *sync_dev;
+
+/**
+ * @brief: Finds an empty row in the sync table and sets its corresponding bit
+ * in the bit array
+ *
+ * @param sync_dev : Pointer to the sync device instance
+ * @param idx      : Pointer to a long containing the index found in the bit
+ *                   array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+	long *idx);
+
+/**
+ * @brief: Function to initialize an empty row in the sync table. This should be
+ *         called only for individual sync objects.
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx   : Index of row to initialize
+ * @param name  : Optional string representation of the sync object. Should be
+ *                63 characters or less
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_object(struct sync_table_row *table,
+	uint32_t idx,
+	const char *name);
+
+/**
+ * @brief: Function to uninitialize a row in the sync table
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx   : Index of row to uninitialize
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx);
+
+/**
+ * @brief: Function to initialize a row in the sync table when the object is a
+ *         group object, also known as a merged sync object
+ *
+ * @param table     : Pointer to the sync objects table
+ * @param idx       : Index of row to initialize
+ * @param sync_objs : Array of sync objects which will be merged
+ *                    or grouped together
+ * @param num_objs  : Number of sync objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_group_object(struct sync_table_row *table,
+	uint32_t idx,
+	uint32_t *sync_objs,
+	uint32_t num_objs);
+
+/**
+ * @brief: Function to dispatch a kernel callback for a sync callback
+ *
+ * @param cb_dispatch_work : Pointer to the work_struct that needs to be
+ *                           dispatched
+ *
+ * @return None
+ */
+void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work);
+
+/**
+ * @brief: Function to send V4L event to user space
+ * @param id       : V4L event id to send
+ * @param sync_obj : Sync obj for which event needs to be sent
+ * @param status   : Status of the event
+ * @param payload  : Payload that needs to be sent to user space
+ * @param len      : Length of the payload
+ *
+ * @return None
+ */
+void cam_sync_util_send_v4l2_event(uint32_t id,
+	uint32_t sync_obj,
+	int status,
+	void *payload,
+	int len);
+
+/**
+ * @brief: Function to validate sync merge arguments
+ *
+ * @param sync_obj : Array of sync objects to merge
+ * @param num_objs : Number of sync objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_validate_merge(uint32_t *sync_obj, uint32_t num_objs);
+
+/**
+ * @brief: Function which adds sync object information to the signalable list
+ *
+ * @param sync_obj : Sync object to add
+ * @param status   : Status of above sync object
+ * @param list     : Linked list where the information should be added to
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_add_to_signalable_list(int32_t sync_obj,
+	uint32_t status,
+	struct list_head *sync_list);
+
+/**
+ * @brief: Function which gets the next state of the sync object based on the
+ *         current state and the new state
+ *
+ * @param current_state : Current state of the sync object
+ * @param new_state     : New state of the sync object
+ *
+ * @return Next state of the sync object
+ */
+int cam_sync_util_get_state(int current_state,
+	int new_state);
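+
+/*
+ * For reference, the combination implemented in cam_sync_util.c treats an
+ * error as sticky: INVALID or SIGNALED_ERROR on either side yields
+ * CAM_SYNC_STATE_SIGNALED_ERROR, while ACTIVE or SIGNALED_SUCCESS combined
+ * with a SIGNALED_SUCCESS signal yields CAM_SYNC_STATE_SIGNALED_SUCCESS.
+ * For example:
+ *
+ *	state = cam_sync_util_get_state(CAM_SYNC_STATE_SIGNALED_ERROR,
+ *		CAM_SYNC_STATE_SIGNALED_SUCCESS);
+ *	// state == CAM_SYNC_STATE_SIGNALED_ERROR
+ */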
+
+/**
+ * @brief: Function to clean up the children of a sync object
+ * @param list_to_clean : List to clean up
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean);
+
+/**
+ * @brief: Function to clean up the parents of a sync object
+ * @param list_to_clean : List to clean up
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean);
+
+#endif /* __CAM_SYNC_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
new file mode 100644
index 0000000..6f9525e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
new file mode 100644
index 0000000..78cd9d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -0,0 +1,284 @@
+/* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "cam_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+int cam_io_w(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CDBG("0x%pK %08x\n", addr, data);
+	writel_relaxed(data, addr);
+
+	return 0;
+}
+
+int cam_io_w_mb(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CDBG("0x%pK %08x\n", addr, data);
+	/* Ensure previous writes are done */
+	wmb();
+	writel_relaxed(data, addr);
+
+	return 0;
+}
+
+uint32_t cam_io_r(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		pr_err("Invalid args\n");
+		return 0;
+	}
+
+	data = readl_relaxed(addr);
+	CDBG("0x%pK %08x\n", addr, data);
+
+	return data;
+}
+
+uint32_t cam_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		pr_err("Invalid args\n");
+		return 0;
+	}
+
+	/* Ensure previous read is done */
+	rmb();
+	data = readl_relaxed(addr);
+	CDBG("0x%pK %08x\n", addr, data);
+
+	return data;
+}
+
+int cam_io_memcpy(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+
+	for (i = 0; i < len/4; i++) {
+		CDBG("0x%pK %08x\n", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+
+	return 0;
+}
+
+int  cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+
+	/*
+	 * Do not use cam_io_w_mb to avoid double wmb() after a write
+	 * and before the next write.
+	 */
+	wmb();
+	for (i = 0; i < (len / 4); i++) {
+		CDBG("0x%pK %08x\n", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+
+	return 0;
+}
+
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while ((tmp != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	if (cnt > retry) {
+		pr_debug("Poll failed by value\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while (((tmp & bmask) != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	if (cnt > retry) {
+		pr_debug("Poll failed with mask\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr =%pK\n",
+			i, len, data[i], addr);
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr =%pK\n",
+			i, len, data[i], addr);
+		/* Ensure previous writes are done */
+		wmb();
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+#define __OFFSET(__i)   (data[__i][0])
+#define __VAL(__i)      (data[__i][1])
+int cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+int cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	/* Ensure write is done */
+	wmb();
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+#define BYTES_PER_REGISTER           4
+#define NUM_REGISTER_PER_LINE        4
+#define REG_OFFSET(__start, __i)    (__start + (__i * BYTES_PER_REGISTER))
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size)
+{
+	char          line_str[128];
+	char         *p_str;
+	int           i;
+	uint32_t      data;
+
+	CDBG("addr=%pK offset=0x%x size=%d\n", base_addr, start_offset, size);
+
+	if (!base_addr || (size <= 0))
+		return -EINVAL;
+
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size; i++) {
+		if (i % NUM_REGISTER_PER_LINE == 0) {
+			snprintf(p_str, 13, "0x%08x: ",
+				REG_OFFSET(start_offset, i));
+			p_str += 12;
+		}
+		data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+		snprintf(p_str, 10, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+			pr_err("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		pr_err("%s\n", line_str);
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
new file mode 100644
index 0000000..e4f73ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
@@ -0,0 +1,239 @@
+/* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IO_UTIL_H_
+#define _CAM_IO_UTIL_H_
+
+#include <linux/types.h>
+
+/**
+ * cam_io_w()
+ *
+ * @brief:              Camera IO util for register write
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_w_mb()
+ *
+ * @brief:              Camera IO util for register write with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If you need to ensure this write is also
+ *                      flushed, call wmb() independently in the caller.
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_r()
+ *
+ * @brief:              Camera IO util for register read
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r(void __iomem *addr);
+
+/**
+ * cam_io_r_mb()
+ *
+ * @brief:              Camera IO util for register read with memory barrier.
+ *                      Memory Barrier is only before the read to ensure the
+ *                      order. If you need to ensure this read is also
+ *                      complete, call rmb() independently in the caller.
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r_mb(void __iomem *addr);
+
+/**
+ * cam_io_memcpy()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy(void __iomem *dest_addr,
+		void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_memcpy_mb()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *                      with barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If you need to ensure this write is also
+ *                      flushed, call wmb() independently in the caller.
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_poll_value_wmask()
+ *
+ * @brief:              Poll register value with bitmask.
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until @bmask read from @addr matches this data
+ * @bmask:              Bit mask
+ * @retry:              Number of retry
+ * @min_usecs:          Minimum time to wait for retry
+ * @max_usecs:          Maximum time to wait for retry
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs);
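+
+/*
+ * Illustrative usage sketch (STATUS_REG and STATUS_READY_BIT are
+ * hypothetical names): poll a ready bit up to 10 times, sleeping
+ * 1000-2000 us between reads.
+ *
+ *	rc = cam_io_poll_value_wmask(mem_base + STATUS_REG,
+ *		STATUS_READY_BIT, STATUS_READY_BIT,
+ *		10, 1000, 2000);
+ *	if (rc)
+ *		// the bit did not assert within the retry budget
+ */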
+
+/**
+ * cam_io_poll_value()
+ *
+ * @brief:              Poll register value
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until value read from @addr matches this data
+ * @retry:              Number of retry
+ * @min_usecs:          Minimum time to wait for retry
+ * @max_usecs:          Maximum time to wait for retry
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs);
+
+/**
+ * cam_io_w_same_offset_block()
+ *
+ * @brief:              Write a block of data to same address
+ *
+ * @data:               Block data to be written
+ * @addr:               Register address to be written to
+ * @len:                Number of data words to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
+
+/**
+ * cam_io_w_mb_same_offset_block()
+ *
+ * @brief:              Write a block of data to same address with barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If you need to ensure this write is also
+ *                      flushed, call wmb() independently in the caller.
+ *
+ * @data:               Block data to be written
+ * @addr:               Register address to be written to
+ * @len:                Number of data words to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
+
+/**
+ * cam_io_w_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which the offset is added to
+ *                      get the register address
+ * @len:                Number of offset-value pairs to be written
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
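For illustration, a caller might lay out the offset/value table like this (a
hypothetical init sequence, not part of this patch; mem_base and rc are assumed
locals, and len follows the uint32_t-count convention documented above):

	static const uint32_t init_regs[][2] = {
		{ 0x004, 0x1 },		/* hypothetical enable register */
		{ 0x010, 0xff },	/* hypothetical irq mask register */
		{ 0x01c, 0x0 },		/* hypothetical clear register */
	};

	rc = cam_io_w_offset_val_block(init_regs, mem_base,
		ARRAY_SIZE(init_regs) * 2);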
+
+/**
+ * cam_io_w_mb_offset_val_block()
+ *
+ * @brief:              Write a block of registers described by a
+ *                      two-dimensional array of register offset/value
+ *                      pairs, with a memory barrier. The barrier is issued
+ *                      only before the write to ensure ordering. If this
+ *                      write also needs to be flushed, call wmb()
+ *                      separately in the caller.
+ *                      The offsets must all be different because of the
+ *                      way the barrier is used here.
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to the two-dimensional offset/value array
+ * @addr_base:          Base address to which each offset is added to form
+ *                      the register address
+ * @len:                Length of the offset/value array to be written, in
+ *                      number of uint32_t entries
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
+
+/**
+ * cam_io_dump()
+ *
+ * @brief:              Camera IO util for dumping a range of registers
+ *
+ * @base_addr:          Base register address for the dump
+ * @start_offset:       Start register offset for the dump
+ * @size:               Size specifying the range to dump
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size);
+
+#endif /* _CAM_IO_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
new file mode 100644
index 0000000..683386c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -0,0 +1,598 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include "cam_soc_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		pr_err("No IRQ line available\n");
+		return -ENODEV;
+	}
+
+	enable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		pr_err("No IRQ line available\n");
+		return -ENODEV;
+	}
+
+	disable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+	long clk_rate_round;
+
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	CDBG("enable %s, clk %pK rate %d\n",
+		clk_name, clk, clk_rate);
+	if (clk_rate > 0) {
+		clk_rate_round = clk_round_rate(clk, clk_rate);
+		CDBG("new_rate %ld\n", clk_rate_round);
+		if (clk_rate_round < 0) {
+			pr_err("round failed for clock %s rc = %ld\n",
+				clk_name, clk_rate_round);
+			return clk_rate_round;
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			pr_err("set_rate failed on %s\n", clk_name);
+			return rc;
+		}
+	} else if (clk_rate == INIT_RATE) {
+		clk_rate_round = clk_get_rate(clk);
+		CDBG("init new_rate %ld\n", clk_rate_round);
+		if (clk_rate_round == 0) {
+			clk_rate_round = clk_round_rate(clk, 0);
+			if (clk_rate_round <= 0) {
+				pr_err("round rate failed on %s\n", clk_name);
+				return clk_rate_round;
+			}
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			pr_err("set_rate failed on %s\n", clk_name);
+			return rc;
+		}
+	}
+	rc = clk_prepare_enable(clk);
+	if (rc) {
+		pr_err("enable failed for %s\n", clk_name);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name)
+{
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	CDBG("disable %s\n", clk_name);
+	clk_disable_unprepare(clk);
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_clk_enable_default()
+ *
+ * @brief:              This function enables the default clocks present
+ *                      in soc_info
+ *
+ * @soc_info:           device soc struct to be populated
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info)
+{
+	int i, rc = 0;
+
+	if (soc_info->num_clk == 0)
+		return rc;
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		rc = cam_soc_util_clk_enable(soc_info->clk[i],
+			soc_info->clk_name[i], soc_info->clk_rate[i]);
+		if (rc)
+			goto clk_disable;
+	}
+
+	return rc;
+
+clk_disable:
+	for (i--; i >= 0; i--) {
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_clk_disable_default()
+ *
+ * @brief:              This function disables the default clocks present
+ *                      in soc_info
+ *
+ * @soc_info:           device soc struct whose default clocks are to be
+ *                      disabled
+ */
+static void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+
+	if (soc_info->num_clk == 0)
+		return;
+
+	for (i = soc_info->num_clk - 1; i >= 0; i--) {
+		CDBG("disable %s\n", soc_info->clk_name[i]);
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+	}
+}
+
+/**
+ * cam_soc_util_get_dt_clk_info()
+ *
+ * @brief:              Parse the DT and populate the Clock properties
+ *
+ * @soc_info:           device soc struct to be populated; the name of the
+ *                      rate-controlled source clock is read from the
+ *                      "src-clock-name" DT property
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count;
+	int i, rc;
+	struct platform_device *pdev = NULL;
+	const char *src_clk_str = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	CDBG("count = %d\n", count);
+	if (count > CAM_SOC_MAX_CLK) {
+		pr_err("invalid count of clocks, count=%d\n", count);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (count <= 0) {
+		CDBG("No clock-names found\n");
+		count = 0;
+		soc_info->num_clk = count;
+		return 0;
+	}
+	soc_info->num_clk = count;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(soc_info->clk_name[i]));
+		CDBG("clock-names[%d] = %s\n", i, soc_info->clk_name[i]);
+		if (rc) {
+			pr_err("i= %d count= %d reading clock-names failed\n",
+				i, count);
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32_array(of_node, "clock-rates",
+		soc_info->clk_rate, count);
+	if (rc) {
+		pr_err("reading clock-rates failed\n");
+		return rc;
+	}
+
+	rc = of_property_read_string_index(of_node, "src-clock-name", 0,
+		&src_clk_str);
+	if (rc) {
+		CDBG("No src_clk_str found\n");
+		soc_info->src_clk_idx = -1;
+		rc = 0;
+		/* Bottom loop is dependent on src_clk_str. So return here */
+		return rc;
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk_rate[i] = (soc_info->clk_rate[i] == 0) ?
+			(long)-1 : soc_info->clk_rate[i];
+		if (src_clk_str &&
+			(strcmp(soc_info->clk_name[i], src_clk_str) == 0)) {
+			soc_info->src_clk_idx = i;
+		}
+		CDBG("clk_rate[%d] = %d\n", i, soc_info->clk_rate[i]);
+	}
+
+	return rc;
+}
+
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count = 0, i = 0, rc = 0;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
+	if (rc) {
+		pr_err("device %s failed to read cell-index\n", pdev->name);
+		return rc;
+	}
+
+	count = of_property_count_strings(of_node, "regulator-names");
+	if (count <= 0) {
+		pr_err("no regulators found\n");
+		count = 0;
+	}
+	soc_info->num_rgltr = count;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = of_property_read_string_index(of_node,
+			"regulator-names", i, &soc_info->rgltr_name[i]);
+		CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
+		if (rc) {
+			pr_err("no regulator resource at cnt=%d\n", i);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	count = of_property_count_strings(of_node, "reg-names");
+	if (count <= 0) {
+		pr_err("no reg-names found\n");
+		count = 0;
+	}
+	soc_info->num_mem_block = count;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		rc = of_property_read_string_index(of_node, "reg-names", i,
+			&soc_info->mem_block_name[i]);
+		if (rc) {
+			pr_err("failed to read reg-names at %d\n", i);
+			return rc;
+		}
+		soc_info->mem_block[i] =
+			platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			soc_info->mem_block_name[i]);
+
+		if (!soc_info->mem_block[i]) {
+			pr_err("no mem resource by name %s\n",
+				soc_info->mem_block_name[i]);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32_array(of_node, "reg-cam-base",
+		soc_info->mem_block_cam_base, soc_info->num_mem_block);
+	if (rc) {
+		pr_err("Error reading register offsets\n");
+		return rc;
+	}
+
+	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
+		&soc_info->irq_name);
+	if (rc) {
+		pr_warn("No interrupt line present\n");
+	} else {
+		soc_info->irq_line = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, soc_info->irq_name);
+		if (!soc_info->irq_line) {
+			pr_err("no irq resource\n");
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	rc = cam_soc_util_get_dt_clk_info(soc_info);
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_regulator()
+ *
+ * @brief:              Get the regulator resource with the given name
+ *
+ * @pdev:               Platform device associated with regulator
+ * @reg:                Return pointer to be filled with regulator on success
+ * @rgltr_name:         Name of regulator to get
+ *
+ * @return:             0 for Success, negative value for failure
+ */
+static int cam_soc_util_get_regulator(struct platform_device *pdev,
+	struct regulator **reg, const char *rgltr_name)
+{
+	int rc = 0;
+	*reg = regulator_get(&pdev->dev, rgltr_name);
+	if (IS_ERR_OR_NULL(*reg)) {
+		rc = PTR_ERR(*reg);
+		rc = rc ? rc : -EINVAL;
+		pr_err("Regulator %s get failed %d\n", rgltr_name, rc);
+		*reg = NULL;
+	}
+	return rc;
+}
+
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data)
+{
+	int i = 0, rc = 0;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		soc_info->reg_map[i].mem_base = ioremap(
+			soc_info->mem_block[i]->start,
+			resource_size(soc_info->mem_block[i]));
+		if (!soc_info->reg_map[i].mem_base) {
+			pr_err("i= %d base NULL\n", i);
+			rc = -ENOMEM;
+			goto unmap_base;
+		}
+		soc_info->reg_map[i].mem_cam_base =
+			soc_info->mem_block_cam_base[i];
+		soc_info->reg_map[i].size =
+			resource_size(soc_info->mem_block[i]);
+		soc_info->num_reg_map++;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = cam_soc_util_get_regulator(pdev, &soc_info->rgltr[i],
+			soc_info->rgltr_name[i]);
+		if (rc)
+			goto put_regulator;
+	}
+
+	if (soc_info->irq_line) {
+		rc = devm_request_irq(&pdev->dev, soc_info->irq_line->start,
+			handler, IRQF_TRIGGER_RISING,
+			soc_info->irq_name, irq_data);
+		if (rc < 0) {
+			pr_err("irq request fail\n");
+			rc = -EBUSY;
+			goto put_regulator;
+		}
+		disable_irq(soc_info->irq_line->start);
+	}
+
+	/* Get Clock */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = clk_get(&soc_info->pdev->dev,
+			soc_info->clk_name[i]);
+		if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+			pr_err("get failed for %s\n", soc_info->clk_name[i]);
+			rc = -ENOENT;
+			goto put_clk;
+		}
+	}
+
+	return rc;
+
+put_clk:
+	if (i == -1)
+		i = soc_info->num_clk;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->clk[i]) {
+			clk_put(soc_info->clk[i]);
+			soc_info->clk[i] = NULL;
+		}
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		free_irq(soc_info->irq_line->start, soc_info);
+	}
+
+put_regulator:
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_disable(soc_info->rgltr[i]);
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+unmap_base:
+	if (i == -1)
+		i = soc_info->num_reg_map;
+	for (i = i - 1; i >= 0; i--) {
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	return rc;
+}
+
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	for (i = soc_info->num_clk - 1; i >= 0; i--) {
+		clk_put(soc_info->clk[i]);
+		soc_info->clk[i] = NULL;
+	}
+
+	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+	for (i = soc_info->num_reg_map - 1; i >= 0; i--) {
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		free_irq(soc_info->irq_line->start, soc_info);
+	}
+
+	return 0;
+}
+
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, bool enable_irq)
+{
+	int i, rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_enable(soc_info->rgltr[i]);
+		if (rc) {
+			pr_err("Regulator enable %s failed\n",
+				soc_info->rgltr_name[i]);
+			goto disable_regulator;
+		}
+	}
+
+	if (enable_clocks) {
+		rc = cam_soc_util_clk_enable_default(soc_info);
+		if (rc)
+			goto disable_regulator;
+	}
+
+	if (enable_irq) {
+		rc  = cam_soc_util_irq_enable(soc_info);
+		if (rc)
+			goto disable_clk;
+	}
+
+	return rc;
+
+disable_clk:
+	if (enable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+disable_regulator:
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i])
+			regulator_disable(soc_info->rgltr[i]);
+	}
+
+	return rc;
+}
+
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
+{
+	int i, rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	if (disable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
+		rc |= regulator_disable(soc_info->rgltr[i]);
+		if (rc)
+			pr_err("Regulator disable %s failed\n",
+				soc_info->rgltr_name[i]);
+	}
+
+	if (disable_irq)
+		rc |= cam_soc_util_irq_disable(soc_info);
+
+	return rc;
+}
+
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size)
+{
+	void __iomem     *base_addr = NULL;
+
+	CDBG("base_idx %u size=%d\n", base_index, size);
+
+	if (!soc_info || base_index >= soc_info->num_reg_map ||
+		size <= 0 || (offset + size) >=
+		CAM_SOC_GET_REG_MAP_SIZE(soc_info, base_index))
+		return -EINVAL;
+
+	base_addr = CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/*
+	 * All error checking already done above,
+	 * hence ignoring the return value below.
+	 */
+	cam_io_dump(base_addr, offset, size);
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
new file mode 100644
index 0000000..0baa9e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -0,0 +1,386 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SOC_UTIL_H_
+#define _CAM_SOC_UTIL_H_
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "cam_io_util.h"
+
+#define NO_SET_RATE  -1
+#define INIT_RATE    -2
+
+/* maximum number of device block */
+#define CAM_SOC_MAX_BLOCK           4
+
+/* maximum number of device base */
+#define CAM_SOC_MAX_BASE            CAM_SOC_MAX_BLOCK
+
+/* maximum number of device regulator */
+#define CAM_SOC_MAX_REGULATOR       4
+
+/* maximum number of device clock */
+#define CAM_SOC_MAX_CLK             32
+
+/**
+ * struct cam_soc_reg_map:   Information about the mapped register space
+ *
+ * @mem_base:               Starting location of MAPPED register space
+ * @mem_cam_base:           Starting offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @size:                   Size of register space
+ **/
+struct cam_soc_reg_map {
+	void __iomem                   *mem_base;
+	uint32_t                        mem_cam_base;
+	resource_size_t                 size;
+};
+
+/**
+ * struct cam_hw_soc_info:  Soc information pertaining to specific instance of
+ *                          Camera hardware driver module
+ *
+ * @pdev:                   Platform device pointer
+ * @hw_version:             Camera device version
+ * @index:                  Instance id for the camera device
+ * @irq_name:               Name of the irq associated with the device
+ * @irq_line:               Irq resource
+ * @num_mem_block:          Number of entries in "reg-names"
+ * @mem_block_name:         Array of reg block names
+ * @mem_block_cam_base:     Array of offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @mem_block:              Associated resource structs
+ * @reg_map:                Array of Mapped register info for the "reg-names"
+ * @num_reg_map:            Number of mapped register space associated
+ *                          with mem_block. num_reg_map = num_mem_block in
+ *                          most cases
+ * @num_rgltr:              Number of regulators
+ * @rgltr_name:             Array of regulator names
+ * @rgltr:                  Array of associated regulator resources
+ * @num_clk:                Number of clocks
+ * @clk_name:               Array of clock names
+ * @clk:                    Array of associated clock resources
+ * @clk_rate:               Array of default clock rates
+ * @src_clk_idx:            Source clock index that is rate-controllable
+ * @soc_private:            Soc private data
+ *
+ */
+struct cam_hw_soc_info {
+	struct platform_device         *pdev;
+	uint32_t                        hw_version;
+	uint32_t                        index;
+
+	const char                     *irq_name;
+	struct resource                *irq_line;
+
+	uint32_t                        num_mem_block;
+	const char                     *mem_block_name[CAM_SOC_MAX_BLOCK];
+	uint32_t                        mem_block_cam_base[CAM_SOC_MAX_BLOCK];
+	struct resource                *mem_block[CAM_SOC_MAX_BLOCK];
+	struct cam_soc_reg_map          reg_map[CAM_SOC_MAX_BASE];
+	uint32_t                        num_reg_map;
+
+	uint32_t                        num_rgltr;
+	const char                     *rgltr_name[CAM_SOC_MAX_REGULATOR];
+	struct regulator               *rgltr[CAM_SOC_MAX_REGULATOR];
+
+	uint32_t                        num_clk;
+	const char                     *clk_name[CAM_SOC_MAX_CLK];
+	struct clk                     *clk[CAM_SOC_MAX_CLK];
+	int32_t                         clk_rate[CAM_SOC_MAX_CLK];
+	int32_t                         src_clk_idx;
+
+	void                           *soc_private;
+};
+
+/*
+ * CAM_SOC_GET_REG_MAP_START
+ *
+ * @brief:              This MACRO will get the mapped starting address
+ *                      where the register space can be accessed
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a pointer to the mapped register memory
+ */
+#define CAM_SOC_GET_REG_MAP_START(__soc_info, __base_index)          \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		NULL : __soc_info->reg_map[__base_index].mem_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_CAM_BASE
+ *
+ * @brief:              This MACRO will get the cam_base of the
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns an int32_t value.
+ *                        Failure: -1
+ *                        Success: Starting offset of register space compared
+ *                                 to entire Camera Register Map
+ */
+#define CAM_SOC_GET_REG_MAP_CAM_BASE(__soc_info, __base_index)       \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		-1 : __soc_info->reg_map[__base_index].mem_cam_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_SIZE
+ *
+ * @brief:              This MACRO will get the size of the mapped
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a uint32_t value.
+ *                        Failure: 0
+ *                        Success: Non-zero size of mapped register space
+ */
+#define CAM_SOC_GET_REG_MAP_SIZE(__soc_info, __base_index)           \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		0 : __soc_info->reg_map[__base_index].size)
+
+
+/**
+ * cam_soc_util_get_dt_properties()
+ *
+ * @brief:              Parse the DT and populate the common properties that
+ *                      are part of the soc_info structure - register map,
+ *                      clocks, regulators, irq, etc.
+ *
+ * @soc_info:           Device soc struct to be populated
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info);
+
+
+/**
+ * cam_soc_util_request_platform_resource()
+ *
+ * @brief:              Request regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ * @handler:            Irq handler function pointer
+ * @irq_data:           Irq handler function CB data
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data);
+
+/**
+ * cam_soc_util_release_platform_resource()
+ *
+ * @brief:              Release regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_enable_platform_resource()
+ *
+ * @brief:              Enable regulator, irq resources
+ *
+ * @soc_info:           Device soc information
+ * @enable_clocks:      Boolean flag:
+ *                          true:  Enable all clocks in soc_info now.
+ *                          false: Don't enable clocks now; the driver will
+ *                                 enable them independently.
+ * @enable_irq:         Boolean flag:
+ *                          true:  Enable the IRQ in soc_info now.
+ *                          false: Don't enable the IRQ now; the driver will
+ *                                 enable it independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, bool enable_irq);
+
+/**
+ * cam_soc_util_disable_platform_resource()
+ *
+ * @brief:              Disable regulator, irq resources
+ *
+ * @soc_info:           Device soc information
+ * @disable_clocks:     Boolean flag:
+ *                          true:  Disable all clocks in soc_info now.
+ *                          false: Don't disable clocks now; the driver will
+ *                                 disable them independently.
+ * @disable_irq:        Boolean flag:
+ *                          true:  Disable the IRQ in soc_info now.
+ *                          false: Don't disable the IRQ now; the driver will
+ *                                 disable it independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
+
+/**
+ * cam_soc_util_clk_enable()
+ *
+ * @brief:              Enable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned ON
+ * @clk_name:           Clock name associated with clk
+ * @clk_rate:           Clock rate to be set for clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate);
+
+/**
+ * cam_soc_util_clk_disable()
+ *
+ * @brief:              Disable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned OFF
+ * @clk_name:           Clock name associated with clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name);
+
+/**
+ * cam_soc_util_irq_enable()
+ *
+ * @brief:              Enable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_irq_disable()
+ *
+ * @brief:              Disable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_w()
+ *
+ * @brief:              Camera SOC util for register write
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be written
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return -EINVAL;
+	return cam_io_w(data,
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_w_mb()
+ *
+ * @brief:              Camera SOC util for register write with memory barrier.
+ *                      The barrier is issued only before the write to
+ *                      ensure ordering. If this write also needs to be
+ *                      flushed, call wmb() separately in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be written
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return -EINVAL;
+	return cam_io_w_mb(data,
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r()
+ *
+ * @brief:              Camera SOC util for register read
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return 0;
+	return cam_io_r(
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r_mb()
+ *
+ * @brief:              Camera SOC util for register read with memory barrier.
+ *                      The barrier is issued only before the read to
+ *                      ensure ordering. If stronger ordering around this
+ *                      read is needed, call rmb() separately in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return 0;
+	return cam_io_r_mb(
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_reg_dump()
+ *
+ * @brief:              Camera SOC util for dumping a range of registers
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Start register offset for the dump
+ * @size:               Size specifying the range for dump
+ *
+ * @return:             Success or Failure
+ */
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size);
+
+#endif /* _CAM_SOC_UTIL_H_ */
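As a rough sketch of the intended call sequence for these helpers (a
hypothetical driver probe, not part of this patch; my_irq_handler and priv are
placeholders and error handling is abbreviated):

	static int my_drv_probe(struct platform_device *pdev)
	{
		struct cam_hw_soc_info *soc_info;
		int rc;

		soc_info = devm_kzalloc(&pdev->dev, sizeof(*soc_info),
			GFP_KERNEL);
		if (!soc_info)
			return -ENOMEM;
		soc_info->pdev = pdev;

		/* parse reg/clock/regulator/irq properties from DT */
		rc = cam_soc_util_get_dt_properties(soc_info);
		if (rc)
			return rc;

		/* map registers, get regulators/clocks, request the irq */
		rc = cam_soc_util_request_platform_resource(soc_info,
			my_irq_handler, priv);
		if (rc)
			return rc;

		/* turn on regulators, default clocks and the irq line */
		rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
		if (rc)
			cam_soc_util_release_platform_resource(soc_info);

		return rc;
	}

	/* teardown mirrors the above:
	 *   cam_soc_util_disable_platform_resource(soc_info, true, true);
	 *   cam_soc_util_release_platform_resource(soc_info);
	 */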
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index ef3846c..a0b53bb 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -96,12 +96,18 @@
  * @SDE_CAPS_R1_WB: MDSS V1.x WB block
  * @SDE_CAPS_R3_WB: MDSS V3.x WB block
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
+ * @SDE_CAPS_MIN_BUS_VOTE: minimum bus vote prior to power enable
+ * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
+ * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
 	SDE_CAPS_R3_WB,
 	SDE_CAPS_R3_1P5_DOWNSCALE,
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
+	SDE_CAPS_MIN_BUS_VOTE,
+	SDE_CAPS_SBUF_1,
+	SDE_CAPS_UBWC_2,
 	SDE_CAPS_MAX,
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index ec511f8..9a28700 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -67,6 +67,8 @@
 #define ROT_OVERHEAD_NUMERATOR		27
 #define ROT_OVERHEAD_DENOMINATOR	10000
 
+/* default minimum bandwidth vote */
+#define ROT_ENABLE_BW_VOTE		64000
 /*
  * Max rotator hw blocks possible. Used for upper array limits instead of
  * alloc and freeing small array
@@ -96,6 +98,9 @@
 	.active_only = 1,
 };
 
+/* forward prototype */
+static int sde_rotator_update_perf(struct sde_rot_mgr *mgr);
+
 static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
 		u64 quota)
 {
@@ -107,7 +112,10 @@
 		return -EINVAL;
 	}
 
-	if (bus->bus_hdl < 1) {
+	if (!bus->bus_hdl) {
+		SDEROT_DBG("bus scaling not enabled\n");
+		return 0;
+	} else if (bus->bus_hdl < 0) {
 		SDEROT_ERR("invalid bus handle %d\n", bus->bus_hdl);
 		return -EINVAL;
 	}
@@ -292,6 +300,7 @@
 
 static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 {
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	int ret;
 
 	if (WARN_ON(mgr->regulator_enable == on)) {
@@ -302,6 +311,11 @@
 	SDEROT_EVTLOG(on);
 	SDEROT_DBG("%s: rotator regulators\n", on ? "Enable" : "Disable");
 
+	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && on) {
+		mgr->minimum_bw_vote = mgr->enable_bw_vote;
+		sde_rotator_update_perf(mgr);
+	}
+
 	if (mgr->ops_hw_pre_pmevent)
 		mgr->ops_hw_pre_pmevent(mgr, on);
 
@@ -316,6 +330,11 @@
 	if (mgr->ops_hw_post_pmevent)
 		mgr->ops_hw_post_pmevent(mgr, on);
 
+	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && !on) {
+		mgr->minimum_bw_vote = 0;
+		sde_rotator_update_perf(mgr);
+	}
+
 	mgr->regulator_enable = on;
 }
 
@@ -532,6 +551,10 @@
 	if (!input)
 		dir = DMA_FROM_DEVICE;
 
+	data->sbuf = buffer->sbuf;
+	data->scid = buffer->scid;
+	data->writeback = buffer->writeback;
+
 	memset(planes, 0, sizeof(planes));
 
 	for (i = 0; i < buffer->plane_count; i++) {
@@ -539,6 +562,8 @@
 		planes[i].offset = buffer->planes[i].offset;
 		planes[i].buffer = buffer->planes[i].buffer;
 		planes[i].handle = buffer->planes[i].handle;
+		planes[i].addr = buffer->planes[i].addr;
+		planes[i].len = buffer->planes[i].len;
 	}
 
 	ret =  sde_mdp_data_get_and_validate_size(data, planes,
@@ -760,6 +785,9 @@
 	if (entry->item.flags & SDE_ROTATION_EXT_DMA_BUF)
 		flag |= SDE_ROT_EXT_DMA_BUF;
 
+	if (entry->item.flags & SDE_ROTATION_EXT_IOVA)
+		flag |= SDE_ROT_EXT_IOVA;
+
 	if (entry->item.flags & SDE_ROTATION_SECURE_CAMERA)
 		flag |= SDE_SECURE_CAMERA_SESSION;
 
@@ -800,6 +828,10 @@
 			entry->perf->wrot_limit != mgr->wrot_limit))
 		return true;
 
+	/* sbuf mode is exclusive and may impact queued entries */
+	if (!mgr->sbuf_ctx && entry->perf && entry->perf->config.output.sbuf)
+		return true;
+
 	return false;
 }
 
@@ -855,6 +887,9 @@
 				entry->item.session_id,
 				entry->item.sequence_id);
 		return sde_rotator_is_hw_idle(mgr, hw);
+	} else if (mgr->sbuf_ctx && mgr->sbuf_ctx != entry->private) {
+		SDEROT_DBG("wait until sbuf mode is off\n");
+		return false;
 	} else {
 		return (atomic_read(&hw->num_active) < hw->max_active);
 	}
@@ -907,6 +942,14 @@
 			entry->item.session_id, entry->item.sequence_id);
 	mgr->rdot_limit = entry->perf->rdot_limit;
 	mgr->wrot_limit = entry->perf->wrot_limit;
+
+	if (!mgr->sbuf_ctx && entry->perf->config.output.sbuf) {
+		SDEROT_DBG("acquire sbuf s:%d.%d\n", entry->item.session_id,
+				entry->item.sequence_id);
+		SDEROT_EVTLOG(entry->item.session_id, entry->item.sequence_id);
+		mgr->sbuf_ctx = entry->private;
+	}
+
 	return hw;
 }
 
@@ -1233,8 +1276,12 @@
 				(mgr->overhead.denom - max_fps *
 				mgr->overhead.numer));
 
+	/* use client provided clock if specified */
+	if (config->flags & SDE_ROTATION_EXT_PERF)
+		perf->clk_rate = config->clk_rate;
+
 	/*
-	 * check for Override clock calcualtion
+	 * check for Override clock calculation
 	 */
 	if (rot_dev->min_rot_clk > perf->clk_rate)
 		perf->clk_rate = rot_dev->min_rot_clk;
@@ -1258,6 +1305,10 @@
 	if (rot_dev->min_bw > perf->bw)
 		perf->bw = rot_dev->min_bw;
 
+	/* use client provided bandwidth if specified */
+	if (config->flags & SDE_ROTATION_EXT_PERF)
+		perf->bw = config->data_bw;
+
 	perf->rdot_limit = sde_mdp_get_ot_limit(
 			config->input.width, config->input.height,
 			config->input.format, config->frame_rate, true);
@@ -1291,6 +1342,7 @@
 	}
 
 	total_bw += mgr->pending_close_bw_vote;
+	total_bw = max_t(u64, total_bw, mgr->minimum_bw_vote);
 	sde_rotator_enable_reg_bus(mgr, total_bw);
 	ATRACE_INT("bus_quota", total_bw);
 	sde_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
@@ -1560,7 +1612,11 @@
 	if ((in_fmt->is_yuv != out_fmt->is_yuv) ||
 		(in_fmt->pixel_mode != out_fmt->pixel_mode) ||
 		(in_fmt->unpack_tight != out_fmt->unpack_tight)) {
-		SDEROT_ERR("Rotator does not support CSC\n");
+		SDEROT_ERR(
+			"Rotator does not support CSC yuv:%d/%d pm:%d/%d ut:%d/%d\n",
+			in_fmt->is_yuv, out_fmt->is_yuv,
+			in_fmt->pixel_mode, out_fmt->pixel_mode,
+			in_fmt->unpack_tight, out_fmt->unpack_tight);
 		goto verify_error;
 	}
 
@@ -1951,7 +2007,7 @@
 	devm_kfree(&mgr->pdev->dev, req);
 }
 
-static void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
+void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private)
 {
 	struct sde_rot_entry_container *req, *req_next;
@@ -2029,6 +2085,34 @@
 	return ret;
 }
 
+/*
+ * sde_rotator_commit_request - commit the request to hardware
+ * @mgr: pointer to rotator manager
+ * @ctx: pointer to per file context
+ * @req: pointer to rotation request
+ *
+ * This differs from sde_rotator_queue_request in that this
+ * function will wait until request is committed to hardware.
+ */
+void sde_rotator_commit_request(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *ctx,
+	struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!mgr || !ctx || !req || !req->entries) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	sde_rotator_queue_request(mgr, ctx, req);
+
+	sde_rot_mgr_unlock(mgr);
+	for (i = 0; i < req->count; i++)
+		flush_work(&req->entries[i].commit_work);
+	sde_rot_mgr_lock(mgr);
+}
+
 static int sde_rotator_open_session(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private, u32 session_id)
 {
@@ -2139,6 +2223,12 @@
 	sde_rotator_update_clk(mgr);
 	sde_rotator_resource_ctrl(mgr, false);
 done:
+	if (mgr->sbuf_ctx == private) {
+		SDEROT_DBG("release sbuf session id:%u\n", id);
+		SDEROT_EVTLOG(id);
+		mgr->sbuf_ctx = NULL;
+	}
+
 	SDEROT_DBG("Closed session id:%u\n", id);
 	return 0;
 }
@@ -2183,6 +2273,11 @@
 		goto done;
 	}
 
+	if (config->output.sbuf && mgr->sbuf_ctx != private && mgr->sbuf_ctx) {
+		SDEROT_ERR("too many sbuf sessions\n");
+		goto done;
+	}
+
 	SDEROT_DBG(
 		"reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u fps:%d clk:%lu, bw:%llu\n",
 		config->session_id, config->input.width, config->input.height,
@@ -2409,8 +2504,7 @@
 	mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
 	if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
 		ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
-		if (!ret) {
-			ret = -EINVAL;
+		if (ret) {
 			SDEROT_ERR("msm_bus_cl_get_pdata failed. ret=%d\n",
 					ret);
 			mgr->data_bus.bus_scale_pdata = NULL;
@@ -2546,8 +2640,8 @@
 static int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
 {
 	if (!mgr->data_bus.bus_scale_pdata) {
-		SDEROT_ERR("Scale table is NULL\n");
-		return -EINVAL;
+		SDEROT_DBG("Bus scaling is not enabled\n");
+		return 0;
 	}
 
 	mgr->data_bus.bus_hdl =
@@ -2719,6 +2813,7 @@
 	mgr->pdev = pdev;
 	mgr->device = &pdev->dev;
 	mgr->pending_close_bw_vote = 0;
+	mgr->enable_bw_vote = ROT_ENABLE_BW_VOTE;
 	mgr->hwacquire_timeout = ROT_HW_ACQUIRE_TIMEOUT_IN_MS;
 	mgr->queue_count = 1;
 	mgr->pixel_per_clk.numer = ROT_PIXEL_PER_CLK_NUMERATOR;
@@ -2938,6 +3033,7 @@
 	sde_rot_mgr_lock(mgr);
 	atomic_inc(&mgr->device_suspended);
 	sde_rotator_suspend_cancel_rot_work(mgr);
+	mgr->minimum_bw_vote = 0;
 	sde_rotator_update_perf(mgr);
 	ATRACE_END("pm_active");
 	SDEROT_DBG("end pm active %d\n", atomic_read(&mgr->device_suspended));
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index fd77d78..819f57b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -62,6 +62,12 @@
 /* secure camera operation*/
 #define SDE_ROTATION_SECURE_CAMERA	0x40000
 
+/* use client mapped i/o virtual address */
+#define SDE_ROTATION_EXT_IOVA		0x80000
+
+/* use client provided clock/bandwidth parameters */
+#define SDE_ROTATION_EXT_PERF		0x100000
+
 /**********************************************************************
  * configuration structures
  **********************************************************************/
@@ -72,12 +78,14 @@
  * @height: height of buffer region to be processed
  * @format: pixel format of buffer
  * @comp_ratio: compression ratio for the session
+ * @sbuf: true if buffer is streaming buffer
  */
 struct sde_rotation_buf_info {
 	uint32_t width;
 	uint32_t height;
 	uint32_t format;
 	struct sde_mult_factor comp_ratio;
+	bool sbuf;
 };
 
 /*
@@ -86,6 +94,8 @@
  * @input: input buffer information
  * @output: output buffer information
  * @frame_rate: session frame rate in fps
+ * @clk_rate: requested rotator clock rate if SDE_ROTATION_EXT_PERF is set
+ * @data_bw: requested data bus bandwidth if SDE_ROTATION_EXT_PERF is set
  * @flags: configuration flags, e.g. rotation angle, flip, etc...
  */
 struct sde_rotation_config {
@@ -93,6 +103,8 @@
 	struct sde_rotation_buf_info	input;
 	struct sde_rotation_buf_info	output;
 	uint32_t	frame_rate;
+	uint64_t	clk_rate;
+	uint64_t	data_bw;
 	uint32_t	flags;
 };
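A client opting into the new SDE_ROTATION_EXT_PERF flag would fill these fields
roughly as follows (hypothetical numbers, not part of this patch; config is an
already-populated struct sde_rotation_config):

	config.frame_rate = 30;
	config.clk_rate = 200000000;	/* hypothetical 200 MHz rotator clock */
	config.data_bw = 500000000;	/* hypothetical ~500 MB/s bus vote */
	config.flags |= SDE_ROTATION_EXT_PERF;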
 
@@ -121,10 +133,22 @@
 	SDE_ROTATOR_CLK_MAX
 };
 
+enum sde_rotator_trigger {
+	SDE_ROTATOR_TRIGGER_IMMEDIATE,
+	SDE_ROTATOR_TRIGGER_VIDEO,
+	SDE_ROTATOR_TRIGGER_COMMAND,
+};
+
 struct sde_rotation_item {
 	/* rotation request flag */
 	uint32_t	flags;
 
+	/* rotation trigger mode */
+	uint32_t	trigger;
+
+	/* prefill bandwidth in Bps */
+	uint64_t	prefill_bw;
+
 	/* Source crop rectangle */
 	struct sde_rect	src_rect;
 
@@ -233,6 +257,26 @@
 struct sde_rot_mgr;
 struct sde_rot_file_private;
 
+/*
+ * struct sde_rot_entry - rotation entry
+ * @item: rotation item
+ * @commit_work: work descriptor for commit handler
+ * @done_work: work descriptor for done handler
+ * @commitq: pointer to commit handler rotator queue
+ * @fenceq: pointer to fence signaling rotator queue
+ * @doneq: pointer to done handler rotator queue
+ * @request: pointer to containing request
+ * @src_buf: descriptor of source buffer
+ * @dst_buf: descriptor of destination buffer
+ * @input_fence: pointer to input fence for when input content is available
+ * @output_fence: pointer to output fence for when output content is available
+ * @output_signaled: true if output fence of this entry has been signaled
+ * @dnsc_factor_w: calculated width downscale factor for this entry
+ * @dnsc_factor_h: calculated height downscale factor for this entry
+ * @perf: pointer to performance configuration associated with this entry
+ * @work_assigned: true if this item is assigned to h/w queue/unit
+ * @private: pointer to controlling session context
+ */
 struct sde_rot_entry {
 	struct sde_rotation_item item;
 	struct work_struct commit_work;
@@ -258,6 +302,18 @@
 	struct sde_rot_file_private *private;
 };
 
+/*
+ * struct sde_rot_perf - rotator session performance configuration
+ * @list: list of performance configuration under one session
+ * @config: current rotation configuration
+ * @clk_rate: current clock rate in Hz
+ * @bw: current bandwidth in byte per second
+ * @work_dis_lock: serialization lock for updating work distribution (not used)
+ * @work_distribution: work distribution among multiple hardware queue/unit
+ * @last_wb_idx: last queue/unit index, used to account for pre-distributed work
+ * @rdot_limit: read OT limit of this session
+ * @wrot_limit: write OT limit of this session
+ */
 struct sde_rot_perf {
 	struct list_head list;
 	struct sde_rotation_config config;
@@ -270,6 +326,14 @@
 	u32 wrot_limit;
 };
 
+/*
+ * struct sde_rot_file_private - rotator manager per session context
+ * @list: list of all session context
+ * @req_list: list of rotation request for this session
+ * @perf_list: list of performance configuration for this session (only one)
+ * @mgr: pointer to the controlling rotator manager
+ * @fenceq: pointer to rotator queue to signal when entry is done
+ */
 struct sde_rot_file_private {
 	struct list_head list;
 	struct list_head req_list;
@@ -278,6 +342,13 @@
 	struct sde_rot_queue *fenceq;
 };
 
+/*
+ * struct sde_rot_bus_data_type - rotator bus scaling configuration
+ * @bus_scale_pdata: pointer to bus scaling configuration table
+ * @bus_hdl: msm bus scaling handle
+ * @curr_bw_uc_idx: current usecase index into configuration table
+ * @curr_quota_val: current bandwidth request in byte per second
+ */
 struct sde_rot_bus_data_type {
 	struct msm_bus_scale_pdata *bus_scale_pdata;
 	u32 bus_hdl;
@@ -285,6 +356,37 @@
 	u64 curr_quota_val;
 };
 
+/*
+ * struct sde_rot_mgr - core rotator manager
+ * @lock: serialization lock to rotator manager functions
+ * @device_suspended: 0 if device is not suspended; non-zero suspended
+ * @pdev: pointer to controlling platform device
+ * @device: pointer to controlling device
+ * @queue_count: number of hardware queue/unit available
+ * @commitq: array of rotator commit queue corresponding to hardware queue
+ * @doneq: array of rotator done queue corresponding to hardware queue
+ * @file_list: list of all sessions managed by rotator manager
+ * @pending_close_bw_vote: bandwidth of closed sessions with pending work
+ * @minimum_bw_vote: minimum bandwidth required for current use case
+ * @enable_bw_vote: minimum bandwidth required for power enable
+ * @data_bus: data bus configuration state
+ * @reg_bus: register bus configuration state
+ * @module_power: power/clock configuration state
+ * @regulator_enable: true if footswitch is enabled; false otherwise
+ * @res_ref_cnt: reference count of how many times resource is requested
+ * @rot_enable_clk_cnt: reference count of how many times clock is requested
+ * @rot_clk: array of rotator and periphery clocks
+ * @num_rot_clk: size of the rotator clock array
+ * @rdot_limit: current read OT limit
+ * @wrot_limit: current write OT limit
+ * @hwacquire_timeout: maximum wait time for hardware availability in msec
+ * @pixel_per_clk: rotator hardware throughput in pixels per clock
+ * @fudge_factor: fudge factor for clock calculation
+ * @overhead: software overhead for offline rotation in msec
+ * @sbuf_ctx: pointer to sbuf session context
+ * @ops_xxx: function pointers of rotator HAL layer
+ * @hw_data: private handle of rotator HAL layer
+ */
 struct sde_rot_mgr {
 	struct mutex lock;
 	atomic_t device_suspended;
@@ -306,6 +408,8 @@
 	struct list_head file_list;
 
 	u64 pending_close_bw_vote;
+	u64 minimum_bw_vote;
+	u64 enable_bw_vote;
 	struct sde_rot_bus_data_type data_bus;
 	struct sde_rot_bus_data_type reg_bus;
 
@@ -325,6 +429,8 @@
 	struct sde_mult_factor fudge_factor;
 	struct sde_mult_factor overhead;
 
+	struct sde_rot_file_private *sbuf_ctx;
+
 	int (*ops_config_hw)(struct sde_rot_hw_resource *hw,
 			struct sde_rot_entry *entry);
 	int (*ops_kickoff_entry)(struct sde_rot_hw_resource *hw,
@@ -351,6 +457,9 @@
 			bool input);
 	int (*ops_hw_is_valid_pixfmt)(struct sde_rot_mgr *mgr, u32 pixfmt,
 			bool input);
+	int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
+			int len);
+	int (*ops_hw_get_maxlinewidth)(struct sde_rot_mgr *mgr);
 
 	void *hw_data;
 };
@@ -373,6 +482,23 @@
 	return 0;
 }
 
+static inline int sde_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
+		char *caps, int len)
+{
+	if (mgr && mgr->ops_hw_get_downscale_caps)
+		return mgr->ops_hw_get_downscale_caps(mgr, caps, len);
+
+	return 0;
+}
+
+static inline int sde_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
+{
+	if (mgr && mgr->ops_hw_get_maxlinewidth)
+		return mgr->ops_hw_get_maxlinewidth(mgr);
+
+	return 2048;
+}
+
 static inline int __compare_session_item_rect(
 	struct sde_rotation_buf_info *s_rect,
 	struct sde_rect *i_rect, uint32_t i_fmt, bool src)
@@ -510,6 +636,18 @@
 	struct sde_rot_entry_container *req);
 
 /*
+ * sde_rotator_commit_request - queue/schedule the given request and wait
+ *	until h/w commit
+ * @mgr: Pointer to rotator manager
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ */
+void sde_rotator_commit_request(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *ctx,
+	struct sde_rot_entry_container *req);
+
+/*
  * sde_rotator_verify_config_all - verify given rotation configuration
  * @rot_dev: Pointer to rotator device
  * @config: Pointer to rotator configuration
@@ -557,6 +695,14 @@
 int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable);
 
 /*
+ * sde_rotator_cancel_all_requests - cancel all outstanding requests
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to rotator manager per file context
+ */
+void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private);
+
+/*
  * sde_rot_mgr_lock - serialization lock prior to rotator manager calls
  * @mgr: Pointer to rotator manager
  */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index a41c450..e56c70a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -868,6 +868,12 @@
 		return -EINVAL;
 	}
 
+	if (!debugfs_create_u64("enable_bw_vote", 0644,
+			debugfs_root, &mgr->enable_bw_vote)) {
+		SDEROT_WARN("failed to create enable_bw_vote\n");
+		return -EINVAL;
+	}
+
 	if (mgr->ops_hw_create_debugfs) {
 		ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
 		if (ret)
@@ -990,11 +996,14 @@
 {
 	struct sde_rotator_debug_base *dbg = file->private_data;
 
-	if (dbg && dbg->buf) {
+	if (dbg) {
+		mutex_lock(&dbg->buflock);
 		kfree(dbg->buf);
 		dbg->buf_len = 0;
 		dbg->buf = NULL;
+		mutex_unlock(&dbg->buflock);
 	}
+
 	return 0;
 }
 
@@ -1026,8 +1035,10 @@
 	if (cnt > (dbg->max_offset - off))
 		cnt = dbg->max_offset - off;
 
+	mutex_lock(&dbg->buflock);
 	dbg->off = off;
 	dbg->cnt = cnt;
+	mutex_unlock(&dbg->buflock);
 
 	SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);
 
@@ -1047,7 +1058,10 @@
 	if (*ppos)
 		return 0;	/* the end */
 
+	mutex_lock(&dbg->buflock);
 	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+	mutex_unlock(&dbg->buflock);
+
 	if (len < 0 || len >= sizeof(buf))
 		return 0;
 
@@ -1086,6 +1100,8 @@
 	if (off >= dbg->max_offset)
 		return -EFAULT;
 
+	mutex_lock(&dbg->buflock);
+
 	/* Enable Clock for register access */
 	sde_rotator_clk_ctrl(dbg->mgr, true);
 
@@ -1094,6 +1110,8 @@
 	/* Disable Clock after register access */
 	sde_rotator_clk_ctrl(dbg->mgr, false);
 
+	mutex_unlock(&dbg->buflock);
+
 	SDEROT_DBG("addr=%zx data=%x\n", off, data);
 
 	return count;
@@ -1104,12 +1122,14 @@
 {
 	struct sde_rotator_debug_base *dbg = file->private_data;
 	size_t len;
+	int rc = 0;
 
 	if (!dbg) {
 		SDEROT_ERR("invalid handle\n");
 		return -ENODEV;
 	}
 
+	mutex_lock(&dbg->buflock);
 	if (!dbg->buf) {
 		char dump_buf[64];
 		char *ptr;
@@ -1121,7 +1141,8 @@
 
 		if (!dbg->buf) {
 			SDEROT_ERR("not enough memory to hold reg dump\n");
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto debug_read_error;
 		}
 
 		ptr = dbg->base + dbg->off;
@@ -1151,18 +1172,26 @@
 		dbg->buf_len = tot;
 	}
 
-	if (*ppos >= dbg->buf_len)
-		return 0; /* done reading */
+	if (*ppos >= dbg->buf_len) {
+		rc = 0; /* done reading */
+		goto debug_read_error;
+	}
 
 	len = min(count, dbg->buf_len - (size_t) *ppos);
 	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
 		SDEROT_ERR("failed to copy to user\n");
-		return -EFAULT;
+		rc = -EFAULT;
+		goto debug_read_error;
 	}
 
 	*ppos += len; /* increase offset */
 
+	mutex_unlock(&dbg->buflock);
 	return len;
+
+debug_read_error:
+	mutex_unlock(&dbg->buflock);
+	return rc;
 }
 
 static const struct file_operations sde_rotator_off_fops = {
@@ -1196,6 +1225,9 @@
 	if (!dbg)
 		return -ENOMEM;
 
+	mutex_init(&dbg->buflock);
+	mutex_lock(&dbg->buflock);
+
 	if (name)
 		strlcpy(dbg->name, name, sizeof(dbg->name));
 	dbg->base = io_data->base;
@@ -1217,6 +1249,7 @@
 			dbg->base += rot_dev->mdata->regdump ?
 				rot_dev->mdata->regdump[0].offset : 0;
 	}
+	mutex_unlock(&dbg->buflock);
 
 	strlcpy(dbgname + prefix_len, "off", sizeof(dbgname) - prefix_len);
 	ent_off = debugfs_create_file(dbgname, 0644, debugfs_root, dbg,
@@ -1234,7 +1267,9 @@
 		goto reg_fail;
 	}
 
+	mutex_lock(&dbg->buflock);
 	dbg->mgr = rot_dev->mgr;
+	mutex_unlock(&dbg->buflock);
 
 	return 0;
 reg_fail:
@@ -1283,6 +1318,13 @@
 		return NULL;
 	}
 
+	if (!debugfs_create_u32("disable_syscache", 0644,
+			debugfs_root, &rot_dev->disable_syscache)) {
+		SDEROT_ERR("fail create disable_syscache\n");
+		debugfs_remove_recursive(debugfs_root);
+		return NULL;
+	}
+
 	if (!debugfs_create_u32("streamoff_timeout", 0644,
 			debugfs_root, &rot_dev->streamoff_timeout)) {
 		SDEROT_ERR("fail create streamoff_timeout\n");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index c2c6f97..c6d0151 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,7 @@
 	char *buf;
 	size_t buf_len;
 	struct sde_rot_mgr *mgr;
+	struct mutex buflock;
 };
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 89171b7..1c94632 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -27,6 +27,7 @@
 #include <media/videobuf2-v4l2.h>
 #include <media/v4l2-mem2mem.h>
 
+#include "sde_rotator_inline.h"
 #include "sde_rotator_base.h"
 #include "sde_rotator_core.h"
 #include "sde_rotator_dev.h"
@@ -449,11 +450,15 @@
 			list_empty(&ctx->pending_list),
 			msecs_to_jiffies(rot_dev->streamoff_timeout));
 	mutex_lock(q->lock);
-	if (!ret)
+	if (!ret) {
 		SDEDEV_ERR(rot_dev->dev,
 				"timeout to stream off s:%d t:%d p:%d\n",
 				ctx->session_id, q->type,
 				!list_empty(&ctx->pending_list));
+		sde_rot_mgr_lock(rot_dev->mgr);
+		sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private);
+		sde_rot_mgr_unlock(rot_dev->mgr);
+	}
 
 	sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
 
@@ -845,24 +850,26 @@
 }
 
 /*
- * sde_rotator_open - Rotator device open method.
- * @file: Pointer to file struct.
+ * sde_rotator_ctx_open - Rotator device open method.
+ * @rot_dev: Pointer to rotator device structure
+ * @file: Pointer to file struct (optional)
+ * return: Pointer rotator context if success; ptr error code, otherwise.
  */
-static int sde_rotator_open(struct file *file)
+struct sde_rotator_ctx *sde_rotator_ctx_open(
+		struct sde_rotator_device *rot_dev, struct file *file)
 {
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
-	struct video_device *video = video_devdata(file);
+	struct video_device *video = file ? video_devdata(file) : NULL;
 	struct sde_rotator_ctx *ctx;
 	struct v4l2_ctrl_handler *ctrl_handler;
 	char name[32];
 	int i, ret;
 
 	if (atomic_read(&rot_dev->mgr->device_suspended))
-		return -EPERM;
+		return ERR_PTR(-EPERM);
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	if (mutex_lock_interruptible(&rot_dev->lock)) {
 		ret = -ERESTARTSYS;
@@ -870,6 +877,7 @@
 	}
 
 	ctx->rot_dev = rot_dev;
+	ctx->file = file;
 
 	/* Set context defaults */
 	ctx->session_id = rot_dev->session_id++;
@@ -910,15 +918,17 @@
 		list_add_tail(&request->list, &ctx->retired_list);
 	}
 
-	v4l2_fh_init(&ctx->fh, video);
-	file->private_data = &ctx->fh;
-	v4l2_fh_add(&ctx->fh);
+	if (ctx->file) {
+		v4l2_fh_init(&ctx->fh, video);
+		file->private_data = &ctx->fh;
+		v4l2_fh_add(&ctx->fh);
 
-	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
-		ctx, sde_rotator_queue_init);
-	if (IS_ERR_OR_NULL(ctx->fh.m2m_ctx)) {
-		ret = PTR_ERR(ctx->fh.m2m_ctx);
-		goto error_m2m_init;
+		ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
+			ctx, sde_rotator_queue_init);
+		if (IS_ERR_OR_NULL(ctx->fh.m2m_ctx)) {
+			ret = PTR_ERR(ctx->fh.m2m_ctx);
+			goto error_m2m_init;
+		}
 	}
 
 	ret = kobject_init_and_add(&ctx->kobj, &sde_rotator_fs_ktype,
@@ -963,33 +973,34 @@
 	sde_rot_mgr_unlock(rot_dev->mgr);
 
 	/* Create control */
-	ctrl_handler = &ctx->ctrl_handler;
-	v4l2_ctrl_handler_init(ctrl_handler, 4);
-	v4l2_ctrl_new_std(ctrl_handler,
+	if (ctx->file) {
+		ctrl_handler = &ctx->ctrl_handler;
+		v4l2_ctrl_handler_init(ctrl_handler, 4);
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
-	v4l2_ctrl_new_std(ctrl_handler,
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
-	v4l2_ctrl_new_std(ctrl_handler,
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
-	v4l2_ctrl_new_custom(ctrl_handler,
+		v4l2_ctrl_new_custom(ctrl_handler,
 			&sde_rotator_ctrl_secure, NULL);
-	v4l2_ctrl_new_custom(ctrl_handler,
+		v4l2_ctrl_new_custom(ctrl_handler,
 			&sde_rotator_ctrl_secure_camera, NULL);
-	if (ctrl_handler->error) {
-		ret = ctrl_handler->error;
-		v4l2_ctrl_handler_free(ctrl_handler);
-		goto error_ctrl_handler;
+		if (ctrl_handler->error) {
+			ret = ctrl_handler->error;
+			v4l2_ctrl_handler_free(ctrl_handler);
+			goto error_ctrl_handler;
+		}
+		ctx->fh.ctrl_handler = ctrl_handler;
+		v4l2_ctrl_handler_setup(ctrl_handler);
 	}
-	ctx->fh.ctrl_handler = ctrl_handler;
-	v4l2_ctrl_handler_setup(ctrl_handler);
-
 	mutex_unlock(&rot_dev->lock);
 
 	SDEDEV_DBG(ctx->rot_dev->dev, "SDE v4l2 rotator open success\n");
 
 	ATRACE_BEGIN(ctx->kobj.name);
 
-	return 0;
+	return ctx;
 error_ctrl_handler:
 error_open_session:
 	sde_rot_mgr_unlock(rot_dev->mgr);
@@ -1001,23 +1012,26 @@
 	kobject_put(&ctx->kobj);
 error_kobj_init:
 error_m2m_init:
-	v4l2_fh_del(&ctx->fh);
-	v4l2_fh_exit(&ctx->fh);
+	if (ctx->file) {
+		v4l2_fh_del(&ctx->fh);
+		v4l2_fh_exit(&ctx->fh);
+	}
 	mutex_unlock(&rot_dev->lock);
 error_lock:
 	kfree(ctx);
-	return ret;
+	return ERR_PTR(ret);
 }
 
 /*
- * sde_rotator_release - Rotator device release method.
- * @file: Pointer to file struct.
+ * sde_rotator_ctx_release - Rotator device release method.
+ * @ctx: Pointer to rotator context.
+ * @file: Pointer to file struct (optional)
+ * return: 0 on success; error code otherwise
  */
-static int sde_rotator_release(struct file *file)
+static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx,
+		struct file *file)
 {
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
-	struct sde_rotator_ctx *ctx =
-			sde_rotator_ctx_from_fh(file->private_data);
+	struct sde_rotator_device *rot_dev = ctx->rot_dev;
 	u32 session_id = ctx->session_id;
 	struct list_head *curr, *next;
 
@@ -1025,10 +1039,14 @@
 
 	SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id);
 	mutex_lock(&rot_dev->lock);
-	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
-	SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
-	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	if (ctx->file) {
+		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+		SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
+		v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_OUTPUT);
+		v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	}
 	mutex_unlock(&rot_dev->lock);
 	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n", session_id);
 	list_for_each_safe(curr, next, &ctx->pending_list) {
@@ -1058,9 +1076,11 @@
 	destroy_workqueue(ctx->work_queue.rot_work_queue);
 	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
 	kobject_put(&ctx->kobj);
-	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
-	v4l2_fh_del(&ctx->fh);
-	v4l2_fh_exit(&ctx->fh);
+	if (ctx->file) {
+		v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+		v4l2_fh_del(&ctx->fh);
+		v4l2_fh_exit(&ctx->fh);
+	}
 	kfree(ctx->vbinfo_out);
 	kfree(ctx->vbinfo_cap);
 	kfree(ctx);
@@ -1115,6 +1135,7 @@
 	ctx = request->ctx;
 
 	request->req = NULL;
+	request->committed = false;
 	spin_lock(&ctx->list_lock);
 	list_del_init(&request->list);
 	list_add_tail(&request->list, &ctx->retired_list);
@@ -1125,6 +1146,566 @@
 }
 
 /*
+ * sde_rotator_is_request_retired - Return true if given request already retired
+ * @request: Pointer to rotator request
+ */
+static bool sde_rotator_is_request_retired(struct sde_rotator_request *request)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rot_entry_container *req;
+	u32 sequence_id;
+	s32 retire_delta;
+
+	if (!request || !request->ctx || !request->req ||
+			!request->req->entries || !request->req->count)
+		return true;
+
+	ctx = request->ctx;
+	req = request->req;
+	sequence_id = req->entries[req->count - 1].item.sequence_id;
+
+	retire_delta = (s32) (ctx->retired_sequence_id - sequence_id);
+
+	SDEROT_DBG("sequence:%u/%u\n", sequence_id, ctx->retired_sequence_id);
+
+	return retire_delta >= 0;
+}
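+
+/*
+ * The signed delta above makes the check safe across sequence id wrap-around.
+ * Illustration (hypothetical values): with retired_sequence_id == 2 after a
+ * wrap and sequence_id == 0xFFFFFFFE from before the wrap,
+ * (s32)(2 - 0xFFFFFFFE) == 4 >= 0, so the request is reported as retired.
+ */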
+
+/*
+ * sde_rotator_inline_open - open inline rotator session
+ * @pdev: Pointer to rotator platform device
+ * return: Pointer to new rotator session context; error pointer on failure
+ */
+void *sde_rotator_inline_open(struct platform_device *pdev)
+{
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_ctx *ctx;
+	int rc;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctx = sde_rotator_ctx_open(rot_dev, NULL);
+	if (IS_ERR_OR_NULL(ctx)) {
+		rc = PTR_ERR(ctx);
+		SDEROT_ERR("failed to open rotator context %d\n", rc);
+		goto rotator_open_error;
+	}
+
+	ctx->slice = llcc_slice_getd(rot_dev->dev, "rotator");
+	if (IS_ERR(ctx->slice)) {
+		rc = PTR_ERR(ctx->slice);
+		SDEROT_ERR("failed to get system cache %d\n", rc);
+		goto slice_getd_error;
+	}
+
+	if (!rot_dev->disable_syscache) {
+		rc = llcc_slice_activate(ctx->slice);
+		if (rc) {
+			SDEROT_ERR("failed to activate slice %d\n", rc);
+			goto activate_error;
+		}
+		SDEROT_DBG("scid %d size %zukb\n",
+				llcc_get_slice_id(ctx->slice),
+				llcc_get_slice_size(ctx->slice));
+	} else {
+		SDEROT_DBG("syscache bypassed\n");
+	}
+
+	SDEROT_EVTLOG(ctx->session_id, llcc_get_slice_id(ctx->slice),
+			llcc_get_slice_size(ctx->slice),
+			rot_dev->disable_syscache);
+
+	return ctx;
+
+activate_error:
+	llcc_slice_putd(ctx->slice);
+	ctx->slice = NULL;
+slice_getd_error:
+	sde_rotator_ctx_release(ctx, NULL);
+rotator_open_error:
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(sde_rotator_inline_open);
+
+int sde_rotator_inline_release(void *handle)
+{
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_ctx *ctx;
+
+	if (!handle) {
+		SDEROT_ERR("invalid rotator ctx\n");
+		return -EINVAL;
+	}
+
+	ctx = handle;
+	rot_dev = ctx->rot_dev;
+
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	if (ctx->slice) {
+		if (!rot_dev->disable_syscache)
+			llcc_slice_deactivate(ctx->slice);
+		llcc_slice_putd(ctx->slice);
+		ctx->slice = NULL;
+	}
+
+	SDEROT_EVTLOG(ctx->session_id);
+
+	return sde_rotator_ctx_release(ctx, NULL);
+}
+EXPORT_SYMBOL(sde_rotator_inline_release);
+
+/*
+ * sde_rotator_inline_get_dst_pixfmt - determine output pixel format
+ * @pdev: Pointer to platform device
+ * @src_pixfmt: input pixel format
+ * @dst_pixfmt: Pointer to output pixel format (output)
+ * return: 0 if success; error code otherwise
+ */
+int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
+		u32 src_pixfmt, u32 *dst_pixfmt)
+{
+	return sde_rot_get_base_tilea5x_pixfmt(src_pixfmt, dst_pixfmt);
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_dst_pixfmt);
+
+/*
+ * sde_rotator_inline_get_downscale_caps - get scaling capability
+ * @pdev: Pointer to platform device
+ * @caps: string buffer for capability
+ * @len: length of string buffer
+ * return: length of capability string
+ */
+int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
+		char *caps, int len)
+{
+	struct sde_rotator_device *rot_dev;
+	int rc;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	rc = sde_rotator_get_downscale_caps(rot_dev->mgr, caps, len);
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return rc;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_downscale_caps);
+
+/*
+ * sde_rotator_inline_get_maxlinewidth - get maximum line width of rotator
+ * @pdev: Pointer to platform device
+ * return: maximum line width
+ */
+int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev)
+{
+	struct sde_rotator_device *rot_dev;
+	int maxlinewidth;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *)platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	maxlinewidth = sde_rotator_get_maxlinewidth(rot_dev->mgr);
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return maxlinewidth;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_maxlinewidth);
+
+/*
+ * sde_rotator_inline_get_pixfmt_caps - get pixel format capability
+ * @pdev: Pointer to platform device
+ * @input: true to query input (source) formats; false for output formats
+ * @pixfmts: Pointer to array for returned pixel formats
+ * @len: length of pixel format array
+ * return: number of supported pixel formats if success; error code otherwise
+ */
+int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
+		bool input, u32 *pixfmts, int len)
+{
+	struct sde_rotator_device *rot_dev;
+	u32 i, pixfmt;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	for (i = 0;; i++) {
+		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, input);
+		if (!pixfmt)
+			break;
+		if (pixfmts && i < len)
+			pixfmts[i] = pixfmt;
+	}
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return i;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_pixfmt_caps);
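+
+/*
+ * Usage sketch (illustrative only; error handling omitted): the table can be
+ * sized with a first call passing a NULL buffer, then filled in, e.g.
+ *
+ *	int cnt = sde_rotator_inline_get_pixfmt_caps(pdev, true, NULL, 0);
+ *	u32 *fmts = kcalloc(cnt, sizeof(u32), GFP_KERNEL);
+ *
+ *	if (fmts)
+ *		sde_rotator_inline_get_pixfmt_caps(pdev, true, fmts, cnt);
+ */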
+
+/*
+ * sde_rotator_inline_commit - commit given rotator command
+ * @handle: Pointer to rotator context
+ * @cmd: Pointer to rotator command
+ * @cmd_type: command type - validate/commit/cleanup
+ * return: 0 if success; error code otherwise
+ */
+int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
+		enum sde_rotator_inline_cmd_type cmd_type)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_request *request = NULL;
+	struct sde_rot_entry_container *req = NULL;
+	ktime_t *ts;
+	u32 flags = 0;
+	int i, ret;
+
+	if (!handle || !cmd) {
+		SDEROT_ERR("invalid rotator handle/cmd\n");
+		return -EINVAL;
+	}
+
+	ctx = handle;
+	rot_dev = ctx->rot_dev;
+
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	SDEROT_DBG(
+		"s:%d.%u src:(%u,%u,%u,%u)/%ux%u/%c%c%c%c dst:(%u,%u,%u,%u)/%c%c%c%c r:%d f:%d/%d s:%d fps:%u clk:%llu bw:%llu wb:%d vid:%d cmd:%d\n",
+		ctx->session_id, cmd->sequence_id,
+		cmd->src_rect_x, cmd->src_rect_y,
+		cmd->src_rect_w, cmd->src_rect_h,
+		cmd->src_width, cmd->src_height,
+		cmd->src_pixfmt >> 0, cmd->src_pixfmt >> 8,
+		cmd->src_pixfmt >> 16, cmd->src_pixfmt >> 24,
+		cmd->dst_rect_x, cmd->dst_rect_y,
+		cmd->dst_rect_w, cmd->dst_rect_h,
+		cmd->dst_pixfmt >> 0, cmd->dst_pixfmt >> 8,
+		cmd->dst_pixfmt >> 16, cmd->dst_pixfmt >> 24,
+		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
+		cmd->clkrate, cmd->data_bw,
+		cmd->dst_writeback, cmd->video_mode, cmd_type);
+	SDEROT_EVTLOG(ctx->session_id, cmd->sequence_id,
+		cmd->src_rect_x, cmd->src_rect_y,
+		cmd->src_rect_w, cmd->src_rect_h,
+		cmd->src_width, cmd->src_height,
+		cmd->src_pixfmt,
+		cmd->dst_rect_x, cmd->dst_rect_y,
+		cmd->dst_rect_w, cmd->dst_rect_h,
+		cmd->dst_pixfmt,
+		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
+		cmd->clkrate, cmd->data_bw,
+		cmd->dst_writeback, cmd->video_mode, cmd_type);
+
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+
+	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE ||
+			cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
+
+		struct sde_rotation_item item;
+		struct sde_rotator_statistics *stats = &rot_dev->stats;
+		int scid = llcc_get_slice_id(ctx->slice);
+
+		/* allocate slot for timestamp */
+		ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
+
+		if (cmd->rot90)
+			flags |= SDE_ROTATION_90;
+		if (cmd->hflip)
+			flags |= SDE_ROTATION_FLIP_LR;
+		if (cmd->vflip)
+			flags |= SDE_ROTATION_FLIP_UD;
+		if (cmd->secure)
+			flags |= SDE_ROTATION_SECURE;
+
+		flags |= SDE_ROTATION_EXT_PERF;
+
+		/* fill in item work structure */
+		memset(&item, 0, sizeof(struct sde_rotation_item));
+		item.flags = flags | SDE_ROTATION_EXT_IOVA;
+		item.trigger = cmd->video_mode ? SDE_ROTATOR_TRIGGER_VIDEO :
+				SDE_ROTATOR_TRIGGER_COMMAND;
+		item.prefill_bw = cmd->prefill_bw;
+		item.session_id = ctx->session_id;
+		item.sequence_id = cmd->sequence_id;
+		item.src_rect.x = cmd->src_rect_x;
+		item.src_rect.y = cmd->src_rect_y;
+		item.src_rect.w = cmd->src_rect_w;
+		item.src_rect.h = cmd->src_rect_h;
+		item.input.width = cmd->src_width;
+		item.input.height = cmd->src_height;
+		item.input.format = cmd->src_pixfmt;
+
+		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
+			item.input.planes[i].addr = cmd->src_addr[i];
+			item.input.planes[i].len = cmd->src_len[i];
+			item.input.planes[i].fd = -1;
+		}
+		item.input.plane_count = cmd->src_planes;
+		item.input.comp_ratio.numer = 1;
+		item.input.comp_ratio.denom = 1;
+
+		item.output.width = cmd->dst_rect_x + cmd->dst_rect_w;
+		item.output.height = cmd->dst_rect_y + cmd->dst_rect_h;
+		item.dst_rect.x = cmd->dst_rect_x;
+		item.dst_rect.y = cmd->dst_rect_y;
+		item.dst_rect.w = cmd->dst_rect_w;
+		item.dst_rect.h = cmd->dst_rect_h;
+		item.output.sbuf = true;
+		item.output.scid = scid;
+		item.output.writeback = cmd->dst_writeback;
+		item.output.format = cmd->dst_pixfmt;
+
+		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
+			item.output.planes[i].addr = cmd->dst_addr[i];
+			item.output.planes[i].len = cmd->dst_len[i];
+			item.output.planes[i].fd = -1;
+		}
+		item.output.plane_count = cmd->dst_planes;
+		item.output.comp_ratio.numer = 1;
+		item.output.comp_ratio.denom = 1;
+		item.sequence_id = ++(ctx->commit_sequence_id);
+		item.ts = ts;
+
+		req = sde_rotator_req_init(rot_dev->mgr, ctx->private,
+				&item, 1, 0);
+		if (IS_ERR_OR_NULL(req)) {
+			SDEROT_ERR("fail allocate request s:%d\n",
+					ctx->session_id);
+			ret = -ENOMEM;
+			goto error_init_request;
+		}
+	}
+
+	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE) {
+		struct sde_rotation_config rotcfg;
+
+		memset(&rotcfg, 0, sizeof(struct sde_rotation_config));
+		rotcfg.flags = flags;
+		rotcfg.frame_rate = cmd->fps;
+		rotcfg.clk_rate = cmd->clkrate;
+		rotcfg.data_bw = cmd->data_bw;
+		rotcfg.session_id = ctx->session_id;
+		rotcfg.input.width = cmd->src_rect_w;
+		rotcfg.input.height = cmd->src_rect_h;
+		rotcfg.input.format = cmd->src_pixfmt;
+		rotcfg.input.comp_ratio.numer = 1;
+		rotcfg.input.comp_ratio.denom = 1;
+		rotcfg.output.width = cmd->dst_rect_w;
+		rotcfg.output.height = cmd->dst_rect_h;
+		rotcfg.output.format = cmd->dst_pixfmt;
+		rotcfg.output.comp_ratio.numer = 1;
+		rotcfg.output.comp_ratio.denom = 1;
+		rotcfg.output.sbuf = true;
+
+		if (memcmp(&rotcfg, &ctx->rotcfg, sizeof(rotcfg))) {
+			ret = sde_rotator_session_config(rot_dev->mgr,
+					ctx->private, &rotcfg);
+			if (ret) {
+				SDEROT_ERR("fail session config s:%d\n",
+						ctx->session_id);
+				goto error_session_config;
+			}
+
+			ctx->rotcfg = rotcfg;
+		}
+
+		ret = sde_rotator_validate_request(rot_dev->mgr, ctx->private,
+				req);
+		if (ret) {
+			SDEROT_ERR("fail validate request s:%d\n",
+					ctx->session_id);
+			goto error_validate_request;
+		}
+
+		devm_kfree(rot_dev->dev, req);
+		req = NULL;
+
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
+
+		request = list_first_entry_or_null(&ctx->retired_list,
+				struct sde_rotator_request, list);
+		if (!request) {
+			/* should not happen */
+			ret = -ENOMEM;
+			SDEROT_ERR("no free request s:%d\n", ctx->session_id);
+			goto error_retired_list;
+		}
+
+		request->req = req;
+
+		spin_lock(&ctx->list_lock);
+		list_del_init(&request->list);
+		list_add_tail(&request->list, &ctx->pending_list);
+		spin_unlock(&ctx->list_lock);
+
+		ts = req->entries[0].item.ts;
+		if (ts) {
+			ts[SDE_ROTATOR_TS_SRCQB] = ktime_get();
+			ts[SDE_ROTATOR_TS_DSTQB] = ktime_get();
+			ts[SDE_ROTATOR_TS_FENCE] = ktime_get();
+		} else {
+			SDEROT_ERR("invalid stats timestamp\n");
+		}
+		req->retireq = ctx->work_queue.rot_work_queue;
+		req->retire_work = &request->retire_work;
+
+		trace_rot_entry_fence(
+			ctx->session_id, cmd->sequence_id,
+			req->entries[0].item.wb_idx,
+			req->entries[0].item.flags,
+			req->entries[0].item.input.format,
+			req->entries[0].item.input.width,
+			req->entries[0].item.input.height,
+			req->entries[0].item.src_rect.x,
+			req->entries[0].item.src_rect.y,
+			req->entries[0].item.src_rect.w,
+			req->entries[0].item.src_rect.h,
+			req->entries[0].item.output.format,
+			req->entries[0].item.output.width,
+			req->entries[0].item.output.height,
+			req->entries[0].item.dst_rect.x,
+			req->entries[0].item.dst_rect.y,
+			req->entries[0].item.dst_rect.w,
+			req->entries[0].item.dst_rect.h);
+
+		ret = sde_rotator_handle_request_common(
+				rot_dev->mgr, ctx->private, req);
+		if (ret) {
+			SDEROT_ERR("fail handle request s:%d\n",
+					ctx->session_id);
+			goto error_handle_request;
+		}
+
+		sde_rotator_commit_request(rot_dev->mgr, ctx->private, req);
+
+		request->committed = true;
+
+		/* save request in private handle */
+		cmd->priv_handle = request;
+
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_CLEANUP) {
+		if (!cmd->priv_handle) {
+			ret = -EINVAL;
+			SDEROT_ERR("invalid private handle\n");
+			goto error_invalid_handle;
+		}
+
+		request = cmd->priv_handle;
+		req = request->req;
+
+		if (request->committed) {
+			/* wait until request is finished */
+			sde_rot_mgr_unlock(rot_dev->mgr);
+			ret = wait_event_timeout(ctx->wait_queue,
+				sde_rotator_is_request_retired(request),
+				msecs_to_jiffies(rot_dev->streamoff_timeout));
+			if (!ret)
+				SDEROT_ERR("timeout w/o retire s:%d\n",
+						ctx->session_id);
+			else if (ret == 1)
+				SDEROT_ERR("timeout w/ retire s:%d\n",
+						ctx->session_id);
+
+			sde_rot_mgr_lock(rot_dev->mgr);
+		}
+
+		sde_rotator_req_finish(rot_dev->mgr, ctx->private, req);
+		sde_rotator_retire_request(request);
+	}
+
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	return 0;
+
+error_handle_request:
+	sde_rotator_update_retire_sequence(request);
+	sde_rotator_retire_request(request);
+error_retired_list:
+error_validate_request:
+error_session_config:
+	devm_kfree(rot_dev->dev, req);
+error_invalid_handle:
+error_init_request:
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	return ret;
+}
+EXPORT_SYMBOL(sde_rotator_inline_commit);
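+
+/*
+ * Note on the commit/cleanup handshake (illustrative; "hndl" and "cmd" are
+ * hypothetical caller variables): SDE_ROTATOR_INLINE_CMD_COMMIT stores the
+ * in-flight request in cmd->priv_handle, so the same cmd must be passed back
+ * with SDE_ROTATOR_INLINE_CMD_CLEANUP, which waits up to streamoff_timeout
+ * for the request to retire before releasing it, e.g.
+ *
+ *	sde_rotator_inline_commit(hndl, &cmd, SDE_ROTATOR_INLINE_CMD_COMMIT);
+ *	...
+ *	sde_rotator_inline_commit(hndl, &cmd, SDE_ROTATOR_INLINE_CMD_CLEANUP);
+ */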
+
+/*
+ * sde_rotator_open - Rotator device open method.
+ * @file: Pointer to file struct.
+ */
+static int sde_rotator_open(struct file *file)
+{
+	struct sde_rotator_device *rot_dev = video_drvdata(file);
+	struct sde_rotator_ctx *ctx;
+	int ret = 0;
+
+	ctx = sde_rotator_ctx_open(rot_dev, file);
+	if (IS_ERR_OR_NULL(ctx)) {
+		ret = PTR_ERR(ctx);
+		SDEDEV_DBG(rot_dev->dev, "failed to open %d\n", ret);
+	}
+
+	return ret;
+}
+
+/*
+ * sde_rotator_release - Rotator device release method.
+ * @file: Pointer to file struct.
+ */
+static int sde_rotator_release(struct file *file)
+{
+	struct sde_rotator_ctx *ctx =
+			sde_rotator_ctx_from_fh(file->private_data);
+
+	return sde_rotator_ctx_release(ctx, file);
+}
+
+/*
  * sde_rotator_poll - rotator device poll method.
  * @file: Pointer to file struct.
  * @wait: Pointer to poll table struct.
@@ -1625,6 +2206,7 @@
 				ctx->session_id, buf_type, ret);
 			return ret;
 		}
+		ctx->rotcfg = config;
 	}
 
 	ret = v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, buf_type);
@@ -2125,7 +2707,9 @@
 		return;
 	}
 
-	if (rot_dev->early_submit) {
+	if (!ctx->file) {
+		sde_rotator_update_retire_sequence(request);
+	} else if (rot_dev->early_submit) {
 		if (IS_ERR_OR_NULL(request->req)) {
 			/* fail pending request or something wrong */
 			SDEDEV_ERR(rot_dev->dev,
@@ -2469,7 +3053,7 @@
 		src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 		if (!src_buf || !dst_buf) {
 			SDEDEV_ERR(rot_dev->dev,
-				"null buffer in device run s:%d sb:%p db:%p\n",
+				"null buffer in device run s:%d sb:%pK db:%pK\n",
 				ctx->session_id,
 				src_buf, dst_buf);
 			goto error_empty_buffer;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
index 898437b..100ce27 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -21,6 +21,8 @@
 #include <linux/iommu.h>
 #include <linux/dma-buf.h>
 #include <linux/msm-bus.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fh.h>
 #include <media/v4l2-ctrls.h>
@@ -89,6 +91,7 @@
  * @retire_work: retire work structure
  * @request: Pointer to core layer rotator manager request
  * @ctx: Pointer to parent context
+ * @committed: true if request committed to hardware
  */
 struct sde_rotator_request {
 	struct list_head list;
@@ -96,12 +99,14 @@
 	struct work_struct retire_work;
 	struct sde_rot_entry_container *req;
 	struct sde_rotator_ctx *ctx;
+	bool committed;
 };
 
 /*
  * struct sde_rotator_ctx - Structure contains per open file handle context.
  * @kobj: kernel object of this context
  * @rot_dev: Pointer to rotator device.
+ * @file: Pointer to device file handle
  * @fh: V4l2 file handle.
  * @ctrl_handler: control handler
  * @format_cap: Current capture format.
@@ -122,16 +127,19 @@
  * @wait_queue: Wait queue for signaling end of job
  * @work_queue: work queue for submit and retire processing
  * @private: Pointer to session private information
+ * @slice: Pointer to system cache slice descriptor
  * @commit_sequence_id: last committed sequence id
  * @retired_sequence_id: last retired sequence id
  * @list_lock: lock for pending/retired list
  * @pending_list: list of pending request
  * @retired_list: list of retired/free request
  * @requests: static allocation of free requests
+ * @rotcfg: current core rotation configuration
  */
 struct sde_rotator_ctx {
 	struct kobject kobj;
 	struct sde_rotator_device *rot_dev;
+	struct file *file;
 	struct v4l2_fh fh;
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct v4l2_format format_cap;
@@ -153,12 +161,14 @@
 	wait_queue_head_t wait_queue;
 	struct sde_rot_queue work_queue;
 	struct sde_rot_file_private *private;
+	struct llcc_slice_desc *slice;
 	u32 commit_sequence_id;
 	u32 retired_sequence_id;
 	spinlock_t list_lock;
 	struct list_head pending_list;
 	struct list_head retired_list;
 	struct sde_rotator_request requests[SDE_ROTATOR_REQUEST_MAX];
+	struct sde_rotation_config rotcfg;
 };
 
 /*
@@ -183,6 +193,7 @@
  * @pdev: Pointer to platform device.
  * @drvdata: Pointer to driver data.
  * @early_submit: flag enable job submission in ready state.
+ * @disable_syscache: true to disable system cache
  * @mgr: Pointer to core rotator manager.
  * @mdata: Pointer to common rotator data/resource.
  * @session_id: Next context session identifier
@@ -203,6 +214,7 @@
 	struct platform_device *pdev;
 	const void *drvdata;
 	u32 early_submit;
+	u32 disable_syscache;
 	struct sde_rot_mgr *mgr;
 	struct sde_rot_data_type *mdata;
 	u32 session_id;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index 967a1c4..573e0a8 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -284,6 +284,27 @@
 		},
 	},
 	{
+		.mdp_format = {
+			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC),
+			.description = "SDE/Y_CBCR_H2V2_P010_UBWC",
+			.flag = 0,
+			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+			.chroma_sample = SDE_MDP_CHROMA_420,
+			.unpack_count = 2,
+			.bpp = 2,
+			.frame_format = SDE_MDP_FMT_TILE_A5X,
+			.pixel_mode = SDE_MDP_PIXEL_10BIT,
+			.element = { C1_B_Cb, C2_R_Cr },
+			.unpack_tight = 0,
+			.unpack_align_msb = 1,
+			.is_ubwc = SDE_MDP_COMPRESS_UBWC
+		},
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
+	{
 		.mdp_format =
 			FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_TILE,
 			"SDE/RGBA_1010102_TILE",
@@ -517,6 +538,27 @@
 			.tile_width = 16,
 		},
 	},
+	{
+		.mdp_format = {
+			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE),
+			.description = "SDE/Y_CBCR_H2V2_P010_TILE",
+			.flag = SDE_MDP_FORMAT_FLAG_PRIVATE,
+			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+			.chroma_sample = SDE_MDP_CHROMA_420,
+			.unpack_count = 2,
+			.bpp = 2,
+			.frame_format = SDE_MDP_FMT_TILE_A5X,
+			.pixel_mode = SDE_MDP_PIXEL_10BIT,
+			.element = { C1_B_Cb, C2_R_Cr },
+			.unpack_tight = 0,
+			.unpack_align_msb = 1,
+			.is_ubwc = SDE_MDP_COMPRESS_NONE,
+		},
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
 };
 
 static struct sde_mdp_format_params sde_mdp_format_map[] = {
@@ -777,3 +819,98 @@
 
 	return 0;
 }
+
+/*
+ * sde_rot_get_base_tilea5x_pixfmt - get base a5x tile format of given source format
+ * @src_pixfmt: source pixel format to be converted
+ * @dst_pixfmt: pointer to base a5x tile pixel format
+ * return: 0 if success; error code otherwise
+ */
+int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt)
+{
+	int rc = 0;
+
+	if (!dst_pixfmt) {
+		SDEROT_ERR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	switch (src_pixfmt) {
+	case SDE_PIX_FMT_Y_CBCR_H2V2:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CRCB_H2V2:
+	case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2_TILE;
+		break;
+	case SDE_PIX_FMT_RGBA_8888:
+	case SDE_PIX_FMT_RGBA_8888_UBWC:
+	case SDE_PIX_FMT_RGBA_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_RGBA_8888_TILE;
+		break;
+	case SDE_PIX_FMT_RGBX_8888:
+	case SDE_PIX_FMT_RGBX_8888_UBWC:
+	case SDE_PIX_FMT_RGBX_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_RGBX_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_8888:
+	case SDE_PIX_FMT_ARGB_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_8888:
+	case SDE_PIX_FMT_XRGB_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_8888:
+	case SDE_PIX_FMT_ABGR_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_8888:
+	case SDE_PIX_FMT_XBGR_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_2101010:
+	case SDE_PIX_FMT_ARGB_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_2101010:
+	case SDE_PIX_FMT_XRGB_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_2101010:
+	case SDE_PIX_FMT_ABGR_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_2101010:
+	case SDE_PIX_FMT_XBGR_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_BGRA_1010102:
+	case SDE_PIX_FMT_BGRA_1010102_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
+		break;
+	case SDE_PIX_FMT_BGRX_1010102:
+	case SDE_PIX_FMT_BGRX_1010102_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
+		break;
+	default:
+		SDEROT_ERR("invalid src pixel format %c%c%c%c\n",
+				src_pixfmt >> 0, src_pixfmt >> 8,
+				src_pixfmt >> 16, src_pixfmt >> 24);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
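+
+/*
+ * Example (illustrative):
+ *
+ *	u32 tile_fmt;
+ *	int rc = sde_rot_get_base_tilea5x_pixfmt(
+ *			SDE_PIX_FMT_Y_CBCR_H2V2_UBWC, &tile_fmt);
+ *
+ * leaves rc == 0 and tile_fmt == SDE_PIX_FMT_Y_CBCR_H2V2_TILE, while an
+ * unlisted format such as SDE_PIX_FMT_RGB_565 returns -EINVAL.
+ */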
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
index aebdb12..5bb6198 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
@@ -129,6 +129,8 @@
 
 int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h);
 
+int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt);
+
 static inline bool sde_mdp_is_tilea4x_format(struct sde_mdp_format_params *fmt)
 {
 	return fmt && (fmt->frame_format == SDE_MDP_FMT_TILE_A4X);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
new file mode 100644
index 0000000..27fd0c3
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_INLINE_H__
+#define __SDE_ROTATOR_INLINE_H__
+
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/platform_device.h>
+
+#include "sde_rotator_formats.h"
+
+#define SDE_ROTATOR_INLINE_PLANE_MAX	4
+
+/*
+ * enum sde_rotator_inline_cmd_type - inline rotator command stages
+ * @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
+ * @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
+ * @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
+ */
+enum sde_rotator_inline_cmd_type {
+	SDE_ROTATOR_INLINE_CMD_VALIDATE,
+	SDE_ROTATOR_INLINE_CMD_COMMIT,
+	SDE_ROTATOR_INLINE_CMD_CLEANUP,
+};
+
+/**
+ * sde_rotator_inline_cmd - inline rotation command
+ * @sequence_id: unique command sequence identifier
+ * @video_mode: true if video interface is connected
+ * @fps: frame rate in frame-per-second
+ * @rot90: rotate 90 counterclockwise
+ * @hflip: horizontal flip prior to rotation
+ * @vflip: vertical flip prior to rotation
+ * @secure: true if buffer is in secure domain
+ * @prefill_bw: prefill bandwidth in Bps
+ * @clkrate: clock rate in Hz
+ * @data_bw: data bus bandwidth in Bps
+ * @src_addr: source i/o buffer virtual address
+ * @src_len: source i/o buffer length
+ * @src_planes: source plane number
+ * @src_pixfmt: v4l2 fourcc pixel format of source buffer
+ * @src_width: width of source buffer
+ * @src_height: height of source buffer
+ * @src_rect_x: roi x coordinate of source buffer
+ * @src_rect_y: roi y coordinate of source buffer
+ * @src_rect_w: roi width of source buffer
+ * @src_rect_h: roi height of source buffer
+ * @dst_addr: destination i/o buffer virtual address
+ * @dst_len: destination i/o buffer length
+ * @dst_planes: destination plane number
+ * @dst_pixfmt: v4l2 fourcc pixel format of destination buffer
+ * @dst_rect_x: roi x coordinate of destination buffer
+ * @dst_rect_y: roi y coordinate of destination buffer
+ * @dst_rect_w: roi width of destination buffer
+ * @dst_rect_h: roi height of destination buffer
+ * @dst_writeback: true if cache writeback is required
+ * @priv_handle: private handle of rotator session
+ */
+struct sde_rotator_inline_cmd {
+	u32 sequence_id;
+	bool video_mode;
+	u32 fps;
+	bool rot90;
+	bool hflip;
+	bool vflip;
+	bool secure;
+	u64 prefill_bw;
+	u64 clkrate;
+	u64 data_bw;
+	dma_addr_t src_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 src_len[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 src_planes;
+	u32 src_pixfmt;
+	u32 src_width;
+	u32 src_height;
+	u32 src_rect_x;
+	u32 src_rect_y;
+	u32 src_rect_w;
+	u32 src_rect_h;
+	dma_addr_t dst_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 dst_len[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 dst_planes;
+	u32 dst_pixfmt;
+	u32 dst_rect_x;
+	u32 dst_rect_y;
+	u32 dst_rect_w;
+	u32 dst_rect_h;
+	bool dst_writeback;
+	void *priv_handle;
+};
+
+void *sde_rotator_inline_open(struct platform_device *pdev);
+int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
+		u32 src_pixfmt, u32 *dst_pixfmt);
+int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
+		char *downscale_caps, int len);
+int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev);
+int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
+		bool input, u32 *pixfmt, int len);
+int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
+		enum sde_rotator_inline_cmd_type cmd_type);
+int sde_rotator_inline_release(void *handle);
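+
+/*
+ * Typical client usage, as a minimal illustrative sketch (error handling,
+ * buffer setup and per-frame command fields omitted; "hndl" and "cmd" are
+ * hypothetical locals):
+ *
+ *	void *hndl = sde_rotator_inline_open(pdev);
+ *
+ *	sde_rotator_inline_commit(hndl, &cmd, SDE_ROTATOR_INLINE_CMD_VALIDATE);
+ *	sde_rotator_inline_commit(hndl, &cmd, SDE_ROTATOR_INLINE_CMD_COMMIT);
+ *	(display consumes the rotated stream buffer)
+ *	sde_rotator_inline_commit(hndl, &cmd, SDE_ROTATOR_INLINE_CMD_CLEANUP);
+ *
+ *	sde_rotator_inline_release(hndl);
+ */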
+
+#endif /* __SDE_ROTATOR_INLINE_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 0512083..a152573 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -41,17 +41,27 @@
 /* traffic shaping clock ticks = finish_time x 19.2MHz */
 #define TRAFFIC_SHAPE_CLKTICK_14MS   268800
 #define TRAFFIC_SHAPE_CLKTICK_12MS   230400
+#define TRAFFIC_SHAPE_VSYNC_CLK      19200000
 
 /* XIN mapping */
 #define XIN_SSPP		0
 #define XIN_WRITEBACK		1
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT msecs_to_jiffies(42 * 32)
+#define KOFF_TIMEOUT		(84)
+
+/* default stream buffer headroom in lines */
+#define DEFAULT_SBUF_HEADROOM	20
+#define DEFAULT_UBWC_MALSIZE	1
+#define DEFAULT_UBWC_SWIZZLE	1
+
+#define DEFAULT_MAXLINEWIDTH	4096
 
 /* Macro for constructing the REGDMA command */
 #define SDE_REGDMA_WRITE(p, off, data) \
 	do { \
+		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
+				(u32)(data));\
 		*p++ = REGDMA_OP_REGWRITE | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (data); \
@@ -59,6 +69,8 @@
 
 #define SDE_REGDMA_MODIFY(p, off, mask, data) \
 	do { \
+		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
+				(u32)(data));\
 		*p++ = REGDMA_OP_REGMODIFY | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (mask); \
@@ -67,6 +79,8 @@
 
 #define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
 	do { \
+		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
+				(u32)(len));\
 		*p++ = REGDMA_OP_BLKWRITE_INC | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (len); \
@@ -74,18 +88,23 @@
 
 #define SDE_REGDMA_BLKWRITE_DATA(p, data) \
 	do { \
+		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
 		*(p) = (data); \
 		(p)++; \
 	} while (0)
 
 /* Macro for directly accessing mapped registers */
 #define SDE_ROTREG_WRITE(base, off, data) \
-	writel_relaxed(data, (base + (off)))
+	do { \
+		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off),\
+				(u32)(data));\
+		writel_relaxed(data, (base + (off))); \
+	} while (0)
 
 #define SDE_ROTREG_READ(base, off) \
 	readl_relaxed(base + (off))
 
-static u32 sde_hw_rotator_input_pixfmts[] = {
+static u32 sde_hw_rotator_v3_inpixfmts[] = {
 	SDE_PIX_FMT_XRGB_8888,
 	SDE_PIX_FMT_ARGB_8888,
 	SDE_PIX_FMT_ABGR_8888,
@@ -145,7 +164,7 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
 };
 
-static u32 sde_hw_rotator_output_pixfmts[] = {
+static u32 sde_hw_rotator_v3_outpixfmts[] = {
 	SDE_PIX_FMT_XRGB_8888,
 	SDE_PIX_FMT_ARGB_8888,
 	SDE_PIX_FMT_ABGR_8888,
@@ -205,6 +224,166 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
 };
 
+static u32 sde_hw_rotator_v4_inpixfmts[] = {
+	SDE_PIX_FMT_XRGB_8888,
+	SDE_PIX_FMT_ARGB_8888,
+	SDE_PIX_FMT_ABGR_8888,
+	SDE_PIX_FMT_RGBA_8888,
+	SDE_PIX_FMT_BGRA_8888,
+	SDE_PIX_FMT_RGBX_8888,
+	SDE_PIX_FMT_BGRX_8888,
+	SDE_PIX_FMT_XBGR_8888,
+	SDE_PIX_FMT_RGBA_5551,
+	SDE_PIX_FMT_ARGB_1555,
+	SDE_PIX_FMT_ABGR_1555,
+	SDE_PIX_FMT_BGRA_5551,
+	SDE_PIX_FMT_BGRX_5551,
+	SDE_PIX_FMT_RGBX_5551,
+	SDE_PIX_FMT_XBGR_1555,
+	SDE_PIX_FMT_XRGB_1555,
+	SDE_PIX_FMT_ARGB_4444,
+	SDE_PIX_FMT_RGBA_4444,
+	SDE_PIX_FMT_BGRA_4444,
+	SDE_PIX_FMT_ABGR_4444,
+	SDE_PIX_FMT_RGBX_4444,
+	SDE_PIX_FMT_XRGB_4444,
+	SDE_PIX_FMT_BGRX_4444,
+	SDE_PIX_FMT_XBGR_4444,
+	SDE_PIX_FMT_RGB_888,
+	SDE_PIX_FMT_BGR_888,
+	SDE_PIX_FMT_RGB_565,
+	SDE_PIX_FMT_BGR_565,
+	SDE_PIX_FMT_Y_CB_CR_H2V2,
+	SDE_PIX_FMT_Y_CR_CB_H2V2,
+	SDE_PIX_FMT_Y_CR_CB_GH2V2,
+	SDE_PIX_FMT_Y_CBCR_H2V2,
+	SDE_PIX_FMT_Y_CRCB_H2V2,
+	SDE_PIX_FMT_Y_CBCR_H1V2,
+	SDE_PIX_FMT_Y_CRCB_H1V2,
+	SDE_PIX_FMT_Y_CBCR_H2V1,
+	SDE_PIX_FMT_Y_CRCB_H2V1,
+	SDE_PIX_FMT_YCBYCR_H2V1,
+	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
+	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
+	SDE_PIX_FMT_RGBA_8888_UBWC,
+	SDE_PIX_FMT_RGBX_8888_UBWC,
+	SDE_PIX_FMT_RGB_565_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+	SDE_PIX_FMT_RGBA_1010102,
+	SDE_PIX_FMT_RGBX_1010102,
+	SDE_PIX_FMT_ARGB_2101010,
+	SDE_PIX_FMT_XRGB_2101010,
+	SDE_PIX_FMT_BGRA_1010102,
+	SDE_PIX_FMT_BGRX_1010102,
+	SDE_PIX_FMT_ABGR_2101010,
+	SDE_PIX_FMT_XBGR_2101010,
+	SDE_PIX_FMT_RGBA_1010102_UBWC,
+	SDE_PIX_FMT_RGBX_1010102_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+	SDE_PIX_FMT_XRGB_8888_TILE,
+	SDE_PIX_FMT_ARGB_8888_TILE,
+	SDE_PIX_FMT_ABGR_8888_TILE,
+	SDE_PIX_FMT_XBGR_8888_TILE,
+	SDE_PIX_FMT_RGBA_8888_TILE,
+	SDE_PIX_FMT_BGRA_8888_TILE,
+	SDE_PIX_FMT_RGBX_8888_TILE,
+	SDE_PIX_FMT_BGRX_8888_TILE,
+	SDE_PIX_FMT_RGBA_1010102_TILE,
+	SDE_PIX_FMT_RGBX_1010102_TILE,
+	SDE_PIX_FMT_ARGB_2101010_TILE,
+	SDE_PIX_FMT_XRGB_2101010_TILE,
+	SDE_PIX_FMT_BGRA_1010102_TILE,
+	SDE_PIX_FMT_BGRX_1010102_TILE,
+	SDE_PIX_FMT_ABGR_2101010_TILE,
+	SDE_PIX_FMT_XBGR_2101010_TILE,
+};
+
+static u32 sde_hw_rotator_v4_outpixfmts[] = {
+	SDE_PIX_FMT_XRGB_8888,
+	SDE_PIX_FMT_ARGB_8888,
+	SDE_PIX_FMT_ABGR_8888,
+	SDE_PIX_FMT_RGBA_8888,
+	SDE_PIX_FMT_BGRA_8888,
+	SDE_PIX_FMT_RGBX_8888,
+	SDE_PIX_FMT_BGRX_8888,
+	SDE_PIX_FMT_XBGR_8888,
+	SDE_PIX_FMT_RGBA_5551,
+	SDE_PIX_FMT_ARGB_1555,
+	SDE_PIX_FMT_ABGR_1555,
+	SDE_PIX_FMT_BGRA_5551,
+	SDE_PIX_FMT_BGRX_5551,
+	SDE_PIX_FMT_RGBX_5551,
+	SDE_PIX_FMT_XBGR_1555,
+	SDE_PIX_FMT_XRGB_1555,
+	SDE_PIX_FMT_ARGB_4444,
+	SDE_PIX_FMT_RGBA_4444,
+	SDE_PIX_FMT_BGRA_4444,
+	SDE_PIX_FMT_ABGR_4444,
+	SDE_PIX_FMT_RGBX_4444,
+	SDE_PIX_FMT_XRGB_4444,
+	SDE_PIX_FMT_BGRX_4444,
+	SDE_PIX_FMT_XBGR_4444,
+	SDE_PIX_FMT_RGB_888,
+	SDE_PIX_FMT_BGR_888,
+	SDE_PIX_FMT_RGB_565,
+	SDE_PIX_FMT_BGR_565,
+	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
+	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
+	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
+	SDE_PIX_FMT_Y_CBCR_H2V2,
+	SDE_PIX_FMT_Y_CRCB_H2V2,
+	SDE_PIX_FMT_Y_CBCR_H1V2,
+	SDE_PIX_FMT_Y_CRCB_H1V2,
+	SDE_PIX_FMT_Y_CBCR_H2V1,
+	SDE_PIX_FMT_Y_CRCB_H2V1,
+	/* SDE_PIX_FMT_YCBYCR_H2V1 */
+	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
+	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
+	SDE_PIX_FMT_RGBA_8888_UBWC,
+	SDE_PIX_FMT_RGBX_8888_UBWC,
+	SDE_PIX_FMT_RGB_565_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+	SDE_PIX_FMT_RGBA_1010102,
+	SDE_PIX_FMT_RGBX_1010102,
+	/* SDE_PIX_FMT_ARGB_2101010 */
+	/* SDE_PIX_FMT_XRGB_2101010 */
+	SDE_PIX_FMT_BGRA_1010102,
+	SDE_PIX_FMT_BGRX_1010102,
+	/* SDE_PIX_FMT_ABGR_2101010 */
+	/* SDE_PIX_FMT_XBGR_2101010 */
+	SDE_PIX_FMT_RGBA_1010102_UBWC,
+	SDE_PIX_FMT_RGBX_1010102_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+	SDE_PIX_FMT_XRGB_8888_TILE,
+	SDE_PIX_FMT_ARGB_8888_TILE,
+	SDE_PIX_FMT_ABGR_8888_TILE,
+	SDE_PIX_FMT_XBGR_8888_TILE,
+	SDE_PIX_FMT_RGBA_8888_TILE,
+	SDE_PIX_FMT_BGRA_8888_TILE,
+	SDE_PIX_FMT_RGBX_8888_TILE,
+	SDE_PIX_FMT_BGRX_8888_TILE,
+	SDE_PIX_FMT_RGBA_1010102_TILE,
+	SDE_PIX_FMT_RGBX_1010102_TILE,
+	SDE_PIX_FMT_ARGB_2101010_TILE,
+	SDE_PIX_FMT_XRGB_2101010_TILE,
+	SDE_PIX_FMT_BGRA_1010102_TILE,
+	SDE_PIX_FMT_BGRX_1010102_TILE,
+	SDE_PIX_FMT_ABGR_2101010_TILE,
+	SDE_PIX_FMT_XBGR_2101010_TILE,
+};
+
 static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
 	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
 	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
@@ -284,6 +463,30 @@
 }
 
 /**
+ * sde_hw_rotator_update_swts - update software timestamp with given value
+ * @rot: Pointer to hw rotator
+ * @ctx: Pointer to rotator context
+ * @swts: new software timestamp
+ * @return: new combined swts
+ */
+static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
+		struct sde_hw_rotator_context *ctx, u32 swts)
+{
+	u32 mask = SDE_REGDMA_SWTS_MASK;
+
+	swts &= SDE_REGDMA_SWTS_MASK;
+	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
+		swts <<= SDE_REGDMA_SWTS_SHIFT;
+		mask <<= SDE_REGDMA_SWTS_SHIFT;
+	}
+
+	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
+	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
+
+	return swts;
+}
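+
+/*
+ * Packing sketch (register layout constants below are illustrative only):
+ * assuming a 16-bit SDE_REGDMA_SWTS_SHIFT and a 0xFFFF SDE_REGDMA_SWTS_MASK,
+ * updating the low-priority queue timestamp to 0x0007 while the register
+ * holds 0xAAAA0005 writes (0xAAAA0005 & ~0xFFFF0000) | (0x0007 << 16), i.e.
+ * 0x00070005, leaving the high-priority field untouched.
+ */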
+
+/**
  * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
  *				Also, clear rotator/regdma irq status.
  * @rot: Pointer to hw rotator
@@ -376,6 +579,13 @@
 	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
 		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
 		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
+
+	SDEROT_ERR(
+		"sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
+		SDE_ROTREG_READ(rot->mdss_base,
+			ROT_WB_SBUF_STATUS_PLANE0),
+		SDE_ROTREG_READ(rot->mdss_base,
+			ROT_WB_SBUF_STATUS_PLANE1));
 }
 
 /**
@@ -540,6 +750,17 @@
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
+	/*
+	 * initialize start control trigger selection first
+	 */
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		if (ctx->sbuf_mode)
+			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
+					ctx->start_ctrl);
+		else
+			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
+	}
+
 	/* source image setup */
 	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
 			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
@@ -618,6 +839,9 @@
 	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
 		src_format |= BIT(14); /* UNPACK_DX_FORMAT */
 
+	if (rot->solid_fill)
+		src_format |= BIT(22); /* SOLID_FILL */
+
 	/* SRC_FORMAT */
 	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
 
@@ -652,11 +876,21 @@
 			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
 	}
 
+	if (rot->solid_fill)
+		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
+				rot->constant_color);
+
 	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
 			fetch_blocksize |
 			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
 			((rot->highest_bank & 0x3) << 18));
 
+	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
+				((ctx->rot->ubwc_malsize & 0x3) << 8) |
+				((ctx->rot->highest_bank & 0x3) << 4) |
+				((ctx->rot->ubwc_swizzle & 0x1) << 0));
+
 	/* setup source buffer plane security status */
 	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
 			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
@@ -697,6 +931,7 @@
 		struct sde_hw_rot_wb_cfg *cfg,
 		u32 flags)
 {
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	struct sde_mdp_format_params *fmt;
 	u32 *wrptr;
 	u32 pack = 0;
@@ -784,17 +1019,25 @@
 			cfg->v_downscale_factor |
 			(cfg->h_downscale_factor << 16));
 
-	/* write config setup for bank configration */
+	/* write config setup for bank configuration */
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
 			(ctx->rot->highest_bank & 0x3) << 8);
 
-	if (flags & SDE_ROT_FLAG_ROT_90)
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
-	else
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);
+	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
+				((ctx->rot->ubwc_malsize & 0x3) << 8) |
+				((ctx->rot->highest_bank & 0x3) << 4) |
+				((ctx->rot->ubwc_swizzle & 0x1) << 0));
 
-	/* setup traffic shaper for 4k 30fps content */
-	if (ctx->is_traffic_shaping) {
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
+				ctx->sys_cache_mode);
+
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
+			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
+
+	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
+	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
 		u32 bw;
 
 		/*
@@ -813,10 +1056,16 @@
 			bw *= fmt->bpp;
 
 		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
+
+		/* use prefill bandwidth instead if specified */
+		if (cfg->prefill_bw)
+			bw = DIV_ROUND_UP(cfg->prefill_bw,
+					TRAFFIC_SHAPE_VSYNC_CLK);
+
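+		/*
+		 * Illustrative arithmetic: a hypothetical prefill_bw of
+		 * 2,400,000,000 Bps gives DIV_ROUND_UP(2400000000, 19200000)
+		 * = 125 bytes per 19.2MHz tick, well under the 0xFF cap
+		 * applied below.
+		 */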
 		if (bw > 0xFF)
 			bw = 0xFF;
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
-				BIT(31) | bw);
+				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
 		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
 	} else {
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
@@ -852,7 +1101,7 @@
 		sde_hw_rotator_enable_irq(rot);
 	}
 
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
 
 	/* Update command queue write ptr */
 	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
@@ -942,13 +1191,15 @@
 	u32  enableInt;
 	u32  swts = 0;
 	u32  mask = 0;
+	u32  trig_sel;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	/*
 	 * Last ROT command must be ROT_START before REGDMA start
 	 */
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
+
 	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
 
 	/*
@@ -959,6 +1210,8 @@
 	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
 				REGDMA_RAM_REGDMA_CMD_RAM));
 	enableInt = ((ctx->timestamp & 1) + 1) << 30;
+	trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
+			REGDMA_CMD_TRIG_SEL_SW_START;
 
 	SDEROT_DBG(
 		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
@@ -972,34 +1225,39 @@
 	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
 		SDE_ROTREG_WRITE(rot->mdss_base,
 				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-				(length << 14) | offset);
+				(ctx->sbuf_mode ? enableInt : 0) | trig_sel |
+				((length & 0x3ff) << 14) | offset);
 		swts = ctx->timestamp;
 		mask = ~SDE_REGDMA_SWTS_MASK;
 	} else {
 		SDE_ROTREG_WRITE(rot->mdss_base,
 				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-				(length << 14) | offset);
+				(ctx->sbuf_mode ? enableInt : 0) | trig_sel |
+				((length & 0x3ff) << 14) | offset);
 		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
 		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
 	}
 
-	/* Write timestamp after previous rotator job finished */
-	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
-	offset += length;
-	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
-	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
+	/* timestamp update can only be used in offline multi-context mode */
+	if (!ctx->sbuf_mode) {
+		/* Write timestamp after previous rotator job finished */
+		sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
+		offset += length;
+		ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
+		WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
 
-	/* ensure command packet is issue before the submit command */
-	wmb();
+		/* ensure command packet is issue before the submit command */
+		wmb();
 
-	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-				enableInt | (ts_length << 14) | offset);
-	} else {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-				enableInt | (ts_length << 14) | offset);
+		if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
+			SDE_ROTREG_WRITE(rot->mdss_base,
+					REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
+					enableInt | (ts_length << 14) | offset);
+		} else {
+			SDE_ROTREG_WRITE(rot->mdss_base,
+					REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
+					enableInt | (ts_length << 14) | offset);
+		}
 	}
 
 	/* Update command queue write ptr */
@@ -1027,7 +1285,7 @@
 	if (rot->irq_num >= 0) {
 		SDEROT_DBG("Wait for Rotator completion\n");
 		rc = wait_for_completion_timeout(&ctx->rot_comp,
-					KOFF_TIMEOUT);
+				msecs_to_jiffies(rot->koff_timeout));
 
 		spin_lock_irqsave(&rot->rotisr_lock, flags);
 		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
@@ -1098,7 +1356,7 @@
 				ctx, ctx->timestamp);
 		rc = wait_event_timeout(ctx->regdma_waitq,
 				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
-				KOFF_TIMEOUT);
+				msecs_to_jiffies(rot->koff_timeout));
 
 		ATRACE_INT("sde_rot_done", 0);
 		spin_lock_irqsave(&rot->rotisr_lock, flags);
@@ -1506,13 +1764,15 @@
  * @rot: Pointer to rotator hw
  * @hw: Pointer to rotator resource
  * @session_id: Session identifier of this context
+ * @sbuf_mode: true if stream buffer is requested
  *
  * This function allocates a new rotator context for the given session id.
  */
 static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
 		struct sde_hw_rotator *rot,
 		struct sde_rot_hw_resource *hw,
-		u32    session_id)
+		u32    session_id,
+		bool   sbuf_mode)
 {
 	struct sde_hw_rotator_context *ctx;
 
@@ -1530,6 +1790,8 @@
 	ctx->timestamp  = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
 	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
 	ctx->is_secure  = false;
+	ctx->sbuf_mode  = sbuf_mode;
+	INIT_LIST_HEAD(&ctx->list);
 
 	ctx->regdma_base  = rot->cmd_wr_ptr[ctx->q_id]
 		[sde_hw_rotator_get_regdma_ctxidx(ctx)];
@@ -1547,10 +1809,11 @@
 	sde_hw_rotator_put_ctx(ctx);
 
 	SDEROT_DBG(
-		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
+		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
 		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
 		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active));
+		atomic_read(&ctx->hwres->num_active),
+		ctx->sbuf_mode);
 
 	return ctx;
 }
@@ -1567,10 +1830,11 @@
 		return;
 
 	SDEROT_DBG(
-		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
+		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
 		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
 		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active));
+		atomic_read(&ctx->hwres->num_active),
+		ctx->sbuf_mode);
 
 	/* Clear rotator context from lookup purpose */
 	sde_hw_rotator_clr_ctx(ctx);
@@ -1598,7 +1862,9 @@
 	u32 danger_lut = 0;	/* applicable for realtime client only */
 	u32 safe_lut = 0;	/* applicable for realtime client only */
 	u32 flags = 0;
+	u32 rststs = 0;
 	struct sde_rotation_item *item;
+	int ret;
 
 	if (!hw || !entry) {
 		SDEROT_ERR("null hw resource/entry\n");
@@ -1609,16 +1875,105 @@
 	rot = resinfo->rot;
 	item = &entry->item;
 
-	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
+	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
+			item->output.sbuf);
 	if (!ctx) {
 		SDEROT_ERR("Failed allocating rotator context!!\n");
 		return -EINVAL;
 	}
 
+	/* save entry for debugging purposes */
+	ctx->last_entry = entry;
+
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		if (entry->dst_buf.sbuf) {
+			u32 op_mode;
+
+			if (entry->item.trigger ==
+					SDE_ROTATOR_TRIGGER_COMMAND)
+				ctx->start_ctrl = (rot->cmd_trigger << 4);
+			else if (entry->item.trigger ==
+					SDE_ROTATOR_TRIGGER_VIDEO)
+				ctx->start_ctrl = (rot->vid_trigger << 4);
+			else
+				ctx->start_ctrl = 0;
+
+			ctx->sys_cache_mode = BIT(15) |
+					((item->output.scid & 0x1f) << 8) |
+					(item->output.writeback ? 0x5 : 0);
+
+			ctx->op_mode = BIT(4) |
+				((ctx->rot->sbuf_headroom & 0xff) << 8);
+
+			/* detect transition to inline mode */
+			op_mode = (SDE_ROTREG_READ(rot->mdss_base,
+					ROTTOP_OP_MODE) >> 4) & 0x3;
+			if (!op_mode) {
+				u32 status;
+
+				status = SDE_ROTREG_READ(rot->mdss_base,
+						ROTTOP_STATUS);
+				if (status & BIT(0)) {
+					SDEROT_ERR("rotator busy 0x%x\n",
+							status);
+					sde_hw_rotator_dump_status(rot);
+					SDEROT_EVTLOG_TOUT_HANDLER("rot",
+							"vbif_dbg_bus",
+							"panic");
+				}
+			}
+
+		} else {
+			ctx->start_ctrl = BIT(0);
+			ctx->sys_cache_mode = 0;
+			ctx->op_mode = 0;
+		}
+	} else {
+		ctx->start_ctrl = BIT(0);
+	}
+
+	SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
+
+	/*
+	 * if Rotator HW is reset, but missing PM event notification, we
+	 * need to init the SW timestamp automatically.
+	 */
+	rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
+	if (!rot->reset_hw_ts && rststs) {
+		u32 l_ts, h_ts, swts;
+
+		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
+		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
+		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
+		SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
+
+		if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
+			h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
+		else
+			l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
+
+		/* construct the combined timestamp */
+		swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
+			((l_ts & SDE_REGDMA_SWTS_MASK) <<
+			 SDE_REGDMA_SWTS_SHIFT);
+
+		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
+				swts, h_ts, l_ts);
+		SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
+		rot->last_hw_ts = swts;
+
+		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
+				rot->last_hw_ts);
+		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
+		/* ensure write is issued to the rotator HW */
+		wmb();
+	}
+
 	if (rot->reset_hw_ts) {
 		SDEROT_EVTLOG(rot->last_hw_ts);
 		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
 				rot->last_hw_ts);
+		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
 		/* ensure write is issued to the rotator HW */
 		wmb();
 		rot->reset_hw_ts = false;
@@ -1645,7 +2000,8 @@
 	sspp_cfg.fmt = sde_get_format_params(item->input.format);
 	if (!sspp_cfg.fmt) {
 		SDEROT_ERR("null format\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto error;
 	}
 	sspp_cfg.src_rect = &item->src_rect;
 	sspp_cfg.data = &entry->src_buf;
@@ -1673,6 +2029,7 @@
 
 	wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
 	wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
+	wb_cfg.prefill_bw = item->prefill_bw;
 
 	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
 
@@ -1778,6 +2135,10 @@
 			BIT(XIN_WRITEBACK));
 
 	return 0;
+
+error:
+	sde_hw_rotator_free_rotctx(rot, ctx);
+	return ret;
 }
 
 /*
@@ -1887,6 +2248,7 @@
 
 	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
 
+	/* features exposed via rotator top h/w version */
 	if (hw_version != SDE_ROT_TYPE_V1_0) {
 		SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
 		set_bit(SDE_CAPS_R3_1P5_DOWNSCALE,  mdata->sde_caps_map);
@@ -1901,6 +2263,29 @@
 	mdata->regdump = sde_rot_r3_regdump;
 	mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
 	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
+
+	/* features exposed via mdss h/w version */
+	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
+		SDEROT_DBG("Supporting sys cache inline rotation\n");
+		set_bit(SDE_CAPS_MIN_BUS_VOTE,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
+		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
+		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
+		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
+		rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
+		rot->downscale_caps =
+			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
+	} else {
+		rot->inpixfmts = sde_hw_rotator_v3_inpixfmts;
+		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
+		rot->outpixfmts = sde_hw_rotator_v3_outpixfmts;
+		rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
+		rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
+			"LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
+			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
+	}
+
 	return 0;
 }
 
@@ -1989,6 +2374,23 @@
 			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
 			goto done_isr_handle;
 		}
+
+		/*
+		 * Timestamp packet is not available in sbuf mode.
+		 * Simulate timestamp update in the handler instead.
+		 */
+		if (!list_empty(&rot->sbuf_ctx[q_id])) {
+			ctx = list_first_entry_or_null(&rot->sbuf_ctx[q_id],
+					struct sde_hw_rotator_context, list);
+			if (ctx) {
+				ts = ctx->timestamp;
+				sde_hw_rotator_update_swts(rot, ctx, ts);
+				SDEROT_DBG("update swts:0x%X\n", ts);
+			} else {
+				SDEROT_ERR("invalid swts ctx\n");
+			}
+		}
+
 		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
 
 		/*
@@ -2057,11 +2459,24 @@
 		struct sde_rot_entry *entry)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	struct sde_hw_rotator *hw_data;
 	int ret = 0;
 	u16 src_w, src_h, dst_w, dst_h;
 	struct sde_rotation_item *item = &entry->item;
 	struct sde_mdp_format_params *fmt;
 
+	if (!mgr || !entry || !mgr->hw_data) {
+		SDEROT_ERR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	hw_data = mgr->hw_data;
+
+	if (hw_data->maxlinewidth < item->src_rect.w) {
+		SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
+		return -EINVAL;
+	}
+
 	src_w = item->src_rect.w;
 	src_h = item->src_rect.h;
 
@@ -2076,6 +2491,12 @@
 	entry->dnsc_factor_w = 0;
 	entry->dnsc_factor_h = 0;
 
+	if (item->output.sbuf &&
+			!test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		SDEROT_ERR("stream buffer not supported\n");
+		return -EINVAL;
+	}
+
 	if ((src_w != dst_w) || (src_h != dst_h)) {
 		if ((src_w % dst_w) || (src_h % dst_h)) {
 			SDEROT_DBG("non integral scale not support\n");
@@ -2183,6 +2604,9 @@
 
 	SPRINT("downscale_compression=1\n");
 
+	if (hw_data->downscale_caps)
+		SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
+
 #undef SPRINT
 	return cnt;
 }
@@ -2253,14 +2677,23 @@
 static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
 		int index, bool input)
 {
+	struct sde_hw_rotator *rot;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return 0;
+	}
+
+	rot = mgr->hw_data;
+
 	if (input) {
-		if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
-			return sde_hw_rotator_input_pixfmts[index];
+		if ((index < rot->num_inpixfmt) && rot->inpixfmts)
+			return rot->inpixfmts[index];
 		else
 			return 0;
 	} else {
-		if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
-			return sde_hw_rotator_output_pixfmts[index];
+		if ((index < rot->num_outpixfmt) && rot->outpixfmts)
+			return rot->outpixfmts[index];
 		else
 			return 0;
 	}
@@ -2275,22 +2708,88 @@
 static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
 		bool input)
 {
+	struct sde_hw_rotator *rot;
+	u32 *pixfmts;
+	u32 num_pixfmt;
 	int i;
 
-	if (input) {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
-			if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
-				return true;
-	} else {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
-			if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
-				return true;
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return false;
 	}
 
+	rot = mgr->hw_data;
+
+	if (input) {
+		pixfmts = rot->inpixfmts;
+		num_pixfmt = rot->num_inpixfmt;
+	} else {
+		pixfmts = rot->outpixfmts;
+		num_pixfmt = rot->num_outpixfmt;
+	}
+
+	if (!pixfmts || !num_pixfmt) {
+		SDEROT_ERR("invalid pixel format tables\n");
+		return false;
+	}
+
+	for (i = 0; i < num_pixfmt; i++)
+		if (pixfmts[i] == pixfmt)
+			return true;
+
 	return false;
 }
 
 /*
+ * sde_hw_rotator_get_downscale_caps - get scaling capability string
+ * @mgr: Pointer to rotator manager
+ * @caps: Pointer to capability string buffer; NULL to return maximum length
+ * @len: length of capability string buffer
+ * return: length of capability string
+ */
+static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
+		char *caps, int len)
+{
+	struct sde_hw_rotator *rot;
+	int rc = 0;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	rot = mgr->hw_data;
+
+	if (rot->downscale_caps) {
+		if (caps)
+			rc = snprintf(caps, len, "%s", rot->downscale_caps);
+		else
+			rc = strlen(rot->downscale_caps);
+	}
+
+	return rc;
+}
+
+/*
+ * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
+ * @mgr: Pointer to rotator manager
+ * return: maximum line width supported by hardware
+ */
+static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
+{
+	struct sde_hw_rotator *rot;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	rot = mgr->hw_data;
+
+	return rot->maxlinewidth;
+}
+
+/*
  * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
  * @hw_data: Pointer to rotator hw
  * @dev: Pointer to platform device
@@ -2329,6 +2828,46 @@
 		hw_data->highest_bank = data;
 	}
 
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,sde-ubwc-malsize", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
+	} else {
+		SDEROT_DBG("set ubwc malsize to %d\n", data);
+		hw_data->ubwc_malsize = data;
+	}
+
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,sde-ubwc_swizzle", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
+	} else {
+		SDEROT_DBG("set ubwc swizzle to %d\n", data);
+		hw_data->ubwc_swizzle = data;
+	}
+
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,mdss-sbuf-headroom", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
+	} else {
+		SDEROT_DBG("set sbuf headroom to %d\n", data);
+		hw_data->sbuf_headroom = data;
+	}
+
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,mdss-rot-linewidth", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
+	} else {
+		SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
+		hw_data->maxlinewidth = data;
+	}
+
 	return ret;
 }
 
@@ -2356,6 +2895,9 @@
 
 	rot->mdss_base = mdata->sde_io.base;
 	rot->pdev      = mgr->pdev;
+	rot->koff_timeout = KOFF_TIMEOUT;
+	rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
+	rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
 
 	/* Assign ops */
 	mgr->ops_hw_destroy = sde_hw_rotator_destroy;
@@ -2372,6 +2914,8 @@
 	mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
 	mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
 	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
+	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
+	mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
 
 	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
 	if (ret)
@@ -2425,8 +2969,10 @@
 					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
 	}
 
-	atomic_set(&rot->timestamp[0], 0);
-	atomic_set(&rot->timestamp[1], 0);
+	for (i = 0; i < ROT_QUEUE_MAX; i++) {
+		atomic_set(&rot->timestamp[i], 0);
+		INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
+	}
 
 	ret = sde_rotator_hw_rev_init(rot);
 	if (ret)
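
Editor's note on the two manager ops wired up above (ops_hw_get_downscale_caps and ops_hw_get_maxlinewidth): the caps op follows a two-step convention, returning the required string length when called with a NULL buffer. A minimal caller sketch follows; it is illustrative only, not part of this patch, and query_rot_caps is a hypothetical helper.

static int query_rot_caps(struct sde_rot_mgr *mgr)
{
	char *caps;
	int len;

	/* First pass with a NULL buffer returns the string length. */
	len = mgr->ops_hw_get_downscale_caps(mgr, NULL, 0);
	if (len <= 0)
		return len;

	caps = kzalloc(len + 1, GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	/* Second pass copies e.g. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2" */
	mgr->ops_hw_get_downscale_caps(mgr, caps, len + 1);
	SDEROT_DBG("downscale caps: %s, max line width: %d\n",
			caps, mgr->ops_hw_get_maxlinewidth(mgr));

	kfree(caps);
	return 0;
}
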
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
index 987e61c..da67527 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -44,5 +44,41 @@
 		return -EINVAL;
 	}
 
+	if (!debugfs_create_u32("koff_timeout", 0644,
+			debugfs_root, &hw_data->koff_timeout)) {
+		SDEROT_ERR("fail create koff_timeout\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("vid_trigger", 0644,
+			debugfs_root, &hw_data->vid_trigger)) {
+		SDEROT_ERR("fail create vid_trigger\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("cmd_trigger", 0644,
+			debugfs_root, &hw_data->cmd_trigger)) {
+		SDEROT_ERR("fail create cmd_trigger\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("sbuf_headroom", 0644,
+			debugfs_root, &hw_data->sbuf_headroom)) {
+		SDEROT_ERR("fail create sbuf_headroom\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("solid_fill", 0644,
+			debugfs_root, &hw_data->solid_fill)) {
+		SDEROT_ERR("fail create solid_fill\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("constant_color", 0644,
+			debugfs_root, &hw_data->constant_color)) {
+		SDEROT_ERR("fail create constant_color\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index fedade1..aa762dd 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -45,6 +45,11 @@
 #define ROTTOP_ROT_UBWC_DEC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x58)
 #define ROTTOP_ROT_UBWC_ENC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x5C)
 
+#define ROTTOP_START_CTRL_TRIG_SEL_SW           0
+#define ROTTOP_START_CTRL_TRIG_SEL_DONE         1
+#define ROTTOP_START_CTRL_TRIG_SEL_REGDMA       2
+#define ROTTOP_START_CTRL_TRIG_SEL_MDP          3
+
 /* SDE_ROT_SSPP:
  * OFFSET=0x0A8900
  */
@@ -65,6 +70,7 @@
 #define ROT_SSPP_SRC_UNPACK_PATTERN             (SDE_ROT_SSPP_OFFSET+0x34)
 #define ROT_SSPP_SRC_OP_MODE                    (SDE_ROT_SSPP_OFFSET+0x38)
 #define ROT_SSPP_SRC_CONSTANT_COLOR             (SDE_ROT_SSPP_OFFSET+0x3C)
+#define ROT_SSPP_UBWC_STATIC_CTRL               (SDE_ROT_SSPP_OFFSET+0x44)
 #define ROT_SSPP_FETCH_CONFIG                   (SDE_ROT_SSPP_OFFSET+0x48)
 #define ROT_SSPP_VC1_RANGE                      (SDE_ROT_SSPP_OFFSET+0x4C)
 #define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_0   (SDE_ROT_SSPP_OFFSET+0x50)
@@ -160,6 +166,10 @@
 #define ROT_WB_SAFE_LUT                         (SDE_ROT_WB_OFFSET+0x088)
 #define ROT_WB_CREQ_LUT                         (SDE_ROT_WB_OFFSET+0x08C)
 #define ROT_WB_QOS_CTRL                         (SDE_ROT_WB_OFFSET+0x090)
+#define ROT_WB_SYS_CACHE_MODE                   (SDE_ROT_WB_OFFSET+0x094)
+#define ROT_WB_UBWC_STATIC_CTRL                 (SDE_ROT_WB_OFFSET+0x144)
+#define ROT_WB_SBUF_STATUS_PLANE0               (SDE_ROT_WB_OFFSET+0x148)
+#define ROT_WB_SBUF_STATUS_PLANE1               (SDE_ROT_WB_OFFSET+0x14C)
 #define ROT_WB_CSC_MATRIX_COEFF_0               (SDE_ROT_WB_OFFSET+0x260)
 #define ROT_WB_CSC_MATRIX_COEFF_1               (SDE_ROT_WB_OFFSET+0x264)
 #define ROT_WB_CSC_MATRIX_COEFF_2               (SDE_ROT_WB_OFFSET+0x268)
@@ -251,6 +261,10 @@
 /* REGDMA ADDR offset Mask */
 #define REGDMA_ADDR_OFFSET_MASK         0xFFFFF
 
+/* REGDMA command trigger select */
+#define REGDMA_CMD_TRIG_SEL_SW_START    (0 << 27)
+#define REGDMA_CMD_TRIG_SEL_MDP_FLUSH   (1 << 27)
+
 /* General defines */
 #define ROT_DONE_MASK                   0x1
 #define ROT_DONE_CLEAR                  0x1
@@ -277,5 +291,6 @@
 #define REGDMA_INT_LOW_MASK             0x00000700
 #define REGDMA_INT_ERR_MASK             0x000F0000
 #define REGDMA_TIMESTAMP_REG            ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL
+#define REGDMA_RESET_STATUS_REG         ROT_SSPP_TPG_RGB_MAPPING
 
 #endif /*_SDE_ROTATOR_R3_HWIO_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 5502cc0..22eaa3f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -85,6 +85,7 @@
  *  @dest_rect: dest ROI, caller takes into account the different operations
  *              such as decimation, flip etc to program this field
  *  @addr:      destination surface address
+ *  @prefill_bw: prefill bandwidth in Bps
  */
 struct sde_hw_rot_wb_cfg {
 	struct sde_mdp_format_params   *fmt;
@@ -97,6 +98,7 @@
 	u32                             h_downscale_factor;
 	u32                             fps;
 	u64                             bw;
+	u64                             prefill_bw;
 };
 
 
@@ -200,9 +202,16 @@
  * struct sde_hw_rotator_context : Each rotator context ties to each priority
  * queue. Max number of concurrent contexts in regdma is limited to regdma
  * ram segment size allocation. Each rotator context can be any priority. A
- * incrementatl timestamp is used to identify and assigne to each context.
+ * incremental timestamp is used to identify and is assigned to each context.
+ * @list: list of pending context
+ * @sbuf_mode: true if stream buffer is requested
+ * @start_ctrl: start control register update value
+ * @sys_cache_mode: sys cache mode register update value
+ * @op_mode: rot top op mode selection
+ * @last_entry: pointer to last configured entry (for debugging purposes)
  */
 struct sde_hw_rotator_context {
+	struct list_head list;
 	struct sde_hw_rotator *rot;
 	struct sde_rot_hw_resource *hwres;
 	enum   sde_rot_queue_prio q_id;
@@ -219,6 +228,11 @@
 	dma_addr_t ts_addr;
 	bool   is_secure;
 	bool   is_traffic_shaping;
+	bool   sbuf_mode;
+	u32    start_ctrl;
+	u32    sys_cache_mode;
+	u32    op_mode;
+	struct sde_rot_entry *last_entry;
 };
 
 /**
@@ -234,6 +248,21 @@
  * struct sde_hw_rotator : Rotator description
  * @hw:           mdp register mapped offset
  * @ops:          pointer to operations possible for the rotator HW
+ * @highest_bank: highest bank size of memory
+ * @ubwc_malsize: ubwc minimum allowable length
+ * @ubwc_swizzle: ubwc swizzle enable
+ * @sbuf_headroom: stream buffer headroom in lines
+ * @solid_fill: true if solid fill is requested
+ * @constant_color: solid fill constant color
+ * @sbuf_ctx: list of active sbuf contexts in FIFO order
+ * @vid_trigger: video mode trigger select
+ * @cmd_trigger: command mode trigger select
+ * @inpixfmts: array of supported input pixel formats in fourcc
+ * @num_inpixfmt: size of the supported input pixel format array
+ * @outpixfmts: array of supported output pixel formats in fourcc
+ * @num_outpixfmt: size of the supported output pixel formats array
+ * @downscale_caps: capability string of scaling
+ * @maxlinewidth: maximum line width supported
  */
 struct sde_hw_rotator {
 	/* base */
@@ -271,6 +300,11 @@
 	void *swts_buffer;
 
 	u32    highest_bank;
+	u32    ubwc_malsize;
+	u32    ubwc_swizzle;
+	u32    sbuf_headroom;
+	u32    solid_fill;
+	u32    constant_color;
 
 	spinlock_t rotctx_lock;
 	spinlock_t rotisr_lock;
@@ -278,6 +312,18 @@
 	bool    dbgmem;
 	bool reset_hw_ts;
 	u32 last_hw_ts;
+	u32 koff_timeout;
+	u32 vid_trigger;
+	u32 cmd_trigger;
+
+	struct list_head sbuf_ctx[ROT_QUEUE_MAX];
+
+	u32 *inpixfmts;
+	u32 num_inpixfmt;
+	u32 *outpixfmts;
+	u32 num_outpixfmt;
+	const char *downscale_caps;
+	u32 maxlinewidth;
 };
 
 /**
@@ -349,15 +395,17 @@
  */
 static inline void sde_hw_rotator_put_ctx(struct sde_hw_rotator_context *ctx)
 {
-	 struct sde_hw_rotator *rot = ctx->rot;
-	 u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	 unsigned long flags;
+	struct sde_hw_rotator *rot = ctx->rot;
+	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
+	unsigned long flags;
 
-	 spin_lock_irqsave(&rot->rotisr_lock, flags);
-	 rot->rotCtx[ctx->q_id][idx] = ctx;
-	 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+	spin_lock_irqsave(&rot->rotisr_lock, flags);
+	rot->rotCtx[ctx->q_id][idx] = ctx;
+	if (ctx->sbuf_mode)
+		list_add_tail(&ctx->list, &rot->sbuf_ctx[ctx->q_id]);
+	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
-	 SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
+	SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
 			 ctx->q_id, idx, ctx, ctx->session_id);
 }
 
@@ -367,15 +415,17 @@
  */
 static inline void sde_hw_rotator_clr_ctx(struct sde_hw_rotator_context *ctx)
 {
-	 struct sde_hw_rotator *rot = ctx->rot;
-	 u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	 unsigned long flags;
+	struct sde_hw_rotator *rot = ctx->rot;
+	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
+	unsigned long flags;
 
-	 spin_lock_irqsave(&rot->rotisr_lock, flags);
-	 rot->rotCtx[ctx->q_id][idx] = NULL;
-	 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+	spin_lock_irqsave(&rot->rotisr_lock, flags);
+	rot->rotCtx[ctx->q_id][idx] = NULL;
+	if (ctx->sbuf_mode)
+		list_del_init(&ctx->list);
+	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
-	 SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
+	SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
 			 ctx->q_id, idx, ctx->session_id);
 }
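
For readers following the new sbuf bookkeeping in put_ctx/clr_ctx above: the intent is per-queue FIFO ordering, with contexts enqueued at the tail of rot->sbuf_ctx[q_id] under rotisr_lock and the ISR consuming the oldest entry first. A minimal consumer sketch, illustrative only and not part of this patch:

static struct sde_hw_rotator_context *sde_hw_rotator_peek_sbuf_ctx(
		struct sde_hw_rotator *rot, enum sde_rot_queue_prio q_id)
{
	struct sde_hw_rotator_context *ctx;
	unsigned long flags;

	spin_lock_irqsave(&rot->rotisr_lock, flags);
	/* Oldest pending sbuf context, or NULL if the queue is empty. */
	ctx = list_first_entry_or_null(&rot->sbuf_ctx[q_id],
			struct sde_hw_rotator_context, list);
	spin_unlock_irqrestore(&rot->rotisr_lock, flags);

	return ctx;
}
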
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 4f6386b..e209192 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -471,11 +471,18 @@
 
 	sde_smmu = (struct sde_smmu_client *)token;
 
-	/* trigger rotator panic and dump */
-	SDEROT_ERR("trigger rotator panic and dump, iova=0x%08lx\n", iova);
+	/* trigger rotator dump */
+	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
+			iova, flags);
+	SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);
 
-	sde_rot_dump_panic();
+	/* generate dump, but no panic */
+	sde_rot_evtlog_tout_handler(false, __func__, "rot", "vbif_dbg_bus");
 
+	/*
+	 * return -ENOSYS to allow smmu driver to dump out useful
+	 * debug info.
+	 */
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 8fb027d..9ef4282 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -210,6 +210,32 @@
 		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
 		ps->plane_size[3] = ALIGN(ps->ystride[3] *
 			ALIGN(DIV_ROUND_UP(height / 2, 8), 16), 4096);
+	} else if (sde_mdp_is_p010_format(fmt)) {
+		ps->num_planes = 2;
+		/* Y bitstream stride and plane size */
+		ps->ystride[0] = ALIGN(width * 2, 256);
+		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 16),
+					4096);
+
+		/* CbCr bitstream stride and plane size */
+		ps->ystride[1] = ALIGN(width * 2, 256);
+		ps->plane_size[1] = ALIGN(ps->ystride[1] *
+			ALIGN(height / 2, 16), 4096);
+
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 2;
+
+		/* Y meta data stride and plane size */
+		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+			ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
+
+		/* CbCr meta data stride and plane size */
+		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
+		ps->plane_size[3] = ALIGN(ps->ystride[3] *
+			ALIGN(DIV_ROUND_UP(height / 2, 4), 16), 4096);
 	} else if (sde_mdp_is_tp10_format(fmt)) {
 		u32 yWidth   = sde_mdp_general_align(width, 192);
 		u32 yHeight  = ALIGN(height, 16);
@@ -735,6 +761,12 @@
 {
 	u32 domain;
 
+	if (data->flags & SDE_ROT_EXT_IOVA) {
+		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
+				&data->addr, data->len);
+		return 0;
+	}
+
 	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
 		SDEROT_DBG("ion hdl=%p buf=0x%pa\n", data->srcp_dma_buf,
 							&data->addr);
@@ -787,9 +819,14 @@
 	len = &data->len;
 	data->flags |= img->flags;
 	data->offset = img->offset;
-	if (data->flags & SDE_ROT_EXT_DMA_BUF)
+	if (data->flags & SDE_ROT_EXT_DMA_BUF) {
 		data->srcp_dma_buf = img->buffer;
-	else if (IS_ERR(data->srcp_dma_buf)) {
+	} else if (data->flags & SDE_ROT_EXT_IOVA) {
+		data->addr = img->addr;
+		data->len = img->len;
+		SDEROT_DBG("use client %pad/%lx\n", &data->addr, data->len);
+		return 0;
+	} else if (IS_ERR(data->srcp_dma_buf)) {
 		SDEROT_ERR("error on ion_import_fd\n");
 		ret = PTR_ERR(data->srcp_dma_buf);
 		data->srcp_dma_buf = NULL;
@@ -891,6 +928,12 @@
 	if (data->addr && data->len)
 		return 0;
 
+	if (data->flags & SDE_ROT_EXT_IOVA) {
+		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
+				&data->addr, data->len);
+		return 0;
+	}
+
 	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
 		if (sde_mdp_is_map_needed(data)) {
 			domain = sde_smmu_get_domain_type(data->flags,
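
As a sanity check on the P010 plane-size formulas added earlier in this file, here is the arithmetic worked out for one hypothetical frame; illustrative only, not part of this patch.

/*
 * Worked example for the linear (non-UBWC) P010 branch, assuming
 * width = 1920 and height = 1080:
 *
 *   ystride[0]    = ALIGN(1920 * 2, 256)                    = 3840
 *   plane_size[0] = ALIGN(3840 * ALIGN(1080, 16), 4096)
 *                 = ALIGN(3840 * 1088, 4096)                = 4177920
 *   ystride[1]    = ALIGN(1920 * 2, 256)                    = 3840
 *   plane_size[1] = ALIGN(3840 * ALIGN(1080 / 2, 16), 4096)
 *                 = ALIGN(3840 * 544, 4096)                 = 2088960
 *
 * Two planes, roughly 6 MB per frame in total.
 */
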
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
index 3f94a15..cc367cd 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -66,6 +66,7 @@
 #define SDE_SECURE_OVERLAY_SESSION	0x00008000
 #define SDE_ROT_EXT_DMA_BUF		0x00010000
 #define SDE_SECURE_CAMERA_SESSION	0x00020000
+#define SDE_ROT_EXT_IOVA			0x00040000
 
 struct sde_rot_data_type;
 
@@ -77,7 +78,8 @@
 	int id;
 	uint32_t flags;
 	uint32_t priv;
-	uint32_t iova;
+	dma_addr_t addr;
+	u32 len;
 };
 
 struct sde_layer_plane {
@@ -86,6 +88,10 @@
 	struct dma_buf *buffer;
 	struct ion_handle *handle;
 
+	/* i/o virtual address & length */
+	dma_addr_t addr;
+	u32 len;
+
 	/* Pixel offset in the dma buffer. */
 	uint32_t offset;
 
@@ -127,6 +133,15 @@
 	 * for new content.
 	 */
 	struct sde_rot_sync_fence *fence;
+
+	/* indicate if this is a stream (inline) buffer */
+	bool sbuf;
+
+	/* specify the system cache id in stream buffer mode */
+	int scid;
+
+	/* indicate if system cache writeback is required */
+	bool writeback;
 };
 
 struct sde_mdp_plane_sizes {
@@ -151,22 +166,12 @@
 	struct sg_table *srcp_table;
 };
 
-enum sde_data_state {
-	SDE_BUF_STATE_UNUSED,
-	SDE_BUF_STATE_READY,
-	SDE_BUF_STATE_ACTIVE,
-	SDE_BUF_STATE_CLEANUP,
-};
-
 struct sde_mdp_data {
-	enum sde_data_state state;
 	u8 num_planes;
 	struct sde_mdp_img_data p[SDE_ROT_MAX_PLANES];
-	struct list_head buf_list;
-	struct list_head pipe_list;
-	struct list_head chunk_list;
-	u64 last_alloc;
-	u64 last_freed;
+	bool sbuf;
+	int scid;
+	bool writeback;
 };
 
 void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
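
To illustrate the new SDE_ROT_EXT_IOVA path (client-mapped buffers described by addr/len instead of an ion fd or dma-buf), a hypothetical client could fill a plane as sketched below. This is illustrative only, not part of this patch, and the iova/len values are assumed to come from the client's own SMMU mapping.

static void fill_premapped_plane(struct sde_layer_plane *plane,
		dma_addr_t iova, u32 len)
{
	/*
	 * The client already holds an SMMU mapping for this buffer, so the
	 * rotator must not import or map it again; it only records the
	 * address and length.
	 */
	plane->addr = iova;
	plane->len = len;
	plane->offset = 0;
	/*
	 * The layer's buffer flags must include SDE_ROT_EXT_IOVA so that
	 * sde_mdp_get_img()/sde_mdp_data_map() in sde_rotator_util.c take
	 * the early-return path added above.
	 */
}
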
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 6b3ddfa..87a4ac8 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -33,7 +33,6 @@
 	[ilog2(HAL_H264_PROFILE_CONSTRAINED_HIGH)] =
 		HFI_H264_PROFILE_CONSTRAINED_HIGH,
 	[ilog2(HAL_VPX_PROFILE_VERSION_1)] = HFI_VPX_PROFILE_VERSION_1,
-	[ilog2(HAL_MVC_PROFILE_STEREO_HIGH)] = HFI_H264_PROFILE_STEREO_HIGH,
 };
 
 static int entropy_mode[] = {
@@ -68,13 +67,10 @@
 	[ilog2(HAL_COLOR_FORMAT_BGR565)] = HFI_COLOR_FORMAT_BGR565,
 	[ilog2(HAL_COLOR_FORMAT_RGB888)] = HFI_COLOR_FORMAT_RGB888,
 	[ilog2(HAL_COLOR_FORMAT_BGR888)] = HFI_COLOR_FORMAT_BGR888,
-	[ilog2(HAL_COLOR_FORMAT_RGBA8888)] = HFI_COLOR_FORMAT_RGBA8888,
 	/* UBWC Color formats*/
 	[ilog2(HAL_COLOR_FORMAT_NV12_UBWC)] =  HFI_COLOR_FORMAT_NV12_UBWC,
 	[ilog2(HAL_COLOR_FORMAT_NV12_TP10_UBWC)] =
 			HFI_COLOR_FORMAT_YUV420_TP10_UBWC,
-	[ilog2(HAL_COLOR_FORMAT_RGBA8888_UBWC)] =
-			HFI_COLOR_FORMAT_RGBA8888_UBWC,
 };
 
 static int nal_type[] = {
@@ -126,26 +122,6 @@
 	}
 }
 
-u32 get_hfi_layout(enum hal_buffer_layout_type hal_buf_layout)
-{
-	u32 hfi_layout;
-
-	switch (hal_buf_layout) {
-	case HAL_BUFFER_LAYOUT_TOP_BOTTOM:
-		hfi_layout = HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM;
-		break;
-	case HAL_BUFFER_LAYOUT_SEQ:
-		hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
-		break;
-	default:
-		dprintk(VIDC_ERR, "Invalid buffer layout: %#x\n",
-			hal_buf_layout);
-		hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
-		break;
-	}
-	return hfi_layout;
-}
-
 enum hal_domain vidc_get_hal_domain(u32 hfi_domain)
 {
 	enum hal_domain hal_domain = 0;
@@ -192,9 +168,6 @@
 	case HFI_VIDEO_CODEC_VP9:
 		hal_codec = HAL_VIDEO_CODEC_VP9;
 		break;
-	case HFI_VIDEO_CODEC_HEVC_HYBRID:
-		hal_codec = HAL_VIDEO_CODEC_HEVC_HYBRID;
-		break;
 	default:
 		dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
 			__func__, hfi_codec);
@@ -233,7 +206,6 @@
 	u32 hfi_codec = 0;
 
 	switch (hal_codec) {
-	case HAL_VIDEO_CODEC_MVC:
 	case HAL_VIDEO_CODEC_H264:
 		hfi_codec = HFI_VIDEO_CODEC_H264;
 		break;
@@ -252,9 +224,6 @@
 	case HAL_VIDEO_CODEC_VP9:
 		hfi_codec = HFI_VIDEO_CODEC_VP9;
 		break;
-	case HAL_VIDEO_CODEC_HEVC_HYBRID:
-		hfi_codec = HFI_VIDEO_CODEC_HEVC_HYBRID;
-		break;
 	default:
 		dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
 			__func__, hal_codec);
@@ -555,12 +524,6 @@
 	case HAL_EXTRADATA_INTERLACE_VIDEO:
 		ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
 		break;
-	case HAL_EXTRADATA_VC1_FRAMEDISP:
-		ret = HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_VC1_SEQDISP:
-		ret = HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA;
-		break;
 	case HAL_EXTRADATA_TIMESTAMP:
 		ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
 		break;
@@ -673,9 +636,6 @@
 	case HAL_LTR_MODE_MANUAL:
 		ltrmode = HFI_LTR_MODE_MANUAL;
 		break;
-	case HAL_LTR_MODE_PERIODIC:
-		ltrmode = HFI_LTR_MODE_PERIODIC;
-		break;
 	default:
 		dprintk(VIDC_ERR, "Invalid ltr mode: %#x\n",
 			ltr_mode_type);
@@ -939,31 +899,10 @@
 		struct hfi_cmd_session_get_property_packet *pkt,
 		struct hal_session *session, enum hal_property ptype)
 {
-	int rc = 0;
-
-	if (!pkt || !session) {
-		dprintk(VIDC_ERR, "%s Invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	pkt->size = sizeof(struct hfi_cmd_session_get_property_packet);
-	pkt->packet_type = HFI_CMD_SESSION_GET_PROPERTY;
-	pkt->session_id = hash32_ptr(session);
-	pkt->num_properties = 1;
-	switch (ptype) {
-	case HAL_CONFIG_VDEC_ENTROPY:
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
-		break;
-	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
-		break;
-	default:
-		dprintk(VIDC_ERR, "%s cmd:%#x not supported\n", __func__,
+	/* Currently no get property is supported */
+	dprintk(VIDC_ERR, "%s cmd:%#x not supported\n", __func__,
 			ptype);
-		rc = -EINVAL;
-		break;
-	}
-	return rc;
+	return -EINVAL;
 }
 
 int create_pkt_cmd_session_set_property(
@@ -1028,8 +967,6 @@
 		break;
 	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
 		break;
-	case HAL_PARAM_EXTRA_DATA_HEADER_CONFIG:
-		break;
 	case HAL_PARAM_FRAME_SIZE:
 	{
 		struct hfi_frame_size *hfi;
@@ -1142,14 +1079,6 @@
 		pkt->size += sizeof(u32) * 2;
 		break;
 	}
-	case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) * 2;
-		break;
-	}
 	case HAL_PARAM_VDEC_MULTI_STREAM:
 	{
 		struct hfi_multi_stream *hfi;
@@ -1199,10 +1128,6 @@
 			HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
 		pkt->size += sizeof(u32);
 		break;
-	case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
-		break;
-	case HAL_PARAM_VENC_MPEG4_AC_PREDICTION:
-		break;
 	case HAL_CONFIG_VENC_TARGET_BITRATE:
 	{
 		struct hfi_bitrate *hfi;
@@ -1225,6 +1150,10 @@
 			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
 		hfi = (struct hfi_profile_level *)
 			&pkt->rg_property_data[1];
+
+		/*
+		 * Assumption: HAL level values are identical to HFI levels.
+		 */
 		hfi->level = prop->level;
 		hfi->profile = hal_to_hfi_type(HAL_PARAM_PROFILE_LEVEL_CURRENT,
 				prop->profile);
@@ -1235,13 +1164,6 @@
 					prop->profile);
 		}
 
-		if (!hfi->level) {
-			hfi->level = 1;
-			dprintk(VIDC_WARN,
-					"Level %d not supported, falling back to high\n",
-					prop->level);
-		}
-
 		pkt->size += sizeof(u32) + sizeof(struct hfi_profile_level);
 		break;
 	}
@@ -1481,21 +1403,13 @@
 		case HAL_INTRA_REFRESH_NONE:
 			hfi->mode = HFI_INTRA_REFRESH_NONE;
 			break;
-		case HAL_INTRA_REFRESH_ADAPTIVE:
-			hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
-			hfi->mbs = prop->air_mbs;
-			break;
 		case HAL_INTRA_REFRESH_CYCLIC:
 			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
-			hfi->mbs = prop->cir_mbs;
-			break;
-		case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
-			hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
-			hfi->mbs = prop->air_mbs;
+			hfi->mbs = prop->ir_mbs;
 			break;
 		case HAL_INTRA_REFRESH_RANDOM:
 			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
-			hfi->mbs = prop->air_mbs;
+			hfi->mbs = prop->ir_mbs;
 			break;
 		default:
 			dprintk(VIDC_ERR,
@@ -1601,14 +1515,6 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
-		break;
-	}
 	case HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY:
 	{
 		create_pkt_enable(pkt->rg_property_data,
@@ -1617,21 +1523,6 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_MVC_BUFFER_LAYOUT:
-	{
-		struct hfi_mvc_buffer_layout_descp_type *hfi;
-		struct hal_mvc_buffer_layout *layout_info = pdata;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT;
-		hfi = (struct hfi_mvc_buffer_layout_descp_type *)
-			&pkt->rg_property_data[1];
-		hfi->layout_type = get_hfi_layout(layout_info->layout_type);
-		hfi->bright_view_first = layout_info->bright_view_first;
-		hfi->ngap = layout_info->ngap;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_mvc_buffer_layout_descp_type);
-		break;
-	}
 	case HAL_PARAM_VENC_LTRMODE:
 	{
 		struct hfi_ltr_mode *hfi;
@@ -1742,14 +1633,6 @@
 		pkt->size += sizeof(u32) * 2;
 		break;
 	}
-	case HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32) * 2;
-		break;
-	}
 	case HAL_PARAM_VENC_HIER_P_HYBRID_MODE:
 	{
 		pkt->rg_property_data[0] =
@@ -1948,7 +1831,6 @@
 	case HAL_PARAM_VDEC_MB_QUANTIZATION:
 	case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
 	case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
-	case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
 	case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
 	case HAL_CONFIG_VDEC_MULTI_STREAM:
 	case HAL_PARAM_VENC_MULTI_SLICE_INFO:
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index a949c55..19a1e3f 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -202,8 +202,8 @@
 	unsigned long align = SZ_4K;
 	unsigned long ion_flags = 0;
 
-#ifndef CONFIG_ION
-	hndl = ion_import_dma_buf(client->clnt, fd);
+#ifdef CONFIG_ION
+	hndl = ion_import_dma_buf_fd(client->clnt, fd);
 #endif
 	dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
 	if (IS_ERR_OR_NULL(hndl)) {
@@ -476,8 +476,8 @@
 			clt, priv);
 		return false;
 	}
-#ifndef CONFIG_ION
-	handle = ion_import_dma_buf(client->clnt, fd);
+#ifdef CONFIG_ION
+	handle = ion_import_dma_buf_fd(client->clnt, fd);
 #endif
 	ret = handle == priv;
 	(!IS_ERR_OR_NULL(handle)) ? ion_free(client->clnt, handle) : 0;
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 2db245e..c82db74 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -73,11 +73,6 @@
 
 	trace_msm_v4l2_vidc_close_start("msm_v4l2_close start");
 	vidc_inst = get_vidc_inst(filp, NULL);
-	rc = msm_vidc_release_buffers(vidc_inst,
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-	if (rc)
-		dprintk(VIDC_WARN,
-			"Failed in %s for release output buffers\n", __func__);
 
 	rc = msm_vidc_close(vidc_inst);
 	trace_msm_v4l2_vidc_close_end("msm_v4l2_close end");
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index fd6e681..7c99e90 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -26,13 +26,6 @@
 #define MAX_OPERATING_FRAME_RATE (300 << 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
 
-static const char *const mpeg_video_vidc_divx_format[] = {
-	"DIVX Format 3",
-	"DIVX Format 4",
-	"DIVX Format 5",
-	"DIVX Format 6",
-	NULL
-};
 static const char *const mpeg_video_stream_format[] = {
 	"NAL Format Start Codes",
 	"NAL Format One NAL Per Buffer",
@@ -57,29 +50,6 @@
 	"Turbo"
 };
 
-static const char *const h263_level[] = {
-	"1.0",
-	"2.0",
-	"3.0",
-	"4.0",
-	"4.5",
-	"5.0",
-	"6.0",
-	"7.0",
-};
-
-static const char *const h263_profile[] = {
-	"Baseline",
-	"H320 Coding",
-	"Backward Compatible",
-	"ISWV2",
-	"ISWV3",
-	"High Compression",
-	"Internet",
-	"Interlace",
-	"High Latency",
-};
-
 static const char *const vp8_profile_level[] = {
 	"Unused",
 	"0.0",
@@ -108,11 +78,6 @@
 	"CABAC Entropy Mode",
 };
 
-static const char *const mpeg_vidc_video_h264_mvc_layout[] = {
-	"Frame packing arrangement sequential",
-	"Frame packing arrangement top-bottom",
-};
-
 static const char *const mpeg_vidc_video_dpb_color_format[] = {
 	"DPB Color Format None",
 	"DPB Color Format UBWC",
@@ -462,37 +427,6 @@
 	return frame_size;
 }
 
-static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
-					struct v4l2_ctrl *ctrl)
-{
-	int rc = 0;
-
-	switch (ctrl->id) {
-	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
-		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
-			ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) {
-			dprintk(VIDC_ERR,
-					"Profile %#x not supported for MVC\n",
-					ctrl->val);
-			rc = -ENOTSUPP;
-			break;
-		}
-		break;
-	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
-		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
-			ctrl->val >= V4L2_MPEG_VIDEO_H264_LEVEL_5_2) {
-			dprintk(VIDC_ERR, "Level %#x not supported for MVC\n",
-					ctrl->val);
-			rc = -ENOTSUPP;
-			break;
-		}
-		break;
-	default:
-		break;
-	}
-	return rc;
-}
-
 struct msm_vidc_format vdec_formats[] = {
 	{
 		.name = "YCbCr Semiplanar 4:2:0",
@@ -516,14 +450,6 @@
 		.type = CAPTURE_PORT,
 	},
 	{
-		.name = "Mpeg4",
-		.description = "Mpeg4 compressed format",
-		.fourcc = V4L2_PIX_FMT_MPEG4,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
 		.name = "Mpeg2",
 		.description = "Mpeg2 compressed format",
 		.fourcc = V4L2_PIX_FMT_MPEG2,
@@ -532,30 +458,6 @@
 		.defer_outputs = false,
 	},
 	{
-		.name = "H263",
-		.description = "H263 compressed format",
-		.fourcc = V4L2_PIX_FMT_H263,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
-		.name = "VC1",
-		.description = "VC-1 compressed format",
-		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
-		.name = "VC1 SP",
-		.description = "VC-1 compressed format G",
-		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
@@ -564,14 +466,6 @@
 		.defer_outputs = false,
 	},
 	{
-		.name = "H264_MVC",
-		.description = "H264_MVC compressed format",
-		.fourcc = V4L2_PIX_FMT_H264_MVC,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
 		.name = "HEVC",
 		.description = "HEVC compressed format",
 		.fourcc = V4L2_PIX_FMT_HEVC,
@@ -826,10 +720,6 @@
 	}
 	hdev = inst->core->device;
 
-	rc = is_ctrl_valid_for_codec(inst, ctrl);
-	if (rc)
-		return rc;
-
 	/* Small helper macro for quickly getting a control and err checking */
 #define TRY_GET_CTRL(__ctrl_id) ({ \
 		struct v4l2_ctrl *__temp; \
@@ -883,6 +773,7 @@
 		property_id = HAL_PARAM_VDEC_SYNC_FRAME_DECODE;
 		hal_property.enable = ctrl->val;
 		pdata = &hal_property;
+		msm_dcvs_try_enable(inst);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
 		inst->flags |= VIDC_SECURE;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 58a9bf8..13cc1b2 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -37,11 +37,6 @@
 #define MAX_HYBRID_HIER_P_LAYERS 6
 
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
-#define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY
-#define BITSTREAM_RESTRICT_ENABLED \
-	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_ENABLED
-#define BITSTREAM_RESTRICT_DISABLED \
-	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_DISABLED
 #define MIN_TIME_RESOLUTION 1
 #define MAX_TIME_RESOLUTION 0xFFFFFF
 #define DEFAULT_TIME_RESOLUTION 0x7530
@@ -91,30 +86,8 @@
 	NULL
 };
 
-static const char *const h263_level[] = {
-	"1.0",
-	"2.0",
-	"3.0",
-	"4.0",
-	"4.5",
-	"5.0",
-	"6.0",
-	"7.0",
-};
-
-static const char *const h263_profile[] = {
-	"Baseline",
-	"H320 Coding",
-	"Backward Compatible",
-	"ISWV2",
-	"ISWV3",
-	"High Compression",
-	"Internet",
-	"Interlace",
-	"High Latency",
-};
-
 static const char *const hevc_tier_level[] = {
+	"Level unknown"
 	"Main Tier Level 1",
 	"Main Tier Level 2",
 	"Main Tier Level 2.1",
@@ -454,8 +427,8 @@
 		.name = "H264 Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
-		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_5_2,
-		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN,
+		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN,
 		.menu_skip_mask = 0,
 	},
 	{
@@ -491,9 +464,9 @@
 		.name = "HEVC Tier and Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1,
-		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_2,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN,
 		.default_value =
-			V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1,
+			V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_1) |
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2) |
@@ -513,7 +486,8 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4) |
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4_1) |
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1)
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN)
 		),
 		.qmenu = hevc_tier_level,
 	},
@@ -795,72 +769,6 @@
 		.qmenu = NULL,
 	},
 	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE,
-		.name = "I-Frame X coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE,
-		.name = "I-Frame Y coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE,
-		.name = "P-Frame X coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE,
-		.name = "P-Frame Y coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE,
-		.name = "B-Frame X coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE,
-		.name = "B-Frame Y coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS,
 		.name = "Set Hier B num layers",
 		.type = V4L2_CTRL_TYPE_INTEGER,
@@ -1279,7 +1187,7 @@
 	struct hal_ltr_use use_ltr;
 	struct hal_ltr_mark mark_ltr;
 	struct hal_hybrid_hierp hyb_hierp;
-	u32 hier_p_layers = 0, hier_b_layers = 0;
+	u32 hier_p_layers = 0;
 	int max_hierp_layers;
 	int baselayerid = 0;
 	struct hal_video_signal_info signal_info = {0};
@@ -1447,6 +1355,7 @@
 		bitrate.bit_rate = ctrl->val;
 		bitrate.layer_id = 0;
 		pdata = &bitrate;
+		inst->bitrate = ctrl->val;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -1495,28 +1404,6 @@
 			temp_ctrl->val);
 		pdata = &h264_entropy_control;
 		break;
-	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
-		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL);
-
-		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
-		profile_level.profile = msm_comm_v4l2_to_hal(ctrl->id,
-						ctrl->val);
-		profile_level.level = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
-				temp_ctrl->val);
-		pdata = &profile_level;
-		break;
-	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
-		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE);
-
-		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
-		profile_level.level = msm_comm_v4l2_to_hal(ctrl->id,
-							ctrl->val);
-		profile_level.profile = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
-				temp_ctrl->val);
-		pdata = &profile_level;
-		break;
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
 
@@ -1645,38 +1532,31 @@
 		pdata = &enable;
 		break;
 	}
-	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE: {
-		struct v4l2_ctrl *air_mbs, *air_ref = NULL, *cir_mbs = NULL;
-		bool is_cont_intra_supported = false;
+	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE:
+	{
+		struct v4l2_ctrl *ir_mbs;
 
-		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS);
-
-		is_cont_intra_supported =
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) ||
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC);
+		ir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS);
 
 		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
 
-		intra_refresh.mode = ctrl->val;
-		intra_refresh.air_mbs = air_mbs->val;
-		intra_refresh.air_ref = air_ref->val;
-		intra_refresh.cir_mbs = cir_mbs->val;
+		intra_refresh.mode   = ctrl->val;
+		intra_refresh.ir_mbs = ir_mbs->val;
 
 		pdata = &intra_refresh;
 		break;
 	}
-	case V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS: {
-		struct v4l2_ctrl *ir_mode, *air_ref = NULL, *cir_mbs = NULL;
+	case V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS:
+	{
+		struct v4l2_ctrl *ir_mode;
 
 		ir_mode = TRY_GET_CTRL(
 				V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE);
 
 		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
 
-		intra_refresh.air_mbs = ctrl->val;
-		intra_refresh.mode = ir_mode->val;
-		intra_refresh.air_ref = air_ref->val;
-		intra_refresh.cir_mbs = cir_mbs->val;
+		intra_refresh.mode   = ir_mode->val;
+		intra_refresh.ir_mbs = ctrl->val;
 
 		pdata = &intra_refresh;
 		break;
@@ -1742,6 +1622,8 @@
 			enable.enable = 0;
 			break;
 		case V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME:
+			enable.enable = 1;
+			break;
 		default:
 			rc = -ENOTSUPP;
 			break;
@@ -1862,16 +1744,6 @@
 		enable.enable = ctrl->val;
 		pdata = &enable;
 		break;
-	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
-		if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
-			dprintk(VIDC_ERR, "Hier B supported for HEVC only\n");
-			rc = -ENOTSUPP;
-			break;
-		}
-		property_id = HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS;
-		hier_b_layers = ctrl->val;
-		pdata = &hier_b_layers;
-		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE:
 		property_id = HAL_PARAM_VENC_HIER_P_HYBRID_MODE;
 		hyb_hierp.layers = ctrl->val;
@@ -2108,7 +1980,6 @@
 	struct v4l2_ext_control *control;
 	struct hfi_device *hdev;
 	struct hal_ltr_mode ltr_mode;
-	struct hal_vc1e_perf_cfg_type search_range = { {0} };
 	u32 property_id = 0, layer_id = MSM_VIDC_ALL_LAYER_ID;
 	void *pdata = NULL;
 	struct msm_vidc_capability *cap = NULL;
@@ -2163,36 +2034,6 @@
 			property_id = HAL_PARAM_VENC_LTRMODE;
 			pdata = &ltr_mode;
 			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE:
-			search_range.i_frame.x_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE:
-			search_range.i_frame.y_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE:
-			search_range.p_frame.x_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE:
-			search_range.p_frame.y_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE:
-			search_range.b_frame.x_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE:
-			search_range.b_frame.y_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
 		case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH:
 			sar.aspect_width = control[i].value;
 			property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
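
With the intra-refresh interface reduced to a single ir_mbs count above, one plausible userspace policy (not defined by this patch) is to spread a cyclic refresh across a fixed number of frames, as in the following hypothetical helper:

/* Illustrative sketch only; not part of this patch. */
static u32 pick_ir_mbs(u32 width, u32 height, u32 refresh_period_frames)
{
	u32 mbs_per_frame = DIV_ROUND_UP(width, 16) * DIV_ROUND_UP(height, 16);

	/* e.g. 1920x1080 -> 120 * 68 = 8160 MBs; over 30 frames -> 272 */
	return refresh_period_frames ?
		DIV_ROUND_UP(mbs_per_frame, refresh_period_frames) :
		mbs_per_frame;
}
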
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 9427444..114a702 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -497,11 +497,12 @@
 		return -EINVAL;
 }
 
-static inline bool is_dynamic_output_buffer_mode(struct v4l2_buffer *b,
+static inline bool is_dynamic_buffer_mode(struct v4l2_buffer *b,
 				struct msm_vidc_inst *inst)
 {
-	return b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-		inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC;
+	enum vidc_ports port = b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+		OUTPUT_PORT : CAPTURE_PORT;
+	return inst->buffer_mode_set[port] == HAL_BUFFER_MODE_DYNAMIC;
 }
 
 
@@ -558,7 +559,7 @@
 		}
 		mutex_lock(&inst->registeredbufs.lock);
 		temp = get_registered_buf(inst, b, i, &plane);
-		if (temp && !is_dynamic_output_buffer_mode(b, inst)) {
+		if (temp && !is_dynamic_buffer_mode(b, inst)) {
 			dprintk(VIDC_DBG,
 				"This memory region has already been prepared\n");
 			rc = 0;
@@ -566,7 +567,7 @@
 			goto exit;
 		}
 
-		if (temp && is_dynamic_output_buffer_mode(b, inst) && !i) {
+		if (temp && is_dynamic_buffer_mode(b, inst) && !i) {
 			/*
 			 * Buffer is already present in registered list
 			 * increment ref_count, populate new values of v4l2
@@ -599,7 +600,7 @@
 		if (rc == 1) {
 			rc = 0;
 			goto exit;
-		} else if (rc == 2) {
+		} else if (rc >= 2) {
 			rc = -EEXIST;
 			goto exit;
 		}
@@ -629,7 +630,7 @@
 		}
 
 		/* We maintain one ref count for all planes*/
-		if (!i && is_dynamic_output_buffer_mode(b, inst)) {
+		if (!i && is_dynamic_buffer_mode(b, inst)) {
 			rc = buf_ref_get(inst, binfo);
 			if (rc < 0)
 				goto exit;
@@ -812,11 +813,13 @@
 		inst->bufq[port].num_planes == b->length;
 }
 
-int msm_vidc_release_buffers(void *instance, int buffer_type)
+int msm_vidc_release_buffer(void *instance, int buffer_type,
+		unsigned int buffer_index)
 {
 	struct msm_vidc_inst *inst = instance;
 	struct buffer_info *bi, *dummy;
 	int i, rc = 0;
+	int found_buf = 0;
 
 	if (!inst)
 		return -EINVAL;
@@ -834,7 +837,8 @@
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-		if (bi->type == buffer_type) {
+		if (bi->type == buffer_type && bi->v4l2_index == buffer_index) {
+			found_buf = 1;
 			list_del(&bi->list);
 			for (i = 0; i < bi->num_planes; i++) {
 				if (bi->handle[i] && bi->mapped[i]) {
@@ -845,15 +849,38 @@
 						bi->buff_off[i], bi->mapped[i]);
 					msm_comm_smem_free(inst,
 							bi->handle[i]);
+					found_buf = 2;
 				}
 			}
 			kfree(bi);
+			break;
 		}
 	}
 	mutex_unlock(&inst->registeredbufs.lock);
+
+	switch (found_buf) {
+	case 0:
+		dprintk(VIDC_WARN,
+			"%s: No buffer(type: %d) found for index %d\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	case 1:
+		dprintk(VIDC_WARN,
+			"%s: Buffer(type: %d) found for index %d, but zero planes were mapped\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	case 2:
+		dprintk(VIDC_DBG,
+			"%s: Released buffer(type: %d) for index %d\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	default:
+		break;
+	}
 	return rc;
 }
-EXPORT_SYMBOL(msm_vidc_release_buffers);
+EXPORT_SYMBOL(msm_vidc_release_buffer);
 
 int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
 {
@@ -874,7 +901,7 @@
 	rc = map_and_register_buf(inst, b);
 	if (rc == -EEXIST) {
 		if (atomic_read(&inst->in_flush) &&
-			is_dynamic_output_buffer_mode(b, inst)) {
+			is_dynamic_buffer_mode(b, inst)) {
 			dprintk(VIDC_ERR,
 				"Flush in progress, do not hold any buffers in driver\n");
 			msm_comm_flush_dynamic_buffers(inst);
@@ -998,7 +1025,7 @@
 		return rc;
 
 
-	if (is_dynamic_output_buffer_mode(b, inst)) {
+	if (is_dynamic_buffer_mode(b, inst)) {
 		buffer_info->dequeued = true;
 
 		dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
@@ -1102,7 +1129,6 @@
 	.put_userptr = vidc_put_userptr,
 };
 
-
 static void msm_vidc_cleanup_buffer(struct vb2_buffer *vb)
 {
 	int rc = 0;
@@ -1136,8 +1162,7 @@
 		return;
 	}
 
-	rc = msm_vidc_release_buffers(inst,
-		vb->type);
+	rc = msm_vidc_release_buffer(inst, vb->type, vb->index);
 	if (rc)
 		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
 			__func__, rc);
@@ -1389,7 +1414,7 @@
 			"Failed to move inst: %pK to start done state\n", inst);
 		goto fail_start;
 	}
-	msm_dcvs_init_load(inst);
+	msm_dcvs_init(inst);
 	if (msm_comm_get_stream_output_mode(inst) ==
 			HAL_VIDEO_DECODER_SECONDARY) {
 		rc = msm_comm_queue_output_buffers(inst);
@@ -1795,14 +1820,12 @@
 
 	switch (ctrl->id) {
 
-	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
 		ctrl->val = inst->profile;
 	break;
 
-	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
 		ctrl->val = inst->level;
@@ -1910,6 +1933,7 @@
 
 	INIT_MSM_VIDC_LIST(&inst->pendingq);
 	INIT_MSM_VIDC_LIST(&inst->scratchbufs);
+	INIT_MSM_VIDC_LIST(&inst->freqs);
 	INIT_MSM_VIDC_LIST(&inst->persistbufs);
 	INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
 	INIT_MSM_VIDC_LIST(&inst->outputbufs);
@@ -1920,8 +1944,9 @@
 	inst->session_type = session_type;
 	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
 	inst->core = core;
+	inst->freq = 0;
 	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
-	inst->instant_bitrate = 0;
+	inst->bitrate = 0;
 	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
 	inst->colour_space = MSM_VIDC_BT601_6_525;
 	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -1950,7 +1975,6 @@
 	if (rc)
 		goto fail_bufq_capture;
 
-	msm_dcvs_init(inst);
 	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
 			session_type);
 	if (rc) {
@@ -2036,6 +2060,8 @@
 		}
 		mutex_unlock(&inst->pendingq.lock);
 
+		msm_comm_free_freq_table(inst);
+
 		if (msm_comm_release_scratch_buffers(inst, false)) {
 			dprintk(VIDC_ERR,
 				"Failed to release scratch buffers\n");
@@ -2126,10 +2152,17 @@
 	if (!inst || !inst->core)
 		return -EINVAL;
 
+	/*
+	 * Make sure the HW has stopped working on the buffers that
+	 * we are going to free.
+	 */
+	if (inst->state != MSM_VIDC_CORE_INVALID &&
+		inst->core->state != VIDC_CORE_INVALID)
+		rc = msm_comm_try_state(inst,
+				MSM_VIDC_RELEASE_RESOURCES_DONE);
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-		if (bi->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 			int i = 0;
 
 			list_del(&bi->list);
@@ -2142,7 +2175,6 @@
 
 			kfree(bi);
 		}
-	}
 	mutex_unlock(&inst->registeredbufs.lock);
 
 	cleanup_instance(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index d891644..70427d3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -16,32 +16,250 @@
 #include "msm_vidc_debug.h"
 #include "msm_vidc_clocks.h"
 
-#define IS_VALID_DCVS_SESSION(__cur_mbpf, __min_mbpf) \
-		((__cur_mbpf) >= (__min_mbpf))
-
-static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst);
-static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst);
-static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd);
-
-int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+int msm_comm_vote_bus(struct msm_vidc_core *core)
 {
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
+	int rc = 0, vote_data_count = 0, i = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_inst *inst = NULL;
+	struct vidc_bus_vote_data *vote_data = NULL;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
 		return -EINVAL;
 	}
-	inst->dcvs_mode = msm_dcvs_check_supported(inst);
-	return 0;
+
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
+				__func__, hdev);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list)
+		++vote_data_count;
+
+	vote_data = kcalloc(vote_data_count, sizeof(*vote_data),
+			GFP_TEMPORARY);
+	if (!vote_data) {
+		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	list_for_each_entry(inst, &core->instances, list) {
+		int codec = 0, yuv = 0;
+
+		codec = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[OUTPUT_PORT].fourcc :
+			inst->fmts[CAPTURE_PORT].fourcc;
+
+		yuv = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[CAPTURE_PORT].fourcc :
+			inst->fmts[OUTPUT_PORT].fourcc;
+
+		vote_data[i].domain = get_hal_domain(inst->session_type);
+		vote_data[i].codec = get_hal_codec(codec);
+		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
+				inst->prop.width[OUTPUT_PORT]);
+		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
+				inst->prop.height[OUTPUT_PORT]);
+
+		if (inst->operating_rate)
+			vote_data[i].fps = (inst->operating_rate >> 16) ?
+				inst->operating_rate >> 16 : 1;
+		else
+			vote_data[i].fps = inst->prop.fps;
+
+		/*
+		 * TODO: support for OBP-DBP split mode hasn't been yet
+		 * implemented, once it is, this part of code needs to be
+		 * revisited since passing in accurate information to the bus
+		 * governor will drastically reduce bandwidth
+		 */
+		//vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
+		vote_data[i].num_formats = 1;
+		i++;
+	}
+	mutex_unlock(&core->lock);
+
+	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
+			vote_data_count);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
+
+	kfree(vote_data);
+	return rc;
+
+fail_alloc:
+	mutex_unlock(&core->lock);
+	return rc;
 }
 
+static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
+{
+	int fw_out_qsize = 0, buffers_in_driver = 0;
+
+	/*
+	 * DCVS always operates on Uncompressed buffers.
+	 * For Decoders, FTB and Encoders, ETB.
+	 */
+
+	if (inst->state >= MSM_VIDC_OPEN_DONE &&
+			inst->state < MSM_VIDC_STOP_DONE) {
+		if (inst->session_type == MSM_VIDC_DECODER)
+			fw_out_qsize = inst->count.ftb - inst->count.fbd;
+		else
+			fw_out_qsize = inst->count.etb - inst->count.ebd;
+
+		buffers_in_driver = inst->buffers_held_in_driver;
+	}
+
+	return fw_out_qsize + buffers_in_driver;
+}
+
+static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	int fw_pending_bufs = 0;
+	int total_output_buf = 0;
+	int buffers_outside_fw = 0;
+	struct msm_vidc_core *core;
+	struct hal_buffer_requirements *output_buf_req;
+	struct dcvs_stats *dcvs;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (!inst->dcvs_mode) {
+		dprintk(VIDC_DBG, "DCVS is not enabled\n");
+		return 0;
+	}
+
+	dcvs = &inst->dcvs;
+
+	core = inst->core;
+	mutex_lock(&inst->lock);
+	fw_pending_bufs = get_pending_bufs_fw(inst);
+
+	output_buf_req = get_buff_req_buffer(inst,
+			dcvs->buffer_type);
+	mutex_unlock(&inst->lock);
+	if (!output_buf_req) {
+		dprintk(VIDC_ERR,
+				"%s: No buffer requirement for buffer type %x\n",
+				__func__, dcvs->buffer_type);
+		return -EINVAL;
+	}
+
+	/* Total number of output buffers */
+	total_output_buf = output_buf_req->buffer_count_actual;
+
+	/* Buffers outside FW are with display */
+	buffers_outside_fw = total_output_buf - fw_pending_bufs;
+	dprintk(VIDC_DBG,
+		"Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
+		total_output_buf, fw_pending_bufs, buffers_outside_fw);
+
+	if (buffers_outside_fw >=  dcvs->min_threshold &&
+			dcvs->load > dcvs->load_low) {
+		dcvs->load = dcvs->load_low;
+	} else if (buffers_outside_fw < dcvs->min_threshold &&
+			dcvs->load == dcvs->load_low) {
+		dcvs->load = dcvs->load_high;
+	}
+	return rc;
+}
+
+static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
+	unsigned long freq, ion_phys_addr_t device_addr)
+{
+	struct vidc_freq_data *temp, *next;
+	bool found = false;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		if (temp->device_addr == device_addr) {
+			temp->freq = freq;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+		if (temp) {
+			temp->freq = freq;
+			temp->device_addr = device_addr;
+			list_add_tail(&temp->list, &inst->freqs.list);
+		} else {
+			dprintk(VIDC_ERR, "%s: allocation failed\n", __func__);
+		}
+	}
+	mutex_unlock(&inst->freqs.lock);
+}
+
+// TODO: remove this later and use queued_list instead.
+
+void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
+	ion_phys_addr_t device_addr)
+{
+	struct vidc_freq_data *temp, *next;
+
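+	/* Drop this buffer's requirement and bump the DCVS window counter. */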
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		if (temp->device_addr == device_addr)
+			temp->freq = 0;
+	}
+	mutex_unlock(&inst->freqs.lock);
+
+	inst->dcvs.buffer_counter++;
+}
+
+
+static unsigned long msm_vidc_adjust_freq(struct msm_vidc_inst *inst)
+{
+	struct vidc_freq_data *temp;
+	unsigned long freq = 0;
+
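+	/* The clock must satisfy the highest requirement still outstanding. */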
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry(temp, &inst->freqs.list, list) {
+		freq = max(freq, temp->freq);
+	}
+	mutex_unlock(&inst->freqs.lock);
+
+	/* If current requirement is within DCVS limits, try DCVS. */
+
+	if (freq < inst->dcvs.load_high) {
+		dprintk(VIDC_DBG, "Calling DCVS now\n");
+		// TODO calling DCVS here may reduce the residency. Re-visit.
+		msm_dcvs_scale_clocks(inst);
+		freq = inst->dcvs.load;
+	}
+
+	return freq;
+}
+
+void msm_comm_free_freq_table(struct msm_vidc_inst *inst)
+{
+	struct vidc_freq_data *temp, *next;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		list_del(&temp->list);
+		kfree(temp);
+	}
+	INIT_LIST_HEAD(&inst->freqs.list);
+	mutex_unlock(&inst->freqs.lock);
+}
+
+
 static inline int msm_dcvs_get_mbs_per_frame(struct msm_vidc_inst *inst)
 {
 	int height, width;
 
 	if (!inst->in_reconfig) {
 		height = max(inst->prop.height[CAPTURE_PORT],
-				inst->prop.height[OUTPUT_PORT]);
+			inst->prop.height[OUTPUT_PORT]);
 		width = max(inst->prop.width[CAPTURE_PORT],
-				inst->prop.width[OUTPUT_PORT]);
+			inst->prop.width[OUTPUT_PORT]);
 	} else {
 		height = inst->reconfig_height;
 		width = inst->reconfig_width;
@@ -50,31 +268,174 @@
 	return NUM_MBS_PER_FRAME(height, width);
 }
 
-static inline int msm_dcvs_count_active_instances(struct msm_vidc_core *core,
-	enum session_type session_type)
+static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
+	u32 filled_len)
 {
-	int active_instances = 0;
-	struct msm_vidc_inst *temp = NULL;
+	unsigned long freq = 0;
+	unsigned long vpp_cycles = 0, vsp_cycles = 0;
+	u32 vpp_cycles_per_mb;
+	u32 mbs_per_frame;
 
-	if (!core) {
-		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, core);
+	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+
+	/*
+	 * Calculate vpp, vsp cycles separately for encoder and decoder.
+	 * Even though most of the logic is common now, it may diverge
+	 * in the future.
+	 */
+
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ?
+			inst->entry->low_power_cycles :
+			inst->entry->vpp_cycles;
+
+		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+
+		/* 10 / 7 is overhead factor */
+		vsp_cycles += (inst->bitrate * 10) / 7;
+	} else if (inst->session_type == MSM_VIDC_DECODER) {
+		vpp_cycles = mbs_per_frame * inst->entry->vpp_cycles;
+
+		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
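+		/*
+		 * filled_len * 8 * fps approximates the compressed bitstream
+		 * throughput in bits per second.
+		 */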
+		/* 10 / 7 is overhead factor */
+		vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
+
+	} else {
+		// TODO return Min or Max ?
+		dprintk(VIDC_ERR, "%s: Unknown session type\n", __func__);
+		return freq;
+	}
+
+	freq = max(vpp_cycles, vsp_cycles);
+
+	return freq;
+}
+
+static int msm_vidc_set_clocks(struct msm_vidc_core *core)
+{
+	struct hfi_device *hdev;
+	unsigned long freq = 0, rate = 0;
+	struct msm_vidc_inst *temp = NULL;
+	int rc = 0, i = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+
+	hdev = core->device;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	if (!hdev || !allowed_clks_tbl) {
+		dprintk(VIDC_ERR,
+			"%s Invalid parameters\n", __func__);
 		return -EINVAL;
 	}
 
-	/* DCVS condition is as following
-	 * Decoder DCVS : Only for ONE decoder session.
-	 * Encoder DCVS : Only for ONE encoder session + ONE decoder session
-	 */
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->state >= MSM_VIDC_OPEN_DONE &&
-			temp->state < MSM_VIDC_STOP_DONE &&
-			(temp->session_type == session_type ||
-			 temp->session_type == MSM_VIDC_ENCODER))
-			active_instances++;
+		freq += temp->freq;
+	}
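+	/*
+	 * allowed_clks_tbl is sorted from highest to lowest rate; walk it
+	 * from the lowest rate up and stop at the first one that covers the
+	 * aggregated requirement.
+	 */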
+	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+		rate = allowed_clks_tbl[i].clock_rate;
+		if (rate >= freq)
+			break;
 	}
 	mutex_unlock(&core->lock);
-	return active_instances;
+
+	core->freq = rate;
+	dprintk(VIDC_PROF, "Voting for freq = %lu\n", freq);
+	rc = call_hfi_op(hdev, scale_clocks,
+			hdev->hfi_device_data, rate);
+
+	return rc;
+}
+
+static unsigned long msm_vidc_max_freq(struct msm_vidc_inst *inst)
+{
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	unsigned long freq = 0;
+
+	allowed_clks_tbl = inst->core->resources.allowed_clks_tbl;
+	freq = allowed_clks_tbl[0].clock_rate;
+	dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
+
+	return freq;
+}
+
+int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
+{
+	struct vb2_buf_entry *temp, *next;
+	unsigned long freq = 0;
+	u32 filled_len = 0;
+	ion_phys_addr_t device_addr = 0;
+
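+	/*
+	 * Until DCVS_FTB_WINDOW buffers have been processed there is not
+	 * enough history to trust the estimates, so run at the maximum rate.
+	 */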
+	if (inst->dcvs.buffer_counter < DCVS_FTB_WINDOW) {
+		freq = msm_vidc_max_freq(inst);
+		goto decision_done;
+	}
+
+	mutex_lock(&inst->pendingq.lock);
+	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+		if (temp->vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			filled_len = max(filled_len,
+				temp->vb->planes[0].bytesused);
+			device_addr = temp->vb->planes[0].m.userptr;
+		}
+	}
+	mutex_unlock(&inst->pendingq.lock);
+
+	if (!filled_len || !device_addr) {
+		freq = inst->freq;
+		goto decision_done;
+	}
+
+	freq = msm_vidc_calc_freq(inst, filled_len);
+
+	msm_vidc_update_freq_entry(inst, freq, device_addr);
+
+	freq = msm_vidc_adjust_freq(inst);
+
+decision_done:
+	inst->freq = freq;
+	msm_vidc_set_clocks(inst->core);
+	return 0;
+}
+
+int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+	hdev = core->device;
+
+	if (msm_comm_scale_clocks(inst)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale clocks. Performance might be impacted\n");
+	}
+	if (msm_comm_vote_bus(core)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale DDR bus. Performance might be impacted\n");
+	}
+	return 0;
+}
+
+int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, inst);
+		return -EINVAL;
+	}
+	if (inst->flags & VIDC_THUMBNAIL) {
+		dprintk(VIDC_PROF, "Thumbnail sessions don't need DCVS : %pK\n",
+			inst);
+		return false;
+	}
+	inst->dcvs_mode = true;
+
+	// TODO: update with the proper number based on on-target tuning.
+	inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	return true;
 }
 
 static bool msm_dcvs_check_codec_supported(int fourcc,
@@ -104,90 +465,41 @@
 	return codec_type && session_type;
 }
 
-static void msm_dcvs_update_dcvs_params(int idx, struct msm_vidc_inst *inst)
+int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst)
 {
-	struct dcvs_stats *dcvs = NULL;
-	struct msm_vidc_platform_resources *res = NULL;
-	struct dcvs_table *table = NULL;
+	int rc = 0, j = 0;
+	struct clock_freq_table *clk_freq_tbl = NULL;
+	struct clock_profile_entry *entry = NULL;
+	int fourcc;
 
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
+	clk_freq_tbl = &inst->core->resources.clock_freq_tbl;
+	fourcc = inst->session_type == MSM_VIDC_DECODER ?
+		inst->fmts[OUTPUT_PORT].fourcc :
+		inst->fmts[CAPTURE_PORT].fourcc;
 
-	dcvs = &inst->dcvs;
-	res = &inst->core->resources;
-	table = res->dcvs_tbl;
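+	/* Pick the cycle profile whose codec mask covers this session's codec. */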
+	for (j = 0; j < clk_freq_tbl->count; j++) {
+		bool matched = false;
 
-	dcvs->load_low = table[idx].load_low;
-	dcvs->load_high = table[idx].load_high;
-	dcvs->supported_codecs = table[idx].supported_codecs;
-}
+		entry = &clk_freq_tbl->clk_prof_entries[j];
 
-static void msm_dcvs_enc_check_and_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
+		matched = msm_dcvs_check_codec_supported(
+				fourcc,
+				entry->codec_mask,
+				inst->session_type);
 
-	if (inst->session_type == MSM_VIDC_ENCODER &&
-		msm_vidc_enc_dcvs_mode) {
-		rc = msm_dcvs_enc_scale_clocks(inst);
-		if (rc) {
-			dprintk(VIDC_DBG,
-				"ENC_DCVS: error while scaling clocks\n");
+		if (matched) {
+			inst->entry = entry;
+			break;
 		}
 	}
-}
 
-static void msm_dcvs_dec_check_and_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-
-	if (inst->session_type == MSM_VIDC_DECODER &&
-		msm_vidc_dec_dcvs_mode) {
-		msm_dcvs_monitor_buffer(inst);
-		rc = msm_dcvs_dec_scale_clocks(inst, false);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"%s: Failed to scale clocks in DCVS: %d\n",
-					__func__, rc);
-		}
-	}
-}
-
-void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb)
-{
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-	msm_dcvs_try_enable(inst);
-	if (!inst->dcvs_mode) {
-		dprintk(VIDC_DBG, "DCVS is not enabled\n");
-		return;
+	if (j == clk_freq_tbl->count) {
+		dprintk(VIDC_ERR,
+			"Failed: no matching clock entry found\n");
+		rc = -EINVAL;
 	}
 
-	if (is_etb)
-		msm_dcvs_enc_check_and_scale_clocks(inst);
-	else
-		msm_dcvs_dec_check_and_scale_clocks(inst);
-}
-
-static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
-{
-	int fw_out_qsize = 0, buffers_in_driver = 0;
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state >= MSM_VIDC_OPEN_DONE &&
-		inst->state < MSM_VIDC_STOP_DONE) {
-		fw_out_qsize = inst->count.ftb - inst->count.fbd;
-		buffers_in_driver = inst->buffers_held_in_driver;
-	}
-
-	return fw_out_qsize + buffers_in_driver;
+	return rc;
 }
 
 static inline void msm_dcvs_print_dcvs_stats(struct dcvs_stats *dcvs)
@@ -198,23 +510,18 @@
 		dcvs->load_high);
 
 	dprintk(VIDC_DBG,
-		"DCVS: ThrDispBufLow %d, ThrDispBufHigh %d\n",
-		dcvs->threshold_disp_buf_low,
-		dcvs->threshold_disp_buf_high);
-
-	dprintk(VIDC_DBG,
 		"DCVS: min_threshold %d, max_threshold %d\n",
 		dcvs->min_threshold, dcvs->max_threshold);
 }
 
-void msm_dcvs_init_load(struct msm_vidc_inst *inst)
+void msm_dcvs_init(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
+	int i = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u64 total_freq = 0, rate = 0, load;
+	int cycles;
 	struct dcvs_stats *dcvs;
-	struct dcvs_table *table;
-	struct msm_vidc_platform_resources *res = NULL;
-	int i, num_rows, fourcc;
 
 	dprintk(VIDC_DBG, "Init DCVS Load\n");
 
@@ -225,414 +532,38 @@
 
 	core = inst->core;
 	dcvs = &inst->dcvs;
-	res = &core->resources;
-	dcvs->load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-
-	num_rows = res->dcvs_tbl_size;
-	table = res->dcvs_tbl;
-
-	if (!num_rows || !table) {
-		dprintk(VIDC_ERR,
-				"%s: Dcvs table entry not found.\n", __func__);
-		return;
-	}
-
-	fourcc = inst->session_type == MSM_VIDC_DECODER ?
-				inst->fmts[OUTPUT_PORT].fourcc :
-				inst->fmts[CAPTURE_PORT].fourcc;
-
-	for (i = 0; i < num_rows; i++) {
-		bool matches = msm_dcvs_check_codec_supported(
-					fourcc,
-					table[i].supported_codecs,
-					inst->session_type);
-		if (!matches)
-			continue;
-
-		if (dcvs->load > table[i].load) {
-			msm_dcvs_update_dcvs_params(i, inst);
-			break;
-		}
-	}
-
-	if (inst->session_type == MSM_VIDC_ENCODER)
-		goto print_stats;
-
-	output_buf_req = get_buff_req_buffer(inst,
-		msm_comm_get_hal_output_buffer(inst));
-
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR,
-			"%s: No buffer requirement for buffer type %x\n",
-			__func__, HAL_BUFFER_OUTPUT);
-		return;
-	}
-
-	dcvs->transition_turbo = false;
-
-	/* calculating the min and max threshold */
-	if (output_buf_req->buffer_count_actual) {
-		dcvs->min_threshold = output_buf_req->buffer_count_actual -
-			output_buf_req->buffer_count_min -
-			msm_dcvs_get_extra_buff_count(inst) + 1;
-		dcvs->max_threshold = output_buf_req->buffer_count_actual;
-		if (dcvs->max_threshold <= dcvs->min_threshold)
-			dcvs->max_threshold =
-				dcvs->min_threshold + DCVS_BUFFER_SAFEGUARD;
-		dcvs->threshold_disp_buf_low = dcvs->min_threshold;
-		dcvs->threshold_disp_buf_high = dcvs->max_threshold;
-	}
-
-print_stats:
-	msm_dcvs_print_dcvs_stats(dcvs);
-}
-
-void msm_dcvs_init(struct msm_vidc_inst *inst)
-{
-	dprintk(VIDC_DBG, "Init DCVS Struct\n");
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-
-	inst->dcvs = (struct dcvs_stats){ {0} };
-	inst->dcvs.threshold_disp_buf_high = DCVS_NOMINAL_THRESHOLD;
-	inst->dcvs.threshold_disp_buf_low = DCVS_TURBO_THRESHOLD;
-}
-
-void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst)
-{
-	int new_ftb, i, prev_buf_count;
-	int fw_pending_bufs, total_output_buf, buffers_outside_fw;
-	struct dcvs_stats *dcvs;
-	struct hal_buffer_requirements *output_buf_req;
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-	dcvs = &inst->dcvs;
-
-	mutex_lock(&inst->lock);
-	output_buf_req = get_buff_req_buffer(inst,
-				msm_comm_get_hal_output_buffer(inst));
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR, "%s : Get output buffer req failed %pK\n",
-			__func__, inst);
-		mutex_unlock(&inst->lock);
-		return;
-	}
-
-	total_output_buf = output_buf_req->buffer_count_actual;
-	fw_pending_bufs = get_pending_bufs_fw(inst);
-	mutex_unlock(&inst->lock);
-
-	buffers_outside_fw = total_output_buf - fw_pending_bufs;
-	dcvs->num_ftb[dcvs->ftb_index] = buffers_outside_fw;
-	dcvs->ftb_index = (dcvs->ftb_index + 1) % DCVS_FTB_WINDOW;
-
-	if (dcvs->ftb_counter < DCVS_FTB_WINDOW)
-		dcvs->ftb_counter++;
-
-	dprintk(VIDC_PROF,
-		"DCVS: ftb_counter %d\n", dcvs->ftb_counter);
-
-	if (dcvs->ftb_counter == DCVS_FTB_WINDOW) {
-		new_ftb = 0;
-		for (i = 0; i < dcvs->ftb_counter; i++) {
-			if (dcvs->num_ftb[i] > new_ftb)
-				new_ftb = dcvs->num_ftb[i];
-		}
-
-		dcvs->threshold_disp_buf_high = new_ftb;
-		if (dcvs->threshold_disp_buf_high <=
-			dcvs->threshold_disp_buf_low +
-			DCVS_BUFFER_SAFEGUARD) {
-			dcvs->threshold_disp_buf_high =
-				dcvs->threshold_disp_buf_low +
-				DCVS_BUFFER_SAFEGUARD
-				+ (DCVS_BUFFER_SAFEGUARD == 0);
-		}
-
-		dcvs->threshold_disp_buf_high =
-			clamp(dcvs->threshold_disp_buf_high,
-				dcvs->min_threshold,
-				dcvs->max_threshold);
-	}
-
-	if (dcvs->ftb_counter == DCVS_FTB_WINDOW &&
-			dcvs->load == dcvs->load_low) {
-		prev_buf_count =
-			dcvs->num_ftb[((dcvs->ftb_index - 2 +
-				DCVS_FTB_WINDOW) % DCVS_FTB_WINDOW)];
-		if (prev_buf_count == dcvs->threshold_disp_buf_low &&
-			buffers_outside_fw <= dcvs->threshold_disp_buf_low) {
-			dcvs->transition_turbo = true;
-		} else if (buffers_outside_fw > dcvs->threshold_disp_buf_low &&
-			(buffers_outside_fw -
-			 (prev_buf_count - buffers_outside_fw))
-			< dcvs->threshold_disp_buf_low){
-			dcvs->transition_turbo = true;
-		}
-	}
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_output_buf %d buffers_outside_fw %d load %d transition_turbo %d\n",
-		total_output_buf, buffers_outside_fw, dcvs->load_low,
-		dcvs->transition_turbo);
-}
-
-static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0, fw_pending_bufs = 0, total_input_buf = 0;
-	struct msm_vidc_core *core;
-	struct dcvs_stats *dcvs;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	dcvs = &inst->dcvs;
-
-	mutex_lock(&inst->lock);
-	total_input_buf = inst->buff_req.buffer[0].buffer_count_actual;
-	fw_pending_bufs = (inst->count.etb - inst->count.ebd);
-	mutex_unlock(&inst->lock);
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_input_buf %d, fw_pending_bufs %d\n",
-		total_input_buf, fw_pending_bufs);
-
-	if (dcvs->etb_counter < total_input_buf) {
-		dcvs->etb_counter++;
-		if (dcvs->etb_counter != total_input_buf)
-			return rc;
-	}
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_input_buf %d, fw_pending_bufs %d etb_counter %d  dcvs->load %d\n",
-		total_input_buf, fw_pending_bufs,
-		dcvs->etb_counter, dcvs->load);
-
-	if (fw_pending_bufs <= DCVS_ENC_LOW_THR &&
-		dcvs->load > dcvs->load_low) {
-		dcvs->load = dcvs->load_low;
-		dcvs->prev_freq_lowered = true;
-	} else {
-		dcvs->prev_freq_lowered = false;
-	}
-
-	if (fw_pending_bufs >= DCVS_ENC_HIGH_THR &&
-		dcvs->load <= dcvs->load_low) {
-		dcvs->load = dcvs->load_high;
-		dcvs->prev_freq_increased = true;
-	} else {
-		dcvs->prev_freq_increased = false;
-	}
-
-	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
-		dprintk(VIDC_PROF,
-			"DCVS: (Scaling Clock %s)  etb clock set = %d total_input_buf = %d fw_pending_bufs %d\n",
-			dcvs->prev_freq_lowered ? "Lower" : "Higher",
-			dcvs->load, total_input_buf, fw_pending_bufs);
-
-		rc = msm_comm_scale_clocks_load(core, dcvs->load,
-				LOAD_CALC_NO_QUIRKS);
-		if (rc) {
-			dprintk(VIDC_PROF,
-				"Failed to set clock rate in FBD: %d\n", rc);
-		}
-	} else {
-		dprintk(VIDC_PROF,
-			"DCVS: etb clock load_old = %d total_input_buf = %d fw_pending_bufs %d\n",
-			dcvs->load, total_input_buf, fw_pending_bufs);
-	}
-
-	return rc;
-}
-
-
-/*
- * In DCVS scale_clocks will be done both in qbuf and FBD
- * 1 indicates call made from fbd that lowers clock
- * 0 indicates call made from qbuf that increases clock
- * based on DCVS algorithm
- */
-
-static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd)
-{
-	int rc = 0;
-	int fw_pending_bufs = 0;
-	int total_output_buf = 0;
-	int buffers_outside_fw = 0;
-	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	core = inst->core;
-	dcvs = &inst->dcvs;
-	mutex_lock(&inst->lock);
-	fw_pending_bufs = get_pending_bufs_fw(inst);
-
-	output_buf_req = get_buff_req_buffer(inst,
-		msm_comm_get_hal_output_buffer(inst));
-	mutex_unlock(&inst->lock);
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR,
-			"%s: No buffer requirement for buffer type %x\n",
-			__func__, HAL_BUFFER_OUTPUT);
-		return -EINVAL;
-	}
-
-	/* Total number of output buffers */
-	total_output_buf = output_buf_req->buffer_count_actual;
-
-	/* Buffers outside FW are with display */
-	buffers_outside_fw = total_output_buf - fw_pending_bufs;
-
-	if (buffers_outside_fw >= dcvs->threshold_disp_buf_high &&
-		!dcvs->prev_freq_increased &&
-		dcvs->load > dcvs->load_low) {
-		dcvs->load = dcvs->load_low;
-		dcvs->prev_freq_lowered = true;
-		dcvs->prev_freq_increased = false;
-	} else if (dcvs->transition_turbo && dcvs->load == dcvs->load_low) {
-		dcvs->load = dcvs->load_high;
-		dcvs->prev_freq_increased = true;
-		dcvs->prev_freq_lowered = false;
-		dcvs->transition_turbo = false;
-	} else {
-		dcvs->prev_freq_increased = false;
-		dcvs->prev_freq_lowered = false;
-	}
-
-	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
-		dprintk(VIDC_PROF,
-			"DCVS: clock set = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
-			dcvs->load, total_output_buf, buffers_outside_fw,
-			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
-
-		rc = msm_comm_scale_clocks_load(core, dcvs->load,
-				LOAD_CALC_NO_QUIRKS);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set clock rate in FBD: %d\n", rc);
-		}
-	} else {
-		dprintk(VIDC_PROF,
-			"DCVS: clock old = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
-			dcvs->load, total_output_buf, buffers_outside_fw,
-			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
-	}
-	return rc;
-}
-
-static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
-{
-	int num_mbs_per_frame = 0, instance_count = 0;
-	long int instance_load = 0;
-	long int dcvs_limit = 0;
-	struct msm_vidc_inst *temp = NULL;
-	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
-	bool is_codec_supported = false;
-	bool is_dcvs_supported = true;
-	struct msm_vidc_platform_resources *res = NULL;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	dcvs = &inst->dcvs;
-	res = &core->resources;
-
-	if (!res->dcvs_limit) {
-		dprintk(VIDC_WARN,
-				"%s: dcvs limit table not found\n", __func__);
-		return false;
-	}
-	instance_count = msm_dcvs_count_active_instances(core,
-		inst->session_type);
-	num_mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
-	instance_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-	dcvs_limit =
-		(long int)res->dcvs_limit[inst->session_type].min_mbpf *
-		res->dcvs_limit[inst->session_type].fps;
-	inst->dcvs.extra_buffer_count = 0;
-
-	if (!IS_VALID_DCVS_SESSION(num_mbs_per_frame,
-				res->dcvs_limit[inst->session_type].min_mbpf)) {
-		inst->dcvs.extra_buffer_count = 0;
-		is_dcvs_supported = false;
-		goto dcvs_decision_done;
-
-	}
-
-	if (inst->session_type == MSM_VIDC_DECODER) {
-		inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
-		output_buf_req = get_buff_req_buffer(inst,
-				msm_comm_get_hal_output_buffer(inst));
-		if (!output_buf_req) {
-			dprintk(VIDC_ERR,
-					"%s: No buffer requirement for buffer type %x\n",
-					__func__, HAL_BUFFER_OUTPUT);
-			return false;
-		}
-		is_codec_supported =
-			msm_dcvs_check_codec_supported(
-				inst->fmts[OUTPUT_PORT].fourcc,
-				inst->dcvs.supported_codecs,
-				inst->session_type);
-		if (!is_codec_supported ||
-				!msm_vidc_dec_dcvs_mode) {
-			inst->dcvs.extra_buffer_count = 0;
-			is_dcvs_supported = false;
-			goto dcvs_decision_done;
-		}
-		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
-			instance_count > 1)
-			is_dcvs_supported = false;
-	}
+	inst->dcvs = (struct dcvs_stats){0};
+	load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
+	cycles = inst->entry->vpp_cycles;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
 	if (inst->session_type == MSM_VIDC_ENCODER) {
-		inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
-		is_codec_supported =
-			msm_dcvs_check_codec_supported(
-				inst->fmts[CAPTURE_PORT].fourcc,
-				inst->dcvs.supported_codecs,
-				inst->session_type);
-		if (!is_codec_supported ||
-				!msm_vidc_enc_dcvs_mode) {
-			inst->dcvs.extra_buffer_count = 0;
-			is_dcvs_supported = false;
-			goto dcvs_decision_done;
-		}
-		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
-				instance_count > 1)
-			is_dcvs_supported = false;
+		cycles = inst->flags & VIDC_LOW_POWER ?
+			inst->entry->low_power_cycles :
+			cycles;
+
+		dcvs->buffer_type = HAL_BUFFER_INPUT;
+		// TODO: update with the proper number once buffer counts change.
+		dcvs->min_threshold = 7;
+	} else if (inst->session_type == MSM_VIDC_DECODER) {
+		dcvs->buffer_type = msm_comm_get_hal_output_buffer(inst);
+		// TODO: update with the proper number once buffer counts change.
+		dcvs->min_threshold = 4;
+	} else {
+		return;
 	}
-dcvs_decision_done:
-	if (!is_dcvs_supported) {
-		msm_comm_scale_clocks(core);
-		if (instance_count > 1) {
-			mutex_lock(&core->lock);
-			list_for_each_entry(temp, &core->instances, list)
-				temp->dcvs_mode = false;
-			mutex_unlock(&core->lock);
-		}
+
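+	/*
+	 * load is in macroblocks per second and cycles in cycles per
+	 * macroblock, so total_freq is the required clock in cycles/sec.
+	 */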
+	total_freq = cycles * load;
+
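+	/*
+	 * The lowest allowed rate that satisfies the requirement becomes the
+	 * high (nominal) DCVS operating point; the next lower table entry
+	 * becomes the low operating point.
+	 */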
+	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+		rate = allowed_clks_tbl[i].clock_rate;
+		if (rate >= total_freq)
+			break;
 	}
-	return is_dcvs_supported;
+
+	dcvs->load = dcvs->load_high = rate;
+	dcvs->load_low = (i + 1 < (int)core->resources.allowed_clks_tbl_size) ?
+		allowed_clks_tbl[i + 1].clock_rate : rate;
+
+	msm_dcvs_print_dcvs_stats(dcvs);
 }
 
 int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 383c27e1..0229ccbb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -32,9 +32,12 @@
 #define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
 
 void msm_dcvs_init(struct msm_vidc_inst *inst);
-void msm_dcvs_init_load(struct msm_vidc_inst *inst);
-void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst);
-void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb);
 int  msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
+int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
+int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst);
+void msm_comm_free_freq_table(struct msm_vidc_inst *inst);
+void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
+	ion_phys_addr_t device_addr);
+
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 14725de..5e49f42 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -52,8 +52,6 @@
 	"Extradata none",
 	"Extradata MB Quantization",
 	"Extradata Interlace Video",
-	"Extradata VC1 Framedisp",
-	"Extradata VC1 Seqdisp",
 	"Extradata timestamp",
 	"Extradata S3D Frame Packing",
 	"Extradata Frame Rate",
@@ -304,6 +302,8 @@
 			return HAL_H264_LEVEL_51;
 		case V4L2_MPEG_VIDEO_H264_LEVEL_5_2:
 			return HAL_H264_LEVEL_52;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN:
+			return HAL_H264_LEVEL_UNKNOWN;
 		default:
 			goto unknown_value;
 		}
@@ -405,6 +405,8 @@
 			return HAL_HEVC_HIGH_TIER_LEVEL_6;
 		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1:
 			return HAL_HEVC_HIGH_TIER_LEVEL_6_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN:
+			return HAL_HEVC_TIER_LEVEL_UNKNOWN;
 		default:
 			goto unknown_value;
 		}
@@ -711,22 +713,13 @@
 	case V4L2_PIX_FMT_H264_MVC:
 		codec = HAL_VIDEO_CODEC_MVC;
 		break;
-	case V4L2_PIX_FMT_H263:
-		codec = HAL_VIDEO_CODEC_H263;
-		break;
+
 	case V4L2_PIX_FMT_MPEG1:
 		codec = HAL_VIDEO_CODEC_MPEG1;
 		break;
 	case V4L2_PIX_FMT_MPEG2:
 		codec = HAL_VIDEO_CODEC_MPEG2;
 		break;
-	case V4L2_PIX_FMT_MPEG4:
-		codec = HAL_VIDEO_CODEC_MPEG4;
-		break;
-	case V4L2_PIX_FMT_VC1_ANNEX_G:
-	case V4L2_PIX_FMT_VC1_ANNEX_L:
-		codec = HAL_VIDEO_CODEC_VC1;
-		break;
 	case V4L2_PIX_FMT_VP8:
 		codec = HAL_VIDEO_CODEC_VP8;
 		break;
@@ -762,9 +755,6 @@
 	case V4L2_PIX_FMT_NV12_TP10_UBWC:
 		format = HAL_COLOR_FORMAT_NV12_TP10_UBWC;
 		break;
-	case V4L2_PIX_FMT_RGB32:
-		format = HAL_COLOR_FORMAT_RGBA8888;
-		break;
 	default:
 		format = HAL_UNUSED_COLOR;
 		break;
@@ -773,103 +763,6 @@
 	return format;
 }
 
-static int msm_comm_vote_bus(struct msm_vidc_core *core)
-{
-	int rc = 0, vote_data_count = 0, i = 0;
-	struct hfi_device *hdev;
-	struct msm_vidc_inst *inst = NULL;
-	struct vidc_bus_vote_data *vote_data = NULL;
-	unsigned long core_freq = 0;
-
-	if (!core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
-		return -EINVAL;
-	}
-
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-			__func__, hdev);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list)
-		++vote_data_count;
-
-	vote_data = kcalloc(vote_data_count, sizeof(*vote_data),
-			GFP_TEMPORARY);
-	if (!vote_data) {
-		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
-		rc = -ENOMEM;
-		goto fail_alloc;
-	}
-
-	core_freq = call_hfi_op(hdev, get_core_clock_rate,
-			hdev->hfi_device_data, 0);
-
-	list_for_each_entry(inst, &core->instances, list) {
-		int codec = 0, yuv = 0;
-
-		codec = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-		yuv = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[CAPTURE_PORT].fourcc :
-			inst->fmts[OUTPUT_PORT].fourcc;
-
-		vote_data[i].domain = get_hal_domain(inst->session_type);
-		vote_data[i].codec = get_hal_codec(codec);
-		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
-			inst->prop.width[OUTPUT_PORT]);
-		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
-			inst->prop.height[OUTPUT_PORT]);
-
-		if (inst->operating_rate)
-			vote_data[i].fps = (inst->operating_rate >> 16) ?
-				inst->operating_rate >> 16 : 1;
-		else
-			vote_data[i].fps = inst->prop.fps;
-
-		if (msm_comm_turbo_session(inst))
-			vote_data[i].power_mode = VIDC_POWER_TURBO;
-		else if (is_low_power_session(inst))
-			vote_data[i].power_mode = VIDC_POWER_LOW;
-		else
-			vote_data[i].power_mode = VIDC_POWER_NORMAL;
-		if (i == 0) {
-			vote_data[i].imem_ab_tbl = core->resources.imem_ab_tbl;
-			vote_data[i].imem_ab_tbl_size =
-				core->resources.imem_ab_tbl_size;
-			vote_data[i].core_freq = core_freq;
-		}
-
-		/*
-		 * TODO: support for OBP-DBP split mode hasn't been yet
-		 * implemented, once it is, this part of code needs to be
-		 * revisited since passing in accurate information to the bus
-		 * governor will drastically reduce bandwidth
-		 */
-		vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
-		vote_data[i].num_formats = 1;
-		i++;
-	}
-	mutex_unlock(&core->lock);
-
-	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
-			vote_data_count);
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
-
-	kfree(vote_data);
-	return rc;
-
-fail_alloc:
-	mutex_unlock(&core->lock);
-	return rc;
-}
-
 struct msm_vidc_core *get_vidc_core(int core_id)
 {
 	struct msm_vidc_core *core;
@@ -1622,9 +1515,6 @@
 		inst->prop.width[OUTPUT_PORT] = event_notify->width;
 	}
 
-	if (inst->session_type == MSM_VIDC_DECODER)
-		msm_dcvs_init_load(inst);
-
 	rc = msm_vidc_check_session_supported(inst);
 	if (!rc) {
 		seq_changed_event.type = event;
@@ -2163,6 +2053,43 @@
 	return vb;
 }
 
+static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
+		ion_phys_addr_t device_addr, u32 flags)
+{
+	struct buffer_info *binfo = NULL, *temp = NULL;
+
+	/*
+	 * Update reference count and release OR queue back the buffer,
+	 * only when firmware is not holding a reference.
+	 */
+	binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
+	if (!binfo) {
+		dprintk(VIDC_ERR,
+			"%s buffer not found in registered list\n",
+			__func__);
+		return;
+	}
+	if (flags & HAL_BUFFERFLAG_READONLY) {
+		dprintk(VIDC_DBG,
+			"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
+			binfo->fd[0], &device_addr);
+	} else {
+		dprintk(VIDC_DBG,
+			"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
+			binfo->fd[0], &device_addr);
+
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry(temp, &inst->registeredbufs.list,
+				list) {
+			if (temp == binfo) {
+				buf_ref_put(inst, binfo);
+				break;
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
+	}
+}
+
 static void handle_ebd(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_data_done *response = data;
@@ -2182,6 +2109,9 @@
 		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
 		return;
 	}
+	if (inst->buffer_mode_set[OUTPUT_PORT] == HAL_BUFFER_MODE_DYNAMIC)
+		handle_dynamic_buffer(inst,
+			response->input_done.packet_buffer, 0);
 
 	vb = get_vb_from_device_addr(&inst->bufq[OUTPUT_PORT],
 			response->input_done.packet_buffer);
@@ -2221,6 +2151,8 @@
 			empty_buf_done->alloc_len, empty_buf_done->status,
 			empty_buf_done->picture_type, empty_buf_done->flags);
 
+		msm_vidc_clear_freq_entry(inst, empty_buf_done->packet_buffer);
+
 		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
 		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
@@ -2239,11 +2171,7 @@
 
 	atomic_inc(&binfo->ref_count);
 	cnt = atomic_read(&binfo->ref_count);
-	if (cnt > 2) {
-		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
-		cnt = -EINVAL;
-	}
-	if (cnt == 2)
+	if (cnt >= 2)
 		inst->buffers_held_in_driver++;
 
 	dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
@@ -2266,7 +2194,7 @@
 	dprintk(VIDC_DBG, "REF_PUT[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
 	if (!cnt)
 		release_buf = true;
-	else if (cnt == 1)
+	else if (cnt >= 1)
 		qbuf_again = true;
 	else {
 		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
@@ -2297,45 +2225,6 @@
 	return cnt;
 }
 
-static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
-		ion_phys_addr_t device_addr, u32 flags)
-{
-	struct buffer_info *binfo = NULL, *temp = NULL;
-
-	/*
-	 * Update reference count and release OR queue back the buffer,
-	 * only when firmware is not holding a reference.
-	 */
-	if (inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC) {
-		binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
-		if (!binfo) {
-			dprintk(VIDC_ERR,
-				"%s buffer not found in registered list\n",
-				__func__);
-			return;
-		}
-		if (flags & HAL_BUFFERFLAG_READONLY) {
-			dprintk(VIDC_DBG,
-				"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
-				binfo->fd[0], &device_addr);
-		} else {
-			dprintk(VIDC_DBG,
-				"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
-				binfo->fd[0], &device_addr);
-
-			mutex_lock(&inst->registeredbufs.lock);
-			list_for_each_entry(temp, &inst->registeredbufs.list,
-							list) {
-				if (temp == binfo) {
-					buf_ref_put(inst, binfo);
-					break;
-				}
-			}
-			mutex_unlock(&inst->registeredbufs.lock);
-		}
-	}
-}
-
 static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
 		ion_phys_addr_t dev_addr)
 {
@@ -2459,6 +2348,8 @@
 			vb->planes[extra_idx].data_offset = 0;
 		}
 
+		if (inst->buffer_mode_set[CAPTURE_PORT] ==
+			HAL_BUFFER_MODE_DYNAMIC)
 		handle_dynamic_buffer(inst, fill_buf_done->packet_buffer1,
 					fill_buf_done->flags1);
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
@@ -2529,55 +2420,6 @@
 	put_inst(inst);
 }
 
-static void handle_seq_hdr_done(enum hal_command_response cmd, void *data)
-{
-	struct msm_vidc_cb_data_done *response = data;
-	struct msm_vidc_inst *inst;
-	struct vb2_buffer *vb;
-	struct vidc_hal_fbd *fill_buf_done;
-	struct vb2_v4l2_buffer *vbuf;
-
-	if (!response) {
-		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
-		return;
-	}
-
-	inst = get_inst(get_vidc_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
-		return;
-	}
-
-	fill_buf_done = (struct vidc_hal_fbd *)&response->output_done;
-	vb = get_vb_from_device_addr(&inst->bufq[CAPTURE_PORT],
-				fill_buf_done->packet_buffer1);
-	if (!vb) {
-		dprintk(VIDC_ERR,
-				"Failed to find video buffer for seq_hdr_done: %pa\n",
-				&fill_buf_done->packet_buffer1);
-		goto err_seq_hdr_done;
-	}
-	vbuf = to_vb2_v4l2_buffer(vb);
-	vb->timestamp = 0;
-
-	vb->planes[0].bytesused = fill_buf_done->filled_len1;
-	vb->planes[0].data_offset = fill_buf_done->offset1;
-
-	vbuf->flags = V4L2_QCOM_BUF_FLAG_CODECCONFIG;
-
-	dprintk(VIDC_DBG, "Filled length = %d; offset = %d; flags %x\n",
-				vb->planes[0].bytesused,
-				vb->planes[0].data_offset,
-				vbuf->flags);
-	mutex_lock(&inst->bufq[CAPTURE_PORT].lock);
-	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-	mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
-
-err_seq_hdr_done:
-	put_inst(inst);
-}
-
 void handle_cmd_response(enum hal_command_response cmd, void *data)
 {
 	dprintk(VIDC_DBG, "Command response = %d\n", cmd);
@@ -2622,9 +2464,6 @@
 	case HAL_SESSION_FLUSH_DONE:
 		handle_session_flush(cmd, data);
 		break;
-	case HAL_SESSION_GET_SEQ_HDR_DONE:
-		handle_seq_hdr_done(cmd, data);
-		break;
 	case HAL_SYS_WATCHDOG_TIMEOUT:
 	case HAL_SYS_ERROR:
 		handle_sys_error(cmd, data);
@@ -2641,127 +2480,6 @@
 	}
 }
 
-int msm_comm_scale_clocks(struct msm_vidc_core *core)
-{
-	int num_mbs_per_sec, enc_mbs_per_sec, dec_mbs_per_sec;
-
-	enc_mbs_per_sec =
-		msm_comm_get_load(core, MSM_VIDC_ENCODER, LOAD_CALC_NO_QUIRKS);
-	dec_mbs_per_sec	=
-		msm_comm_get_load(core, MSM_VIDC_DECODER, LOAD_CALC_NO_QUIRKS);
-
-	if (enc_mbs_per_sec >= dec_mbs_per_sec) {
-	/*
-	 * If Encoder load is higher, use that load. Encoder votes for higher
-	 * clock. Since Encoder and Deocder run on parallel cores, this clock
-	 * should suffice decoder usecases.
-	 */
-		num_mbs_per_sec = enc_mbs_per_sec;
-	} else {
-	/*
-	 * If Decoder load is higher, it's tricky to decide clock. Decoder
-	 * higher load might results less clocks than Encoder smaller load.
-	 * At this point driver doesn't know which clock to vote. Hence use
-	 * total load.
-	 */
-		num_mbs_per_sec = enc_mbs_per_sec + dec_mbs_per_sec;
-	}
-
-	return msm_comm_scale_clocks_load(core, num_mbs_per_sec,
-			LOAD_CALC_NO_QUIRKS);
-}
-
-int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
-		int num_mbs_per_sec, enum load_calc_quirks quirks)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_vidc_inst *inst = NULL;
-	unsigned long instant_bitrate = 0;
-	int num_sessions = 0;
-	struct vidc_clk_scale_data clk_scale_data = { {0} };
-	int codec = 0;
-
-	if (!core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
-		return -EINVAL;
-	}
-
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-			__func__, hdev);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-
-		codec = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-		if (msm_comm_turbo_session(inst))
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_TURBO;
-		else if (is_low_power_session(inst))
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_LOW;
-		else
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_NORMAL;
-
-		if (inst->dcvs_mode)
-			clk_scale_data.load[num_sessions] = inst->dcvs.load;
-		else
-			clk_scale_data.load[num_sessions] =
-				msm_comm_get_inst_load(inst, quirks);
-
-		clk_scale_data.session[num_sessions] =
-				VIDC_VOTE_DATA_SESSION_VAL(
-				get_hal_codec(codec),
-				get_hal_domain(inst->session_type));
-		num_sessions++;
-
-		if (inst->instant_bitrate > instant_bitrate)
-			instant_bitrate = inst->instant_bitrate;
-
-	}
-	clk_scale_data.num_sessions = num_sessions;
-	mutex_unlock(&core->lock);
-
-
-	rc = call_hfi_op(hdev, scale_clocks,
-		hdev->hfi_device_data, num_mbs_per_sec,
-		&clk_scale_data, instant_bitrate);
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to set clock rate: %d\n", rc);
-
-	return rc;
-}
-
-void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
-{
-	struct msm_vidc_core *core;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return;
-	}
-	core = inst->core;
-	hdev = core->device;
-
-	if (msm_comm_scale_clocks(core)) {
-		dprintk(VIDC_WARN,
-				"Failed to scale clocks. Performance might be impacted\n");
-	}
-	if (msm_comm_vote_bus(core)) {
-		dprintk(VIDC_WARN,
-				"Failed to scale DDR bus. Performance might be impacted\n");
-	}
-}
-
 static inline enum msm_vidc_thermal_level msm_comm_vidc_thermal_level(int level)
 {
 	switch (level) {
@@ -2776,33 +2494,16 @@
 	}
 }
 
-static unsigned long msm_comm_get_clock_rate(struct msm_vidc_core *core)
-{
-	struct hfi_device *hdev;
-	unsigned long freq = 0;
-
-	if (!core || !core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = core->device;
-
-	freq = call_hfi_op(hdev, get_core_clock_rate, hdev->hfi_device_data, 1);
-	dprintk(VIDC_DBG, "clock freq %ld\n", freq);
-
-	return freq;
-}
-
 static bool is_core_turbo(struct msm_vidc_core *core, unsigned long freq)
 {
 	int i = 0;
-	struct msm_vidc_platform_resources *res = &core->resources;
-	struct load_freq_table *table = res->load_freq_tbl;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u32 max_freq = 0;
 
-	for (i = 0; i < res->load_freq_tbl_size; i++) {
-		if (max_freq < table[i].freq)
-			max_freq = table[i].freq;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	for (i = 0; i < core->resources.allowed_clks_tbl_size; i++) {
+		if (max_freq < allowed_clks_tbl[i].clock_rate)
+			max_freq = allowed_clks_tbl[i].clock_rate;
 	}
 	return freq >= max_freq;
 }
@@ -2824,7 +2525,7 @@
 	}
 
 	tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
-	freq = msm_comm_get_clock_rate(core);
+	freq = core->freq;
 
 	is_turbo = is_core_turbo(core, freq);
 	dprintk(VIDC_DBG,
@@ -3042,6 +2743,8 @@
 core_already_inited:
 	change_inst_state(inst, MSM_VIDC_CORE_INIT);
 	mutex_unlock(&core->lock);
+
+	rc = msm_comm_scale_clocks_and_bus(inst);
 	return rc;
 
 fail_core_init:
@@ -3135,6 +2838,8 @@
 		return -EINVAL;
 	}
 
+	msm_comm_init_clocks_and_bus_data(inst);
+
 	rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
 			inst, get_hal_domain(inst->session_type),
 			get_hal_codec(fourcc),
@@ -4065,6 +3770,7 @@
 static void log_frame(struct msm_vidc_inst *inst, struct vidc_frame_data *data,
 		enum v4l2_buf_type type)
 {
+
 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		dprintk(VIDC_DBG,
 			"Sending etb (%pa) to hal: filled: %d, ts: %lld, flags = %#x\n",
@@ -4072,13 +3778,6 @@
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB);
 
-		if (msm_vidc_bitrate_clock_scaling &&
-			inst->session_type == MSM_VIDC_DECODER &&
-			!inst->dcvs_mode)
-			inst->instant_bitrate =
-				data->filled_len * 8 * inst->prop.fps;
-		else
-			inst->instant_bitrate = 0;
 	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		dprintk(VIDC_DBG,
 			"Sending ftb (%pa) to hal: size: %d, ts: %lld, flags = %#x\n",
@@ -4086,20 +3785,6 @@
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB);
 	}
-
-	msm_dcvs_check_and_scale_clocks(inst,
-			type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-
-	if (msm_vidc_bitrate_clock_scaling && !inst->dcvs_mode &&
-		type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
-		inst->session_type == MSM_VIDC_DECODER)
-		if (msm_comm_scale_clocks(inst->core))
-			dprintk(VIDC_WARN,
-				"Failed to scale clocks. Performance might be impacted\n");
-
-	if (msm_comm_vote_bus(inst->core))
-		dprintk(VIDC_WARN,
-			"Failed to scale bus. Performance might be impacted\n");
 }
 
 /*
@@ -4182,6 +3867,8 @@
 		return 0;
 	}
 
+	rc = msm_comm_scale_clocks_and_bus(inst);
+
 	dprintk(VIDC_DBG, "%sing %d etbs and %d ftbs\n",
 			batch_mode ? "Batch" : "Process",
 			output_count, capture_count);
@@ -4854,20 +4541,22 @@
 	 * driver should not queue any new buffer it has been holding.
 	 *
 	 * Each dynamic o/p buffer can have one of following ref_count:
-	 * ref_count : 0 - f/w has released reference and sent fbd back.
-	 *		  The buffer has been returned back to client.
+	 * ref_count : 0   - f/w has released reference and sent dynamic
+	 *                   buffer back. The buffer has been returned
+	 *                   back to client.
 	 *
-	 * ref_count : 1 - f/w is holding reference. f/w may have released
-	 *                 fbd as read_only OR fbd is pending. f/w will
-	 *		  release reference before sending flush_done.
+	 * ref_count : 1   - f/w is holding reference. f/w may have released
+	 *                   dynamic buffer as read_only OR dynamic buffer is
+	 *                   pending. f/w will release reference before sending
+	 *                   flush_done.
 	 *
-	 * ref_count : 2 - f/w is holding reference, f/w has released fbd as
-	 *                 read_only, which client has queued back to driver.
-	 *                 driver holds this buffer and will queue back
-	 *                 only when f/w releases the reference. During
-	 *		  flush_done, f/w will release the reference but driver
-	 *		  should not queue back the buffer to f/w.
-	 *		  Flush all buffers with ref_count 2.
+	 * ref_count : >=2 - f/w is holding reference, f/w has released dynamic
+	 *                   buffer as read_only, which client has queued back
+	 *                   to driver. Driver holds this buffer and will queue
+	 *                   back only when f/w releases the reference. During
+	 *                   flush_done, f/w will release the reference but
+	 *                   driver should not queue back the buffer to f/w.
+	 *                   Flush all buffers with ref_count >= 2.
 	 */
 	mutex_lock(&inst->registeredbufs.lock);
 	if (!list_empty(&inst->registeredbufs.list)) {
@@ -4876,7 +4565,7 @@
 
 		list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
 			if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-				atomic_read(&binfo->ref_count) == 2) {
+				atomic_read(&binfo->ref_count) >= 2) {
 
 				atomic_dec(&binfo->ref_count);
 				buf_event.type =
@@ -4968,6 +4657,10 @@
 		return 0;
 	}
 
+	// Reset the DCVS window; clocks go to max so the flush finishes ASAP.
+	inst->dcvs.buffer_counter = 0;
+	msm_comm_scale_clocks_and_bus(inst);
+
 	msm_comm_flush_dynamic_buffers(inst);
 
 	if (inst->state == MSM_VIDC_CORE_INVALID ||
@@ -5303,9 +4996,6 @@
 		return -ENOTSUPP;
 	}
 
-	if (!rc)
-		msm_dcvs_try_enable(inst);
-
 	if (!rc) {
 		if (inst->prop.width[CAPTURE_PORT] < capability->width.min ||
 			inst->prop.height[CAPTURE_PORT] <
@@ -5622,11 +5312,7 @@
 			if (rc)
 				dprintk(VIDC_WARN,
 					"Failed to set frame rate %d\n", rc);
-		} else {
-			msm_dcvs_init_load(inst);
 		}
-		msm_comm_scale_clocks_and_bus(inst);
-		msm_dcvs_try_enable(inst);
 	}
 exit:
 	return rc;
@@ -5710,8 +5396,7 @@
 	}
 	core = inst->core;
 
-	dprintk(VIDC_ERR, "Venus core frequency = %lu",
-		msm_comm_get_clock_rate(core));
+	dprintk(VIDC_ERR, "Venus core frequency = %lu", core->freq);
 	mutex_lock(&core->lock);
 	dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
 	msm_comm_print_inst_info(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index d898682..39a28b3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -46,10 +46,6 @@
 int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb);
-void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
-int msm_comm_scale_clocks(struct msm_vidc_core *core);
-int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
-		int num_mbs_per_sec, enum load_calc_quirks quirks);
 void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst);
 int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
 int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
@@ -101,5 +97,4 @@
 void msm_comm_print_inst_info(struct msm_vidc_inst *inst);
 int msm_comm_v4l2_to_hal(int id, int value);
 int msm_comm_hal_to_v4l2(int id, int value);
-
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 5514c66..15ee8a8 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -50,7 +50,7 @@
 })
 
 #define DYNAMIC_BUF_OWNER(__binfo) ({ \
-	atomic_read(&__binfo->ref_count) == 2 ? "video driver" : "firmware";\
+	atomic_read(&__binfo->ref_count) >= 2 ? "video driver" : "firmware";\
 })
 
 static int core_info_open(struct inode *inode, struct file *file)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index bebb5de..8562e8f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -139,6 +139,12 @@
 	MAX_OWNER
 };
 
+struct vidc_freq_data {
+	struct list_head list;
+	ion_phys_addr_t device_addr;
+	unsigned long freq;
+};
+
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
@@ -200,23 +206,14 @@
 };
 
 struct dcvs_stats {
-	int num_ftb[DCVS_FTB_WINDOW];
-	bool transition_turbo;
-	int ftb_index;
-	int ftb_counter;
-	bool prev_freq_lowered;
-	bool prev_freq_increased;
-	int threshold_disp_buf_high;
-	int threshold_disp_buf_low;
+	int buffer_counter;
 	int load;
 	int load_low;
 	int load_high;
 	int min_threshold;
 	int max_threshold;
-	int etb_counter;
-	bool is_power_save_mode;
 	unsigned int extra_buffer_count;
-	u32 supported_codecs;
+	enum hal_buffer buffer_type;
 };
 
 struct profile_data {
@@ -261,6 +258,7 @@
 	struct msm_vidc_capability *capabilities;
 	struct delayed_work fw_unload_work;
 	bool smmu_fault_handled;
+	unsigned long freq;
 };
 
 struct msm_vidc_inst {
@@ -274,6 +272,7 @@
 	struct msm_vidc_format fmts[MAX_PORT_NUM];
 	struct buf_queue bufq[MAX_PORT_NUM];
 	struct msm_vidc_list pendingq;
+	struct msm_vidc_list freqs;
 	struct msm_vidc_list scratchbufs;
 	struct msm_vidc_list persistbufs;
 	struct msm_vidc_list pending_getpropq;
@@ -302,7 +301,8 @@
 	bool dcvs_mode;
 	enum msm_vidc_pixel_depth bit_depth;
 	struct kref kref;
-	unsigned long instant_bitrate;
+	unsigned long bitrate;
+	unsigned long freq;
 	u32 buffers_held_in_driver;
 	atomic_t in_flush;
 	u32 pic_struct;
@@ -311,6 +311,7 @@
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
+	struct clock_profile_entry *entry;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 97a625b..8b9018c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -90,24 +90,6 @@
 	res->pf_ver_tbl = NULL;
 }
 
-static inline void msm_vidc_free_freq_table(
-		struct msm_vidc_platform_resources *res)
-{
-	res->load_freq_tbl = NULL;
-}
-
-static inline void msm_vidc_free_dcvs_table(
-		struct msm_vidc_platform_resources *res)
-{
-	res->dcvs_tbl = NULL;
-}
-
-static inline void msm_vidc_free_dcvs_limit(
-		struct msm_vidc_platform_resources *res)
-{
-	res->dcvs_limit = NULL;
-}
-
 static inline void msm_vidc_free_imem_ab_table(
 		struct msm_vidc_platform_resources *res)
 {
@@ -168,10 +150,7 @@
 {
 	msm_vidc_free_clock_table(res);
 	msm_vidc_free_regulator_table(res);
-	msm_vidc_free_freq_table(res);
 	msm_vidc_free_platform_version_table(res);
-	msm_vidc_free_dcvs_table(res);
-	msm_vidc_free_dcvs_limit(res);
 	msm_vidc_free_cycles_per_mb_table(res);
 	msm_vidc_free_allowed_clocks_table(res);
 	msm_vidc_free_reg_table(res);
@@ -411,6 +390,14 @@
 	int rc = 0;
 	struct platform_device *pdev = res->pdev;
 
+	/* A comparator to compare loads (needed later on) */
+	int cmp(const void *a, const void *b)
+	{
+		/* want to sort in reverse so flip the comparison */
+		return ((struct allowed_clock_rates_table *)b)->clock_rate -
+			((struct allowed_clock_rates_table *)a)->clock_rate;
+	}
+
 	if (!of_find_property(pdev->dev.of_node,
 			"qcom,allowed-clock-rates", NULL)) {
 		dprintk(VIDC_DBG, "qcom,allowed-clock-rates not found\n");
@@ -428,6 +415,9 @@
 		return rc;
 	}
 
+	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
+		 sizeof(*res->allowed_clks_tbl), cmp, NULL);
+
 	return 0;
 }
 
@@ -490,34 +480,51 @@
 		}
 		dprintk(VIDC_DBG, "codec_mask %#x\n", entry->codec_mask);
 
-		if (of_find_property(child_node, "qcom,cycles-per-mb", NULL)) {
+		if (of_find_property(child_node,
+				"qcom,vsp-cycles-per-mb", NULL)) {
 			rc = of_property_read_u32(child_node,
-					"qcom,cycles-per-mb", &entry->cycles);
+					"qcom,vsp-cycles-per-mb",
+					&entry->vsp_cycles);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"qcom,cycles-per-mb not found\n");
+					"qcom,vsp-cycles-per-mb not found\n");
 				goto error;
 			}
 		} else {
-			entry->cycles = 0;
+			entry->vsp_cycles = 0;
 		}
-		dprintk(VIDC_DBG, "cycles_per_mb %d\n", entry->cycles);
+		dprintk(VIDC_DBG, "vsp cycles_per_mb %d\n", entry->vsp_cycles);
 
 		if (of_find_property(child_node,
-				"qcom,low-power-mode-factor", NULL)) {
+				"qcom,vpp-cycles-per-mb", NULL)) {
 			rc = of_property_read_u32(child_node,
-					"qcom,low-power-mode-factor",
-					&entry->low_power_factor);
+					"qcom,vpp-cycles-per-mb",
+					&entry->vpp_cycles);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"qcom,low-power-mode-factor not found\n");
+					"qcom,vpp-cycles-per-mb not found\n");
 				goto error;
 			}
 		} else {
-			entry->low_power_factor = 0;
+			entry->vpp_cycles = 0;
+		}
+		dprintk(VIDC_DBG, "vpp cycles_per_mb %d\n", entry->vpp_cycles);
+
+		if (of_find_property(child_node,
+				"qcom,low-power-cycles-per-mb", NULL)) {
+			rc = of_property_read_u32(child_node,
+					"qcom,low-power-cycles-per-mb",
+					&entry->low_power_cycles);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,low-power-cycles-per-mb not found\n");
+				goto error;
+			}
+		} else {
+			entry->low_power_cycles = 0;
 		}
 		dprintk(VIDC_DBG, "low_power_factor %d\n",
-				entry->low_power_factor);
+				entry->low_power_cycles);
 
 		i++;
 	}
@@ -526,155 +533,6 @@
 	return rc;
 }
 
-static int msm_vidc_load_freq_table(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	/* A comparator to compare loads (needed later on) */
-	int cmp(const void *a, const void *b)
-	{
-		/* want to sort in reverse so flip the comparison */
-		return ((struct load_freq_table *)b)->load -
-			((struct load_freq_table *)a)->load;
-	}
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,load-freq-tbl", NULL)) {
-		/*
-		 * qcom,load-freq-tbl is an optional property.  It likely won't
-		 * be present on cores that we can't clock scale on.
-		 */
-		dprintk(VIDC_DBG, "qcom,load-freq-tbl not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,load-freq-tbl");
-	num_elements /= sizeof(*res->load_freq_tbl) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in frequency table\n");
-		return rc;
-	}
-
-	res->load_freq_tbl = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->load_freq_tbl), GFP_KERNEL);
-	if (!res->load_freq_tbl) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc load_freq_tbl\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,load-freq-tbl", (u32 *)res->load_freq_tbl,
-		num_elements * sizeof(*res->load_freq_tbl) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read frequency table\n");
-		msm_vidc_free_freq_table(res);
-		return -EINVAL;
-	}
-
-	res->load_freq_tbl_size = num_elements;
-
-	/* The entries in the DT might not be sorted (for aesthetic purposes).
-	 * Given that we expect the loads in descending order for our scaling
-	 * logic to work, just sort it ourselves
-	 */
-	sort(res->load_freq_tbl, res->load_freq_tbl_size,
-			sizeof(*res->load_freq_tbl), cmp, NULL);
-	return rc;
-}
-
-static int msm_vidc_load_dcvs_table(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-tbl", NULL)) {
-		/*
-		 * qcom,dcvs-tbl is an optional property. Incase qcom,dcvs-limit
-		 * property is present, it becomes mandatory. It likely won't
-		 * be present on targets that does not support the feature
-		 */
-		dprintk(VIDC_DBG, "qcom,dcvs-tbl not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,dcvs-tbl");
-	num_elements /= sizeof(*res->dcvs_tbl) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in dcvs table\n");
-		return rc;
-	}
-
-	res->dcvs_tbl = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->dcvs_tbl), GFP_KERNEL);
-	if (!res->dcvs_tbl) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc dcvs_tbl\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,dcvs-tbl", (u32 *)res->dcvs_tbl,
-		num_elements * sizeof(*res->dcvs_tbl) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read dcvs table\n");
-		msm_vidc_free_dcvs_table(res);
-		return -EINVAL;
-	}
-	res->dcvs_tbl_size = num_elements;
-
-	return rc;
-}
-
-static int msm_vidc_load_dcvs_limit(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-limit", NULL)) {
-		/*
-		 * qcom,dcvs-limit is an optional property. Incase qcom,dcvs-tbl
-		 * property is present, it becomes mandatory. It likely won't
-		 * be present on targets that does not support the feature
-		 */
-		dprintk(VIDC_DBG, "qcom,dcvs-limit not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,dcvs-limit");
-	num_elements /= sizeof(*res->dcvs_limit) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in dcvs limit\n");
-		res->dcvs_limit = NULL;
-		return rc;
-	}
-
-	res->dcvs_limit = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->dcvs_limit), GFP_KERNEL);
-	if (!res->dcvs_limit) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc dcvs_limit\n",
-				__func__);
-		return -ENOMEM;
-	}
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,dcvs-limit", (u32 *)res->dcvs_limit,
-		num_elements * sizeof(*res->dcvs_limit) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read dcvs limit\n");
-		msm_vidc_free_dcvs_limit(res);
-		return -EINVAL;
-	}
-
-	return rc;
-}
-
-
 static int msm_vidc_populate_bus(struct device *dev,
 		struct msm_vidc_platform_resources *res)
 {
@@ -952,11 +810,8 @@
 
 		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
 			vc->has_scaling = true;
-			vc->count = res->load_freq_tbl_size;
-			vc->load_freq_tbl = res->load_freq_tbl;
 		} else {
 			vc->count = 0;
-			vc->load_freq_tbl = NULL;
 			vc->has_scaling = false;
 		}
 
@@ -1016,7 +871,7 @@
 			&res->fw_name);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to read firmware name: %d\n", rc);
-		goto err_load_freq_table;
+		goto err_load_reg_table;
 	}
 	dprintk(VIDC_DBG, "Firmware filename: %s\n", res->fw_name);
 
@@ -1029,20 +884,6 @@
 	if (rc)
 		dprintk(VIDC_ERR, "Failed to load pf version table: %d\n", rc);
 
-	rc = msm_vidc_load_freq_table(res);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to load freq table: %d\n", rc);
-		goto err_load_freq_table;
-	}
-
-	rc = msm_vidc_load_dcvs_table(res);
-	if (rc)
-		dprintk(VIDC_WARN, "Failed to load dcvs table: %d\n", rc);
-
-	rc = msm_vidc_load_dcvs_limit(res);
-	if (rc)
-		dprintk(VIDC_WARN, "Failed to load dcvs limit: %d\n", rc);
-
 	rc = msm_vidc_load_imem_ab_table(res);
 	if (rc)
 		dprintk(VIDC_WARN, "Failed to load freq table: %d\n", rc);
@@ -1157,8 +998,6 @@
 err_load_buffer_usage_table:
 	msm_vidc_free_reg_table(res);
 err_load_reg_table:
-	msm_vidc_free_freq_table(res);
-err_load_freq_table:
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 4f152fb..8fd43006 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -24,12 +24,6 @@
 	u32 version_shift;
 };
 
-struct load_freq_table {
-	u32 load;
-	u32 freq;
-	u32 supported_codecs;
-};
-
 struct dcvs_table {
 	u32 load;
 	u32 load_low;
@@ -101,7 +95,6 @@
 struct clock_info {
 	const char *name;
 	struct clk *clk;
-	struct load_freq_table *load_freq_tbl;
 	u32 count;
 	bool has_scaling;
 	bool has_mem_retention;
@@ -142,8 +135,9 @@
 
 struct clock_profile_entry {
 	u32 codec_mask;
-	u32 cycles;
-	u32 low_power_factor;
+	u32 vpp_cycles;
+	u32 vsp_cycles;
+	u32 low_power_cycles;
 };
 
 struct clock_freq_table {
@@ -160,8 +154,6 @@
 	struct allowed_clock_rates_table *allowed_clks_tbl;
 	u32 allowed_clks_tbl_size;
 	struct clock_freq_table clock_freq_tbl;
-	struct load_freq_table *load_freq_tbl;
-	uint32_t load_freq_tbl_size;
 	struct dcvs_table *dcvs_tbl;
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index aabf2d3..eb36b33 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -85,13 +85,11 @@
 static int __enable_regulators(struct venus_hfi_device *device);
 static inline int __prepare_enable_clks(struct venus_hfi_device *device);
 static inline void __disable_unprepare_clks(struct venus_hfi_device *device);
-static int __scale_clocks_load(struct venus_hfi_device *device, int load,
-		struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate);
 static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet);
 static int __initialize_packetization(struct venus_hfi_device *device);
 static struct hal_session *__get_session(struct venus_hfi_device *device,
 		u32 session_id);
+static int __set_clocks(struct venus_hfi_device *device, u32 freq);
 static int __iface_cmdq_write(struct venus_hfi_device *device,
 					void *pkt);
 static int __load_fw(struct venus_hfi_device *device);
@@ -541,7 +539,8 @@
 
 	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
 
-	if (msm_vidc_debug & VIDC_PKT) {
+	if ((msm_vidc_debug & VIDC_PKT) &&
+		!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
 		dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo);
 		__dump_packet(packet, VIDC_PKT);
 	}
@@ -1137,162 +1136,6 @@
 	return rc;
 }
 
-static struct clock_info *__get_clock(struct venus_hfi_device *device,
-		char *name)
-{
-	struct clock_info *vc;
-
-	venus_hfi_for_each_clock(device, vc) {
-		if (!strcmp(vc->name, name))
-			return vc;
-	}
-
-	dprintk(VIDC_WARN, "%s Clock %s not found\n", __func__, name);
-
-	return NULL;
-}
-
-static unsigned long __get_clock_rate(struct clock_info *clock,
-	int num_mbs_per_sec, struct vidc_clk_scale_data *data)
-{
-	int num_rows = clock->count;
-	struct load_freq_table *table = clock->load_freq_tbl;
-	unsigned long freq = table[0].freq, max_freq = 0;
-	int i = 0, j = 0;
-	unsigned long instance_freq[VIDC_MAX_SESSIONS] = {0};
-
-	if (!data && !num_rows) {
-		freq = 0;
-		goto print_clk;
-	}
-
-	if ((!num_mbs_per_sec || !data) && num_rows) {
-
-		/* When no data is given, vote for the highest frequency. */
-
-		freq = table[0].freq;
-		goto print_clk;
-	}
-
-	for (i = 0; i < num_rows; i++) {
-		if (num_mbs_per_sec > table[i].load)
-			break;
-		for (j = 0; j < data->num_sessions; j++) {
-			bool matches = __is_session_supported(
-				table[i].supported_codecs, data->session[j]);
-
-			if (!matches)
-				continue;
-			instance_freq[j] = table[i].freq;
-		}
-	}
-	for (i = 0; i < data->num_sessions; i++)
-		max_freq = max(instance_freq[i], max_freq);
-
-	freq = max_freq ? : freq;
-print_clk:
-	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
-					freq, num_mbs_per_sec);
-	return freq;
-}
-
-static unsigned long __get_clock_rate_with_bitrate(struct clock_info *clock,
-		int num_mbs_per_sec, struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate)
-{
-	int num_rows = clock->count;
-	struct load_freq_table *table = clock->load_freq_tbl;
-	unsigned long freq = table[0].freq, max_freq = 0;
-	unsigned long base_freq, supported_clk[VIDC_MAX_SESSIONS] = {0};
-	int i, j;
-
-	if (!data && !num_rows) {
-		freq = 0;
-		goto print_clk;
-	}
-	if ((!num_mbs_per_sec || !data) && num_rows) {
-		freq = table[num_rows - 1].freq;
-		goto print_clk;
-	}
-
-	/* Get clock rate based on current load only */
-	base_freq = __get_clock_rate(clock, num_mbs_per_sec, data);
-
-	/*
-	 * Supported bitrate = 40% of clock frequency
-	 * Check if the instant bitrate is supported by the base frequency.
-	 * If not, move on to the next frequency which supports the bitrate.
-	 */
-
-	for (j = 0; j < data->num_sessions; j++) {
-		unsigned long supported_bitrate = 0;
-
-		for (i = num_rows - 1; i >= 0; i--) {
-			bool matches = __is_session_supported(
-				table[i].supported_codecs, data->session[j]);
-
-			if (!matches)
-				continue;
-			freq = table[i].freq;
-
-			supported_bitrate = freq * 40/100;
-			/*
-			 * Store this frequency for each instance, we need
-			 * to select the maximum freq among all the instances.
-			 */
-			if (freq >= base_freq &&
-				supported_bitrate >= instant_bitrate) {
-				supported_clk[j] = freq;
-				break;
-			}
-		}
-
-		/*
-		 * Current bitrate is higher than max supported load.
-		 * Select max frequency to handle this load.
-		 */
-		if (i < 0)
-			supported_clk[j] = table[0].freq;
-	}
-
-	for (i = 0; i < data->num_sessions; i++)
-		max_freq = max(supported_clk[i], max_freq);
-
-	freq = max_freq ? : base_freq;
-
-	if (base_freq == freq)
-		dprintk(VIDC_DBG, "Stay at base freq: %lu bitrate = %lu\n",
-			freq, instant_bitrate);
-	else
-		dprintk(VIDC_DBG, "Move up clock freq: %lu bitrate = %lu\n",
-			freq, instant_bitrate);
-print_clk:
-	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
-					freq, num_mbs_per_sec);
-	return freq;
-}
-
-static unsigned long venus_hfi_get_core_clock_rate(void *dev, bool actual_rate)
-{
-	struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
-	struct clock_info *vc;
-
-	if (!device) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, device);
-		return -EINVAL;
-	}
-
-	if (actual_rate) {
-		vc = __get_clock(device, "core_clk");
-		if (vc)
-			return clk_get_rate(vc->clk);
-		else
-			return 0;
-	} else {
-		return device->scaled_rate;
-	}
-}
-
 static int venus_hfi_suspend(void *dev)
 {
 	int rc = 0;
@@ -1390,167 +1233,31 @@
 	return rc;
 }
 
-static int __scale_clocks_cycles_per_mb(struct venus_hfi_device *device,
-		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
-{
-	int rc = 0, i = 0, j = 0;
-	struct clock_info *cl;
-	struct clock_freq_table *clk_freq_tbl = NULL;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	struct clock_profile_entry *entry = NULL;
-	u64 total_freq = 0, rate = 0;
-
-	clk_freq_tbl = &device->res->clock_freq_tbl;
-	allowed_clks_tbl = device->res->allowed_clks_tbl;
-
-	if (!data) {
-		dprintk(VIDC_DBG, "%s: NULL scale data\n", __func__);
-		total_freq = device->clk_freq;
-		goto get_clock_freq;
-	}
-
-	device->clk_bitrate = instant_bitrate;
-
-	for (i = 0; i < data->num_sessions; i++) {
-		/*
-		 * for each active session iterate through all possible
-		 * sessions and get matching session's cycles per mb
-		 * from dtsi and multiply with the session's load to
-		 * get the frequency required for the session.
-		 * accumulate all session's frequencies to get the
-		 * total clock frequency.
-		 */
-		for (j = 0; j < clk_freq_tbl->count; j++) {
-			bool matched = false;
-			u64 freq = 0;
-
-			entry = &clk_freq_tbl->clk_prof_entries[j];
-
-			matched = __is_session_supported(entry->codec_mask,
-					data->session[i]);
-			if (!matched)
-				continue;
-
-			freq = entry->cycles * data->load[i];
-
-			if (data->power_mode[i] == VIDC_POWER_LOW &&
-					entry->low_power_factor) {
-				/* low_power_factor is in Q16 format */
-				freq = (freq * entry->low_power_factor) >> 16;
-			}
-
-			total_freq += freq;
-
-			dprintk(VIDC_DBG,
-				"session[%d] %#x: cycles (%d), load (%d), freq (%llu), factor (%d)\n",
-				i, data->session[i], entry->cycles,
-				data->load[i], freq,
-				entry->low_power_factor);
-		}
-	}
-
-get_clock_freq:
-	/*
-	 * get required clock rate from allowed clock rates table
-	 */
-	for (i = device->res->allowed_clks_tbl_size - 1; i >= 0; i--) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= total_freq)
-			break;
-	}
-
-	venus_hfi_for_each_clock(device, cl) {
-		if (!cl->has_scaling)
-			continue;
-
-		device->clk_freq = rate;
-		rc = clk_set_rate(cl->clk, rate);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: Failed to set clock rate %llu %s: %d\n",
-				__func__, rate, cl->name, rc);
-			return rc;
-		}
-		if (!strcmp(cl->name, "core_clk"))
-			device->scaled_rate = rate;
-
-		dprintk(VIDC_DBG,
-			"scaling clock %s to %llu (required freq %llu)\n",
-			cl->name, rate, total_freq);
-	}
-
-	return rc;
-}
-
-static int __scale_clocks_load(struct venus_hfi_device *device, int load,
-		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
+static int __set_clocks(struct venus_hfi_device *device, u32 freq)
 {
 	struct clock_info *cl;
-
-	device->clk_bitrate = instant_bitrate;
+	int rc = 0;
 
 	venus_hfi_for_each_clock(device, cl) {
-		if (cl->has_scaling) {
-
-			unsigned long rate = 0;
-			int rc;
-			/*
-			 * load_fw and power_on needs to be addressed.
-			 * differently. Below check enforces the same.
-			 */
-			if (!device->clk_bitrate && !data && !load &&
-				device->clk_freq)
-				rate = device->clk_freq;
-
-			if (!rate) {
-				if (!device->clk_bitrate)
-					rate = __get_clock_rate(cl, load,
-							data);
-				else
-					rate = __get_clock_rate_with_bitrate(cl,
-							load, data,
-							instant_bitrate);
-			}
-			device->clk_freq = rate;
-			rc = clk_set_rate(cl->clk, rate);
+		if (cl->has_scaling) {
+			device->clk_freq = freq;
+			rc = clk_set_rate(cl->clk, freq);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"Failed to set clock rate %lu %s: %d\n",
-					rate, cl->name, rc);
+					"Failed to set clock rate %u %s: %d %s\n",
+					freq, cl->name, rc, __func__);
 				return rc;
 			}
 
-			if (!strcmp(cl->name, "core_clk"))
-				device->scaled_rate = rate;
-
-			dprintk(VIDC_PROF, "Scaling clock %s to %lu\n",
-					cl->name, rate);
+			dprintk(VIDC_PROF, "Scaling clock %s to %u\n",
+					cl->name, freq);
 		}
 	}
 
 	return 0;
 }
 
-static int __scale_clocks(struct venus_hfi_device *device,
-		int load, struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate)
-{
-	int rc = 0;
-
-	if (device->res->clock_freq_tbl.clk_prof_entries &&
-			device->res->allowed_clks_tbl)
-		rc = __scale_clocks_cycles_per_mb(device,
-				data, instant_bitrate);
-	else if (device->res->load_freq_tbl)
-		rc = __scale_clocks_load(device, load, data, instant_bitrate);
-	else
-		dprintk(VIDC_DBG, "Clock scaling is not supported\n");
-
-	return rc;
-}
-static int venus_hfi_scale_clocks(void *dev, int load,
-					struct vidc_clk_scale_data *data,
-					unsigned long instant_bitrate)
+static int venus_hfi_scale_clocks(void *dev, u32 freq)
 {
 	int rc = 0;
 	struct venus_hfi_device *device = dev;
@@ -1568,9 +1275,28 @@
 		goto exit;
 	}
 
-	rc = __scale_clocks(device, load, data, instant_bitrate);
+	rc = __set_clocks(device, freq);
 exit:
 	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __scale_clocks(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_freq_table *clk_freq_tbl = NULL;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u32 rate = 0;
+
+	clk_freq_tbl = &device->res->clock_freq_tbl;
+	allowed_clks_tbl = device->res->allowed_clks_tbl;
+
+	dprintk(VIDC_DBG, "%s: NULL scale data\n", __func__);
+	rate = device->clk_freq ? device->clk_freq :
+		allowed_clks_tbl[0].clock_rate;
+
+	rc = __set_clocks(device, rate);
 	return rc;
 }
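
Note: after this change the HFI layer no longer derives a clock rate itself; venus_hfi_scale_clocks() simply applies whatever frequency the caller passes, so the table lookup that used to live in __scale_clocks_cycles_per_mb() has to happen above the HFI layer. A minimal sketch of that lookup, assuming (as the removed loop did) that allowed_clks_tbl is sorted from highest to lowest rate and has at least one entry; the helper name and the trimmed-down struct are illustrative, not part of this patch:

	#include <linux/types.h>

	/* Simplified stand-in for the allowed-rates entry declared in
	 * msm_vidc_resources.h; the real struct may carry more fields.
	 */
	struct allowed_clock_rate {
		u32 clock_rate;
	};

	/*
	 * Pick the lowest allowed rate that still covers the required
	 * frequency.  Walk the table from its smallest entry upwards; if
	 * even the largest entry (index 0) is too small, return that
	 * largest entry, mirroring the loop removed from
	 * __scale_clocks_cycles_per_mb() above.
	 */
	static u32 pick_clock_rate(const struct allowed_clock_rate *tbl,
				   int count, u64 required_freq)
	{
		int i;
		u32 rate = tbl[0].clock_rate;

		for (i = count - 1; i >= 0; i--) {
			rate = tbl[i].clock_rate;
			if (rate >= required_freq)
				break;
		}

		return rate;
	}

The rate chosen this way would then be handed to scale_clocks(dev, rate), which venus_hfi_scale_clocks() forwards to __set_clocks() under the device lock.
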
 
@@ -3451,14 +3177,12 @@
 		case HAL_SESSION_RESUME_DONE:
 		case HAL_SESSION_SET_PROP_DONE:
 		case HAL_SESSION_GET_PROP_DONE:
-		case HAL_SESSION_PARSE_SEQ_HDR_DONE:
 		case HAL_SESSION_RELEASE_BUFFER_DONE:
 		case HAL_SESSION_RELEASE_RESOURCE_DONE:
 		case HAL_SESSION_PROPERTY_INFO:
 			session_id = &info->response.cmd.session_id;
 			break;
 		case HAL_SESSION_ERROR:
-		case HAL_SESSION_GET_SEQ_HDR_DONE:
 		case HAL_SESSION_ETB_DONE:
 		case HAL_SESSION_FTB_DONE:
 			session_id = &info->response.data.session_id;
@@ -3672,17 +3396,9 @@
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
-		int i = 0;
 
 		dprintk(VIDC_DBG, "%s: scalable? %d, count %d\n",
 				cl->name, cl->has_scaling, cl->count);
-		for (i = 0; i < cl->count; ++i) {
-			dprintk(VIDC_DBG,
-				"\tload = %d, freq = %d codecs supported %#x\n",
-				cl->load_freq_tbl[i].load,
-				cl->load_freq_tbl[i].freq,
-				cl->load_freq_tbl[i].supported_codecs);
-		}
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
@@ -4141,7 +3857,7 @@
 		goto fail_enable_clks;
 	}
 
-	rc = __scale_clocks(device, 0, NULL, 0);
+	rc = __scale_clocks(device);
 	if (rc) {
 		dprintk(VIDC_WARN,
 				"Failed to scale clocks, performance might be affected\n");
@@ -4624,7 +4340,6 @@
 	hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
 	hdev->suspend = venus_hfi_suspend;
 	hdev->flush_debug_queue = venus_hfi_flush_debug_queue;
-	hdev->get_core_clock_rate = venus_hfi_get_core_clock_rate;
 	hdev->get_default_properties = venus_hfi_get_default_properties;
 }
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 7caff53..2a833dc 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -73,8 +73,6 @@
 #define HFI_EXTRADATA_NONE					0x00000000
 #define HFI_EXTRADATA_MB_QUANTIZATION		0x00000001
 #define HFI_EXTRADATA_INTERLACE_VIDEO		0x00000002
-#define HFI_EXTRADATA_VC1_FRAMEDISP			0x00000003
-#define HFI_EXTRADATA_VC1_SEQDISP			0x00000004
 #define HFI_EXTRADATA_TIMESTAMP				0x00000005
 #define HFI_EXTRADATA_S3D_FRAME_PACKING		0x00000006
 #define HFI_EXTRADATA_FRAME_RATE			0x00000007
@@ -132,8 +130,6 @@
 	(HFI_PROPERTY_PARAM_OX_START + 0x001)
 #define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO	\
 	(HFI_PROPERTY_PARAM_OX_START + 0x002)
-#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG		\
-	(HFI_PROPERTY_PARAM_OX_START + 0x005)
 #define HFI_PROPERTY_PARAM_INDEX_EXTRADATA             \
 	(HFI_PROPERTY_PARAM_OX_START + 0x006)
 #define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA	\
@@ -175,10 +171,6 @@
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00C)
 #define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE   \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00D)
-#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA		\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x011)
-#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA		\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x012)
 #define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA			\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x013)
 #define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA	\
@@ -206,8 +198,6 @@
 
 #define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
-#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER	\
-	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x001)
 #define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING	\
 	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002)
 #define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP			\
@@ -279,14 +269,6 @@
 	u32 picture_type;
 };
 
-struct hfi_extra_data_header_config {
-	u32 type;
-	u32 buffer_type;
-	u32 version;
-	u32 port_index;
-	u32 client_extra_data_id;
-};
-
 struct hfi_mb_error_map {
 	u32 error_map_size;
 	u8 rg_error_map[1];
@@ -720,35 +702,6 @@
 	u8 rg_mb_qp[1];
 };
 
-struct hfi_extradata_vc1_pswnd {
-	u32 ps_wnd_h_offset;
-	u32 ps_wnd_v_offset;
-	u32 ps_wnd_width;
-	u32 ps_wnd_height;
-};
-
-struct hfi_extradata_vc1_framedisp_payload {
-	u32 res_pic;
-	u32 ref;
-	u32 range_map_present;
-	u32 range_map_y;
-	u32 range_map_uv;
-	u32 num_pan_scan_wnds;
-	struct hfi_extradata_vc1_pswnd rg_ps_wnd[1];
-};
-
-struct hfi_extradata_vc1_seqdisp_payload {
-	u32 prog_seg_frm;
-	u32 uv_sampling_fmt;
-	u32 color_fmt_flag;
-	u32 color_primaries;
-	u32 transfer_char;
-	u32 mat_coeff;
-	u32 aspect_ratio;
-	u32 aspect_horiz;
-	u32 aspect_vert;
-};
-
 struct hfi_extradata_timestamp_payload {
 	u32 time_stamp_low;
 	u32 time_stamp_high;
@@ -836,10 +789,6 @@
 	u32 aspect_width;
 	u32 aspect_height;
 };
-struct hfi_extradata_panscan_wndw_payload {
-	u32 num_window;
-	struct hfi_extradata_vc1_pswnd wnd[1];
-};
 
 struct hfi_extradata_frame_type_payload {
 	u32 frame_rate;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 151a82a..8aa0bbb 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -98,8 +98,6 @@
 	HAL_EXTRADATA_NONE,
 	HAL_EXTRADATA_MB_QUANTIZATION,
 	HAL_EXTRADATA_INTERLACE_VIDEO,
-	HAL_EXTRADATA_VC1_FRAMEDISP,
-	HAL_EXTRADATA_VC1_SEQDISP,
 	HAL_EXTRADATA_TIMESTAMP,
 	HAL_EXTRADATA_S3D_FRAME_PACKING,
 	HAL_EXTRADATA_FRAME_RATE,
@@ -134,7 +132,6 @@
 	HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
 	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
 	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
-	HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
 	HAL_PARAM_INDEX_EXTRADATA,
 	HAL_PARAM_FRAME_SIZE,
 	HAL_CONFIG_REALTIME,
@@ -144,22 +141,16 @@
 	HAL_PARAM_VDEC_OUTPUT_ORDER,
 	HAL_PARAM_VDEC_PICTURE_TYPE_DECODE,
 	HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
-	HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
 	HAL_PARAM_VDEC_MULTI_STREAM,
 	HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
-	HAL_PARAM_DIVX_FORMAT,
 	HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
 	HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
 	HAL_CONFIG_VDEC_MB_ERROR_MAP,
 	HAL_CONFIG_VENC_REQUEST_IFRAME,
-	HAL_PARAM_VENC_MPEG4_SHORT_HEADER,
-	HAL_PARAM_VENC_MPEG4_AC_PREDICTION,
 	HAL_CONFIG_VENC_TARGET_BITRATE,
 	HAL_PARAM_PROFILE_LEVEL_CURRENT,
 	HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
 	HAL_PARAM_VENC_RATE_CONTROL,
-	HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION,
-	HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION,
 	HAL_PARAM_VENC_H264_DEBLOCK_CONTROL,
 	HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
 	HAL_PARAM_VENC_SESSION_QP_RANGE,
@@ -190,7 +181,6 @@
 	HAL_PARAM_VDEC_NUM_CONCEALED_MB,
 	HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING,
 	HAL_PARAM_VENC_SLICE_DELIVERY_MODE,
-	HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING,
 	HAL_CONFIG_BUFFER_COUNT_ACTUAL,
 	HAL_CONFIG_VDEC_MULTI_STREAM,
 	HAL_PARAM_VENC_MULTI_SLICE_INFO,
@@ -204,12 +194,10 @@
 	HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
 	HAL_PARAM_BUFFER_ALLOC_MODE,
 	HAL_PARAM_VDEC_FRAME_ASSEMBLY,
-	HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
 	HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY,
 	HAL_PARAM_VDEC_CONCEAL_COLOR,
 	HAL_PARAM_VDEC_SCS_THRESHOLD,
 	HAL_PARAM_GET_BUFFER_REQUIREMENTS,
-	HAL_PARAM_MVC_BUFFER_LAYOUT,
 	HAL_PARAM_VENC_LTRMODE,
 	HAL_CONFIG_VENC_MARKLTRFRAME,
 	HAL_CONFIG_VENC_USELTRFRAME,
@@ -221,7 +209,6 @@
 	HAL_PARAM_VPE_COLOR_SPACE_CONVERSION,
 	HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
 	HAL_CONFIG_VENC_PERF_MODE,
-	HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS,
 	HAL_PARAM_VDEC_NON_SECURE_OUTPUT2,
 	HAL_PARAM_VENC_HIER_P_HYBRID_MODE,
 	HAL_PARAM_VENC_MBI_STATISTICS_MODE,
@@ -232,9 +219,7 @@
 	HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO,
 	HAL_CONFIG_VDEC_ENTROPY,
 	HAL_PARAM_VENC_BITRATE_TYPE,
-	HAL_PARAM_VENC_H264_PIC_ORDER_CNT,
 	HAL_PARAM_VENC_LOW_LATENCY,
-	HAL_PARAM_VENC_CONSTRAINED_INTRA_PRED,
 	HAL_CONFIG_VENC_BLUR_RESOLUTION,
 	HAL_PARAM_VENC_H264_TRANSFORM_8x8,
 	HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
@@ -290,31 +275,6 @@
 	HAL_UNUSED_CODEC = 0x10000000,
 };
 
-enum hal_h263_profile {
-	HAL_H263_PROFILE_BASELINE           = 0x00000001,
-	HAL_H263_PROFILE_H320CODING         = 0x00000002,
-	HAL_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
-	HAL_H263_PROFILE_ISWV2              = 0x00000008,
-	HAL_H263_PROFILE_ISWV3              = 0x00000010,
-	HAL_H263_PROFILE_HIGHCOMPRESSION    = 0x00000020,
-	HAL_H263_PROFILE_INTERNET           = 0x00000040,
-	HAL_H263_PROFILE_INTERLACE          = 0x00000080,
-	HAL_H263_PROFILE_HIGHLATENCY        = 0x00000100,
-	HAL_UNUSED_H263_PROFILE = 0x10000000,
-};
-
-enum hal_h263_level {
-	HAL_H263_LEVEL_10 = 0x00000001,
-	HAL_H263_LEVEL_20 = 0x00000002,
-	HAL_H263_LEVEL_30 = 0x00000004,
-	HAL_H263_LEVEL_40 = 0x00000008,
-	HAL_H263_LEVEL_45 = 0x00000010,
-	HAL_H263_LEVEL_50 = 0x00000020,
-	HAL_H263_LEVEL_60 = 0x00000040,
-	HAL_H263_LEVEL_70 = 0x00000080,
-	HAL_UNUSED_H263_LEVEL = 0x10000000,
-};
-
 enum hal_mpeg2_profile {
 	HAL_MPEG2_PROFILE_SIMPLE  = 0x00000001,
 	HAL_MPEG2_PROFILE_MAIN    = 0x00000002,
@@ -333,44 +293,6 @@
 	HAL_UNUSED_MEPG2_LEVEL = 0x10000000,
 };
 
-enum hal_mpeg4_profile {
-	HAL_MPEG4_PROFILE_SIMPLE           = 0x00000001,
-	HAL_MPEG4_PROFILE_ADVANCEDSIMPLE   = 0x00000002,
-	HAL_MPEG4_PROFILE_CORE             = 0x00000004,
-	HAL_MPEG4_PROFILE_MAIN             = 0x00000008,
-	HAL_MPEG4_PROFILE_NBIT             = 0x00000010,
-	HAL_MPEG4_PROFILE_SCALABLETEXTURE  = 0x00000020,
-	HAL_MPEG4_PROFILE_SIMPLEFACE       = 0x00000040,
-	HAL_MPEG4_PROFILE_SIMPLEFBA        = 0x00000080,
-	HAL_MPEG4_PROFILE_BASICANIMATED    = 0x00000100,
-	HAL_MPEG4_PROFILE_HYBRID           = 0x00000200,
-	HAL_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
-	HAL_MPEG4_PROFILE_CORESCALABLE     = 0x00000800,
-	HAL_MPEG4_PROFILE_ADVANCEDCODING   = 0x00001000,
-	HAL_MPEG4_PROFILE_ADVANCEDCORE     = 0x00002000,
-	HAL_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
-	HAL_MPEG4_PROFILE_SIMPLESCALABLE   = 0x00008000,
-	HAL_UNUSED_MPEG4_PROFILE = 0x10000000,
-};
-
-enum hal_mpeg4_level {
-	HAL_MPEG4_LEVEL_0  = 0x00000001,
-	HAL_MPEG4_LEVEL_0b = 0x00000002,
-	HAL_MPEG4_LEVEL_1  = 0x00000004,
-	HAL_MPEG4_LEVEL_2  = 0x00000008,
-	HAL_MPEG4_LEVEL_3  = 0x00000010,
-	HAL_MPEG4_LEVEL_4  = 0x00000020,
-	HAL_MPEG4_LEVEL_4a = 0x00000040,
-	HAL_MPEG4_LEVEL_5  = 0x00000080,
-	HAL_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
-	HAL_MPEG4_LEVEL_6  = 0x7F000001,
-	HAL_MPEG4_LEVEL_7  = 0x7F000002,
-	HAL_MPEG4_LEVEL_8  = 0x7F000003,
-	HAL_MPEG4_LEVEL_9  = 0x7F000004,
-	HAL_MPEG4_LEVEL_3b = 0x7F000005,
-	HAL_UNUSED_MPEG4_LEVEL = 0x10000000,
-};
-
 enum hal_h264_profile {
 	HAL_H264_PROFILE_BASELINE = 0x00000001,
 	HAL_H264_PROFILE_MAIN     = 0x00000002,
@@ -385,6 +307,7 @@
 };
 
 enum hal_h264_level {
+	HAL_H264_LEVEL_UNKNOWN = 0x00000000,
 	HAL_H264_LEVEL_1  = 0x00000001,
 	HAL_H264_LEVEL_1b = 0x00000002,
 	HAL_H264_LEVEL_11 = 0x00000004,
@@ -402,7 +325,6 @@
 	HAL_H264_LEVEL_5  = 0x00004000,
 	HAL_H264_LEVEL_51 = 0x00008000,
 	HAL_H264_LEVEL_52 = 0x00010000,
-	HAL_UNUSED_H264_LEVEL = 0x10000000,
 };
 
 enum hal_hevc_profile {
@@ -413,6 +335,7 @@
 };
 
 enum hal_hevc_level {
+	HAL_HEVC_TIER_LEVEL_UNKNOWN     = 0x00000000,
 	HAL_HEVC_MAIN_TIER_LEVEL_1      = 0x10000001,
 	HAL_HEVC_MAIN_TIER_LEVEL_2      = 0x10000002,
 	HAL_HEVC_MAIN_TIER_LEVEL_2_1    = 0x10000004,
@@ -439,7 +362,6 @@
 	HAL_HEVC_HIGH_TIER_LEVEL_6      = 0x20000400,
 	HAL_HEVC_HIGH_TIER_LEVEL_6_1    = 0x20000800,
 	HAL_HEVC_HIGH_TIER_LEVEL_6_2    = 0x20001000,
-	HAL_UNUSED_HEVC_TIER_LEVEL      = 0x80000000,
 };
 
 enum hal_hevc_tier {
@@ -458,66 +380,6 @@
 	HAL_VPX_PROFILE_UNUSED = 0x10000000,
 };
 
-enum hal_vc1_profile {
-	HAL_VC1_PROFILE_SIMPLE   = 0x00000001,
-	HAL_VC1_PROFILE_MAIN     = 0x00000002,
-	HAL_VC1_PROFILE_ADVANCED = 0x00000004,
-	HAL_UNUSED_VC1_PROFILE = 0x10000000,
-};
-
-enum hal_vc1_level {
-	HAL_VC1_LEVEL_LOW    = 0x00000001,
-	HAL_VC1_LEVEL_MEDIUM = 0x00000002,
-	HAL_VC1_LEVEL_HIGH   = 0x00000004,
-	HAL_VC1_LEVEL_0      = 0x00000008,
-	HAL_VC1_LEVEL_1      = 0x00000010,
-	HAL_VC1_LEVEL_2      = 0x00000020,
-	HAL_VC1_LEVEL_3      = 0x00000040,
-	HAL_VC1_LEVEL_4      = 0x00000080,
-	HAL_UNUSED_VC1_LEVEL = 0x10000000,
-};
-
-enum hal_divx_format {
-	HAL_DIVX_FORMAT_4,
-	HAL_DIVX_FORMAT_5,
-	HAL_DIVX_FORMAT_6,
-	HAL_UNUSED_DIVX_FORMAT = 0x10000000,
-};
-
-enum hal_divx_profile {
-	HAL_DIVX_PROFILE_QMOBILE  = 0x00000001,
-	HAL_DIVX_PROFILE_MOBILE   = 0x00000002,
-	HAL_DIVX_PROFILE_MT       = 0x00000004,
-	HAL_DIVX_PROFILE_HT       = 0x00000008,
-	HAL_DIVX_PROFILE_HD       = 0x00000010,
-	HAL_UNUSED_DIVX_PROFILE = 0x10000000,
-};
-
-enum hal_mvc_profile {
-	HAL_MVC_PROFILE_STEREO_HIGH  = 0x00001000,
-	HAL_UNUSED_MVC_PROFILE = 0x10000000,
-};
-
-enum hal_mvc_level {
-	HAL_MVC_LEVEL_1  = 0x00000001,
-	HAL_MVC_LEVEL_1b = 0x00000002,
-	HAL_MVC_LEVEL_11 = 0x00000004,
-	HAL_MVC_LEVEL_12 = 0x00000008,
-	HAL_MVC_LEVEL_13 = 0x00000010,
-	HAL_MVC_LEVEL_2  = 0x00000020,
-	HAL_MVC_LEVEL_21 = 0x00000040,
-	HAL_MVC_LEVEL_22 = 0x00000080,
-	HAL_MVC_LEVEL_3  = 0x00000100,
-	HAL_MVC_LEVEL_31 = 0x00000200,
-	HAL_MVC_LEVEL_32 = 0x00000400,
-	HAL_MVC_LEVEL_4  = 0x00000800,
-	HAL_MVC_LEVEL_41 = 0x00001000,
-	HAL_MVC_LEVEL_42 = 0x00002000,
-	HAL_MVC_LEVEL_5  = 0x00004000,
-	HAL_MVC_LEVEL_51 = 0x00008000,
-	HAL_UNUSED_MVC_LEVEL = 0x10000000,
-};
-
 struct hal_frame_rate {
 	enum hal_buffer buffer_type;
 	u32 frame_rate;
@@ -586,14 +448,6 @@
 	struct hal_uncompressed_plane_constraints rg_plane_format[1];
 };
 
-struct hal_extra_data_header_config {
-	u32 type;
-	enum hal_buffer buffer_type;
-	u32 version;
-	u32 port_index;
-	u32 client_extradata_id;
-};
-
 struct hal_frame_size {
 	enum hal_buffer buffer_type;
 	u32 width;
@@ -719,14 +573,6 @@
 	HAL_UNUSED_RC = 0x10000000,
 };
 
-struct hal_mpeg4_time_resolution {
-	u32 time_increment_resolution;
-};
-
-struct hal_mpeg4_header_extension {
-	u32 header_extension;
-};
-
 enum hal_h264_db_mode {
 	HAL_H264_DB_MODE_DISABLE,
 	HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
@@ -793,17 +639,13 @@
 enum hal_intra_refresh_mode {
 	HAL_INTRA_REFRESH_NONE,
 	HAL_INTRA_REFRESH_CYCLIC,
-	HAL_INTRA_REFRESH_ADAPTIVE,
-	HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE,
 	HAL_INTRA_REFRESH_RANDOM,
 	HAL_UNUSED_INTRA = 0x10000000,
 };
 
 struct hal_intra_refresh {
 	enum hal_intra_refresh_mode mode;
-	u32 air_mbs;
-	u32 air_ref;
-	u32 cir_mbs;
+	u32 ir_mbs;
 };
 
 enum hal_multi_slice {
@@ -951,12 +793,6 @@
 	HAL_UNUSED_BUFFER_LAYOUT = 0x10000000,
 };
 
-struct hal_mvc_buffer_layout {
-	enum hal_buffer_layout_type layout_type;
-	u32 bright_view_first;
-	u32 ngap;
-};
-
 struct hal_aspect_ratio {
 	u32 aspect_width;
 	u32 aspect_height;
@@ -990,13 +826,6 @@
 	u32 enable;
 };
 
-struct hal_vc1e_perf_cfg_type {
-	struct {
-		u32 x_subsampled;
-		u32 y_subsampled;
-	} i_frame, p_frame, b_frame;
-};
-
 struct hal_vpe_color_space_conversion {
 	u32 csc_matrix[HAL_MAX_MATRIX_COEFFS];
 	u32 csc_bias[HAL_MAX_BIAS_COEFFS];
@@ -1110,7 +939,6 @@
 enum ltr_mode {
 	HAL_LTR_MODE_DISABLE,
 	HAL_LTR_MODE_MANUAL,
-	HAL_LTR_MODE_PERIODIC,
 };
 
 struct hal_ltr_mode {
@@ -1154,7 +982,6 @@
 	struct hal_uncompressed_plane_constraints plane_constraints;
 	struct hal_uncompressed_plane_actual_constraints_info
 						plane_constraints_info;
-	struct hal_extra_data_header_config extra_data_header_config;
 	struct hal_frame_size frame_size;
 	struct hal_enable enable;
 	struct hal_buffer_count_actual buffer_count_actual;
@@ -1167,8 +994,6 @@
 	struct hal_bitrate bitrate;
 	struct hal_profile_level profile_level;
 	struct hal_profile_level_supported profile_level_supported;
-	struct hal_mpeg4_time_resolution mpeg4_time_resolution;
-	struct hal_mpeg4_header_extension mpeg4_header_extension;
 	struct hal_h264_db_control h264_db_control;
 	struct hal_temporal_spatial_tradeoff temporal_spatial_tradeoff;
 	struct hal_quantization quantization;
@@ -1232,8 +1057,6 @@
 	HAL_SESSION_RESUME_DONE,
 	HAL_SESSION_SET_PROP_DONE,
 	HAL_SESSION_GET_PROP_DONE,
-	HAL_SESSION_PARSE_SEQ_HDR_DONE,
-	HAL_SESSION_GET_SEQ_HDR_DONE,
 	HAL_SESSION_RELEASE_BUFFER_DONE,
 	HAL_SESSION_RELEASE_RESOURCE_DONE,
 	HAL_SESSION_PROPERTY_INFO,
@@ -1529,9 +1352,7 @@
 	int (*session_set_property)(void *sess, enum hal_property ptype,
 			void *pdata);
 	int (*session_get_property)(void *sess, enum hal_property ptype);
-	int (*scale_clocks)(void *dev, int load,
-			struct vidc_clk_scale_data *data,
-			unsigned long instant_bitrate);
+	int (*scale_clocks)(void *dev, u32 freq);
 	int (*vote_bus)(void *dev, struct vidc_bus_vote_data *data,
 			int num_data);
 	int (*get_fw_info)(void *dev, struct hal_fw_info *fw_info);
@@ -1539,7 +1360,6 @@
 	int (*get_core_capabilities)(void *dev);
 	int (*suspend)(void *dev);
 	int (*flush_debug_queue)(void *dev);
-	unsigned long (*get_core_clock_rate)(void *dev, bool actual_rate);
 	enum hal_default_properties (*get_default_properties)(void *dev);
 };
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index ad2a336..0d73410 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -81,7 +81,6 @@
 #define HFI_VIDEO_CODEC_VP8				0x00001000
 #define HFI_VIDEO_CODEC_HEVC				0x00002000
 #define HFI_VIDEO_CODEC_VP9				0x00004000
-#define HFI_VIDEO_CODEC_HEVC_HYBRID			0x80000000
 
 #define HFI_PROFILE_UNKNOWN					0x00000000
 #define HFI_H264_PROFILE_BASELINE			0x00000001
@@ -214,8 +213,6 @@
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
 #define  HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED            \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
-#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT \
-	(HFI_PROPERTY_PARAM_COMMON_START + 0x00F)
 #define  HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED	    \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
 
@@ -281,8 +278,6 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
 #define  HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
-#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC \
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x021)
 #define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
 #define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
@@ -454,9 +449,7 @@
 
 #define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
 #define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
-#define HFI_INTRA_REFRESH_ADAPTIVE			(HFI_COMMON_BASE + 0x3)
-#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE	(HFI_COMMON_BASE + 0x4)
-#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)
+#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x3)
 
 struct hfi_intra_refresh {
 	u32 mode;
@@ -551,7 +544,6 @@
 
 #define HFI_LTR_MODE_DISABLE	0x0
 #define HFI_LTR_MODE_MANUAL		0x1
-#define HFI_LTR_MODE_PERIODIC	0x2
 
 struct hfi_ltr_mode {
 	u32 ltr_mode;
@@ -746,7 +738,6 @@
 	u32 close_gop;
 	u32 h264_constrain_intra_pred;
 	u32 h264_transform_8x8_flag;
-	u32 mpeg4_qpel_enable;
 	u32 multi_refp_en;
 	u32 qmatrix_en;
 	u8 vpp_info_packet_mode;
@@ -788,15 +779,6 @@
 	u32 type;
 };
 
-#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM  (0)
-#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE  (1)
-#define HFI_MVC_BUFFER_LAYOUT_SEQ         (2)
-struct hfi_mvc_buffer_layout_descp_type {
-	u32    layout_type;
-	u32    bright_view_first;
-	u32    ngap;
-};
-
 
 #define HFI_CMD_SYS_COMMON_START			\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 302e284..cde43b6 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1595,6 +1595,114 @@
 	return buffer;
 }
 
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+	struct uvc_video_chain *chain;
+
+	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+	if (chain == NULL)
+		return NULL;
+
+	INIT_LIST_HEAD(&chain->entities);
+	mutex_init(&chain->ctrl_mutex);
+	chain->dev = dev;
+	v4l2_prio_init(&chain->prio);
+
+	return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail, but if we just take the entities we can find and put them together
+ * in the most sensible chain we can think of, turns out they do work anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ *  - Acer Integrated Camera (5986:055a)
+ *  - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+	struct uvc_video_chain *chain;
+	struct uvc_entity *iterm = NULL;
+	struct uvc_entity *oterm = NULL;
+	struct uvc_entity *entity;
+	struct uvc_entity *prev;
+
+	/*
+	 * Start by locating the input and output terminals. We only support
+	 * devices with exactly one of each for now.
+	 */
+	list_for_each_entry(entity, &dev->entities, list) {
+		if (UVC_ENTITY_IS_ITERM(entity)) {
+			if (iterm)
+				return -EINVAL;
+			iterm = entity;
+		}
+
+		if (UVC_ENTITY_IS_OTERM(entity)) {
+			if (oterm)
+				return -EINVAL;
+			oterm = entity;
+		}
+	}
+
+	if (iterm == NULL || oterm == NULL)
+		return -EINVAL;
+
+	/* Allocate the chain and fill it. */
+	chain = uvc_alloc_chain(dev);
+	if (chain == NULL)
+		return -ENOMEM;
+
+	if (uvc_scan_chain_entity(chain, oterm) < 0)
+		goto error;
+
+	prev = oterm;
+
+	/*
+	 * Add all Processing and Extension Units with two pads. The order
+	 * doesn't matter much, use reverse list traversal to connect units in
+	 * doesn't matter much; use reverse list traversal to connect units in
+	 * leads to units appearing in the order meant by the manufacturer for
+	 * the cameras known to require this heuristic.
+	 */
+	list_for_each_entry_reverse(entity, &dev->entities, list) {
+		if (entity->type != UVC_VC_PROCESSING_UNIT &&
+		    entity->type != UVC_VC_EXTENSION_UNIT)
+			continue;
+
+		if (entity->num_pads != 2)
+			continue;
+
+		if (uvc_scan_chain_entity(chain, entity) < 0)
+			goto error;
+
+		prev->baSourceID[0] = entity->id;
+		prev = entity;
+	}
+
+	if (uvc_scan_chain_entity(chain, iterm) < 0)
+		goto error;
+
+	prev->baSourceID[0] = iterm->id;
+
+	list_add_tail(&chain->list, &dev->chains);
+
+	uvc_trace(UVC_TRACE_PROBE,
+		  "Found a video chain by fallback heuristic (%s).\n",
+		  uvc_print_chain(chain));
+
+	return 0;
+
+error:
+	kfree(chain);
+	return -EINVAL;
+}
+
 /*
  * Scan the device for video chains and register video devices.
  *
@@ -1617,15 +1725,10 @@
 		if (term->chain.next || term->chain.prev)
 			continue;
 
-		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+		chain = uvc_alloc_chain(dev);
 		if (chain == NULL)
 			return -ENOMEM;
 
-		INIT_LIST_HEAD(&chain->entities);
-		mutex_init(&chain->ctrl_mutex);
-		chain->dev = dev;
-		v4l2_prio_init(&chain->prio);
-
 		term->flags |= UVC_ENTITY_FLAG_DEFAULT;
 
 		if (uvc_scan_chain(chain, term) < 0) {
@@ -1639,6 +1742,9 @@
 		list_add_tail(&chain->list, &dev->chains);
 	}
 
+	if (list_empty(&dev->chains))
+		uvc_scan_fallback(dev);
+
 	if (list_empty(&dev->chains)) {
 		uvc_printk(KERN_INFO, "No valid video chain found.\n");
 		return -1;
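
To make the reverse traversal in uvc_scan_fallback() concrete: for a hypothetical camera whose entities appear in descriptor order IT, PU, XU, OT, the heuristic rewrites the source links as sketched below (the entity names are illustrative only):

	/*
	 * Hypothetical descriptor order: IT, PU, XU, OT.
	 * uvc_scan_fallback() starts at the output terminal and walks the
	 * entity list in reverse, so the two-pad units end up linked in
	 * descriptor order:
	 *
	 *   OT->baSourceID[0] = XU->id;    XU is visited first in reverse order
	 *   XU->baSourceID[0] = PU->id;
	 *   PU->baSourceID[0] = IT->id;    final link added after the loop
	 *
	 * Resulting chain: IT -> PU -> XU -> OT.
	 */
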
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index ce5a7dc..1d1928a 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1322,6 +1322,8 @@
 					descr = "Y/CbCr 4:2:0 P10"; break;
 	case V4L2_PIX_FMT_NV12_TP10_UBWC:
 					descr = "Y/CbCr 4:2:0 TP10 UBWC"; break;
+	case V4L2_PIX_FMT_NV12_P010_UBWC:
+					descr = "Y/CbCr 4:2:0 P010 UBWC"; break;
 
 	default:
 		/* Compressed formats */
@@ -1361,6 +1363,10 @@
 		case V4L2_PIX_FMT_JPGL:		descr = "JPEG Lite"; break;
 		case V4L2_PIX_FMT_SE401:	descr = "GSPCA SE401"; break;
 		case V4L2_PIX_FMT_S5C_UYVY_JPG:	descr = "S5C73MX interleaved UYVY/JPEG"; break;
+		case V4L2_PIX_FMT_HEVC:
+			descr = "HEVC"; break;
+		case V4L2_PIX_FMT_VP9:
+			descr = "VP9"; break;
 		default:
 			WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
 			if (fmt->description[0])
diff --git a/drivers/mfd/msm-cdc-pinctrl.c b/drivers/mfd/msm-cdc-pinctrl.c
index 3ffd202..9622256 100644
--- a/drivers/mfd/msm-cdc-pinctrl.c
+++ b/drivers/mfd/msm-cdc-pinctrl.c
@@ -180,13 +180,15 @@
 		ret = PTR_ERR(gpio_data->pinctrl_sleep);
 		goto err_lookup_state;
 	}
-
-	/* Set pinctrl state to aud_sleep by default */
-	ret = pinctrl_select_state(gpio_data->pinctrl,
-				   gpio_data->pinctrl_sleep);
-	if (ret)
-		dev_err(&pdev->dev, "%s: set cdc gpio sleep state fail: %d\n",
-			__func__, ret);
+	/* skip setting to sleep state for LPI_TLMM GPIOs */
+	if (!of_property_read_bool(pdev->dev.of_node, "qcom,lpi-gpios")) {
+		/* Set pinctrl state to aud_sleep by default */
+		ret = pinctrl_select_state(gpio_data->pinctrl,
+					   gpio_data->pinctrl_sleep);
+		if (ret)
+			dev_err(&pdev->dev, "%s: set cdc gpio sleep state fail: %d\n",
+				__func__, ret);
+	}
 
 	gpio_data->gpio = of_get_named_gpio(pdev->dev.of_node,
 					    "qcom,cdc-rst-n-gpio", 0);
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index f0126dd..d143536 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -505,6 +505,7 @@
 
 	mutex_init(&wcd9xxx->io_lock);
 	mutex_init(&wcd9xxx->xfer_lock);
+	mutex_init(&wcd9xxx->reset_lock);
 
 	ret = wcd9xxx_bringup(wcd9xxx->dev);
 	if (ret) {
@@ -583,6 +584,7 @@
 err_bring_up:
 	mutex_destroy(&wcd9xxx->io_lock);
 	mutex_destroy(&wcd9xxx->xfer_lock);
+	mutex_destroy(&wcd9xxx->reset_lock);
 	return ret;
 }
 
@@ -595,6 +597,7 @@
 	wcd9xxx_core_res_deinit(&wcd9xxx->core_res);
 	mutex_destroy(&wcd9xxx->io_lock);
 	mutex_destroy(&wcd9xxx->xfer_lock);
+	mutex_destroy(&wcd9xxx->reset_lock);
 	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
 		slim_remove_device(wcd9xxx->slim_slave);
 }
@@ -1215,11 +1218,19 @@
 {
 	struct wcd9xxx *wcd9xxx;
 	struct wcd9xxx_pdata *pdata;
+	const struct slim_device_id *device_id;
 	int ret = 0;
 	int intf_type;
 
 	intf_type = wcd9xxx_get_intf_type();
 
+	wcd9xxx = devm_kzalloc(&slim->dev, sizeof(struct wcd9xxx),
+				GFP_KERNEL);
+	if (!wcd9xxx) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	if (!slim) {
 		ret = -EINVAL;
 		goto err;
@@ -1228,7 +1239,8 @@
 	if (intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
 		dev_dbg(&slim->dev, "%s:Codec is detected in I2C mode\n",
 			__func__);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err;
 	}
 	if (slim->dev.of_node) {
 		dev_info(&slim->dev, "Platform data from device tree\n");
@@ -1262,21 +1274,22 @@
 		goto err;
 	}
 
-	wcd9xxx = devm_kzalloc(&slim->dev, sizeof(struct wcd9xxx),
-				GFP_KERNEL);
-	if (!wcd9xxx) {
-		ret = -ENOMEM;
-		goto err;
-	}
 	if (!slim->ctrl) {
 		dev_err(&slim->dev, "%s: Error, no SLIMBUS control data\n",
 			__func__);
 		ret = -EINVAL;
 		goto err_codec;
 	}
-	wcd9xxx->type = slim_get_device_id(slim)->driver_data;
+	device_id = slim_get_device_id(slim);
+	if (!device_id) {
+		dev_err(&slim->dev, "%s: Error, no device id\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	wcd9xxx->type = device_id->driver_data;
 	dev_info(&slim->dev, "%s: probing for wcd type: %d, name: %s\n",
-		 __func__, wcd9xxx->type, slim_get_device_id(slim)->name);
+		 __func__, wcd9xxx->type, device_id->name);
 
 	/* wcd9xxx members init */
 	wcd9xxx->multi_reg_write = wcd9xxx_slim_multi_reg_write;
@@ -1417,6 +1430,7 @@
 err_codec:
 	slim_set_clientdata(slim, NULL);
 err:
+	devm_kfree(&slim->dev, wcd9xxx);
 	return ret;
 }
 static int wcd9xxx_slim_remove(struct slim_device *pdev)
@@ -1470,9 +1484,11 @@
 	if (wcd9xxx->dev_up)
 		return 0;
 
+	mutex_lock(&wcd9xxx->reset_lock);
 	ret = wcd9xxx_reset(wcd9xxx->dev);
 	if (ret)
 		dev_err(wcd9xxx->dev, "%s: Resetting Codec failed\n", __func__);
+	mutex_unlock(&wcd9xxx->reset_lock);
 
 	return ret;
 }
@@ -1480,6 +1496,7 @@
 static int wcd9xxx_slim_device_up(struct slim_device *sldev)
 {
 	struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev);
+	int ret = 0;
 
 	if (!wcd9xxx) {
 		pr_err("%s: wcd9xxx is NULL\n", __func__);
@@ -1491,7 +1508,12 @@
 		return 0;
 
 	wcd9xxx->dev_up = true;
-	return wcd9xxx_device_up(wcd9xxx);
+
+	mutex_lock(&wcd9xxx->reset_lock);
+	ret = wcd9xxx_device_up(wcd9xxx);
+	mutex_unlock(&wcd9xxx->reset_lock);
+
+	return ret;
 }
 
 static int wcd9xxx_slim_device_down(struct slim_device *sldev)
@@ -1509,10 +1531,14 @@
 		return 0;
 
 	wcd9xxx->dev_up = false;
-	wcd9xxx_irq_exit(&wcd9xxx->core_res);
+
+	mutex_lock(&wcd9xxx->reset_lock);
 	if (wcd9xxx->dev_down)
 		wcd9xxx->dev_down(wcd9xxx);
+	wcd9xxx_irq_exit(&wcd9xxx->core_res);
 	wcd9xxx_reset_low(wcd9xxx->dev);
+	mutex_unlock(&wcd9xxx->reset_lock);
+
 	return 0;
 }
 
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 1a50f37..30ad689e 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -307,6 +307,7 @@
 		goto err_disable_irq;
 	}
 
+	memset(status, 0, sizeof(status));
 	ret = regmap_bulk_read(wcd9xxx_res->wcd_core_regmap,
 		wcd9xxx_res->intr_reg[WCD9XXX_INTR_STATUS_BASE],
 		status, num_irq_regs);
@@ -704,20 +705,27 @@
 
 static int wcd9xxx_irq_probe(struct platform_device *pdev)
 {
-	int irq;
+	int irq, dir_apps_irq = -EINVAL;
 	struct wcd9xxx_irq_drv_data *data;
 	struct device_node *node = pdev->dev.of_node;
 	int ret = -EINVAL;
 
 	irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
-	if (!gpio_is_valid(irq)) {
+	if (!gpio_is_valid(irq))
+		dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");
+
+	if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
 		dev_err(&pdev->dev, "TLMM connect gpio not found\n");
 		return -EPROBE_DEFER;
 	}
-	irq = gpio_to_irq(irq);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "Unable to configure irq\n");
-		return irq;
+	if (dir_apps_irq > 0) {
+		irq = dir_apps_irq;
+	} else {
+		irq = gpio_to_irq(irq);
+		if (irq < 0) {
+			dev_err(&pdev->dev, "Unable to configure irq\n");
+			return irq;
+		}
 	}
 	dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
 	data = wcd9xxx_irq_add_domain(node, node->parent);
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index d3a926c..8bf1404 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -62,6 +62,10 @@
 		goto err;
 	}
 
+	if (!rx_num || rx_num > wcd9xxx->num_rx_port) {
+		pr_err("%s: invalid rx num %d\n", __func__, rx_num);
+		return -EINVAL;
+	}
 	if (wcd9xxx->rx_chs) {
 		wcd9xxx->num_rx_port = rx_num;
 		for (i = 0; i < rx_num; i++) {
@@ -84,6 +88,10 @@
 			wcd9xxx->num_rx_port);
 	}
 
+	if (!tx_num || tx_num > wcd9xxx->num_tx_port) {
+		pr_err("%s: invalid tx num %d\n", __func__, tx_num);
+		return -EINVAL;
+	}
 	if (wcd9xxx->tx_chs) {
 		wcd9xxx->num_tx_port = tx_num;
 		for (i = 0; i < tx_num; i++) {
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index 6062fb8..8d3d4ad 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -287,7 +287,7 @@
 	return dmic_sample_rate;
 
 undefined_rate:
-	dev_info(dev, "%s: Invalid %s = %d, for mclk %d\n",
+	dev_dbg(dev, "%s: Invalid %s = %d, for mclk %d\n",
 		 __func__, dmic_rate_type, dmic_sample_rate, mclk_rate);
 	dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
 
@@ -669,7 +669,7 @@
 		return -EINVAL;
 	}
 
-	value = msm_cdc_get_gpio_state(wcd9xxx->wcd_rst_np);
+	value = msm_cdc_pinctrl_get_state(wcd9xxx->wcd_rst_np);
 	if (value > 0) {
 		wcd9xxx->avoid_cdc_rstlow = 1;
 		return 0;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9c9d130..d593315 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -780,6 +780,15 @@
 	help
 	  Memory time statistics exported to /sys/kernel/memory_state_time
 
+config QPNP_MISC
+	tristate "QPNP Misc Peripheral"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say 'y' here to include support for the QTI QPNP MISC
+	  peripheral. The MISC peripheral holds the USB ID interrupt
+	  and the driver provides an API to check if this interrupt
+	  is available on the current PMIC chip.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 19f9e1d..dd12e9a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,6 +53,7 @@
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
 obj-$(CONFIG_PANEL)             += panel.o
+obj-$(CONFIG_QPNP_MISC) 	+= qpnp-misc.o
 obj-y				+= qcom/
 obj-$(CONFIG_MEMORY_STATE_TIME)	+= memory_state_time.o
 
diff --git a/drivers/misc/qcom/qdsp6v2/aac_in.c b/drivers/misc/qcom/qdsp6v2/aac_in.c
index b74aa59..c0828dc 100644
--- a/drivers/misc/qcom/qdsp6v2/aac_in.c
+++ b/drivers/misc/qcom/qdsp6v2/aac_in.c
@@ -34,6 +34,8 @@
 
 #define AAC_FORMAT_ADTS 65535
 
+#define MAX_SAMPLE_RATE_384K 384000
+
 static long aac_in_ioctl_shared(struct file *file, unsigned int cmd, void *arg)
 {
 	struct q6audio_in  *audio = file->private_data;
@@ -233,6 +235,13 @@
 			break;
 		}
 
+		if (cfg->sample_rate > MAX_SAMPLE_RATE_384K) {
+			pr_err("%s: ERROR: invalid sample rate = %u\n",
+				__func__, cfg->sample_rate);
+			rc = -EINVAL;
+			break;
+		}
+
 		min_bitrate = ((cfg->sample_rate)*(cfg->channels))/2;
 		/* This calculation should be based on AAC mode. But we cannot
 		 * get AAC mode in this setconfig. min_bitrate's logical max
diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
index c1d792b..b97a584 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
@@ -29,6 +29,8 @@
 	struct audio_client             *ac;
 	struct msm_hwacc_effects_config  config;
 
+	struct mutex			lock;
+
 	atomic_t			in_count;
 	atomic_t			out_count;
 
@@ -231,8 +233,11 @@
 		uint32_t idx = 0;
 		uint32_t size = 0;
 
+		mutex_lock(&effects->lock);
+
 		if (!effects->started) {
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 
@@ -242,11 +247,13 @@
 		if (!rc) {
 			pr_err("%s: write wait_event_timeout\n", __func__);
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 		if (!atomic_read(&effects->out_count)) {
 			pr_err("%s: pcm stopped out_count 0\n", __func__);
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 
@@ -256,6 +263,7 @@
 				copy_from_user(bufptr, (void *)arg,
 					effects->config.buf_cfg.output_len)) {
 				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
 				goto ioctl_fail;
 			}
 			rc = q6asm_write(effects->ac,
@@ -263,6 +271,7 @@
 					 0, 0, NO_TIMESTAMP);
 			if (rc < 0) {
 				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
 				goto ioctl_fail;
 			}
 			atomic_dec(&effects->out_count);
@@ -270,6 +279,7 @@
 			pr_err("%s: AUDIO_EFFECTS_WRITE: Buffer dropped\n",
 				__func__);
 		}
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_READ: {
@@ -469,6 +479,7 @@
 		break;
 	}
 	case AUDIO_EFFECTS_SET_BUF_LEN: {
+		mutex_lock(&effects->lock);
 		if (copy_from_user(&effects->config.buf_cfg, (void *)arg,
 				   sizeof(effects->config.buf_cfg))) {
 			pr_err("%s: copy from user for AUDIO_EFFECTS_SET_BUF_LEN failed\n",
@@ -478,6 +489,7 @@
 		pr_debug("%s: write buf len: %d, read buf len: %d\n",
 			 __func__, effects->config.buf_cfg.output_len,
 			 effects->config.buf_cfg.input_len);
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_GET_BUF_AVAIL: {
@@ -725,6 +737,7 @@
 	}
 	q6asm_audio_client_free(effects->ac);
 
+	mutex_destroy(&effects->lock);
 	kfree(effects);
 
 	pr_debug("%s: close session success\n", __func__);
@@ -752,6 +765,7 @@
 
 	init_waitqueue_head(&effects->read_wait);
 	init_waitqueue_head(&effects->write_wait);
+	mutex_init(&effects->lock);
 
 	effects->opened = 0;
 	effects->started = 0;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 73746ee..06e0dc3 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -122,7 +122,10 @@
 	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (addr >= region_elt->vaddr &&
 			addr < region_elt->vaddr + region_elt->len &&
-			addr + len <= region_elt->vaddr + region_elt->len) {
+			addr + len <= region_elt->vaddr + region_elt->len &&
+			addr + len > addr) {
+			/* to avoid integer addition overflow */
+
 			/* offset since we could pass vaddr inside a registered
 			 * ion buffer
 			 */
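
The added "addr + len > addr" term protects the containment test against the sum wrapping around; a standalone sketch of the same check (the helper name is illustrative, the real code applies it to the ion region list above):

	#include <linux/types.h>

	/*
	 * True only if [addr, addr + len) fits inside [start, start + size)
	 * and addr + len did not overflow.  Without the last term, a huge
	 * len could wrap addr + len around zero and slip past the
	 * upper-bound comparison.
	 */
	static bool range_within(unsigned long start, unsigned long size,
				 unsigned long addr, unsigned long len)
	{
		return addr >= start &&
		       addr < start + size &&
		       addr + len <= start + size &&
		       addr + len > addr;
	}
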
diff --git a/drivers/misc/qpnp-misc.c b/drivers/misc/qpnp-misc.c
new file mode 100644
index 0000000..3c11de0
--- /dev/null
+++ b/drivers/misc/qpnp-misc.c
@@ -0,0 +1,352 @@
+/* Copyright (c) 2013-2014,2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/qpnp/qpnp-misc.h>
+
+#define QPNP_MISC_DEV_NAME "qcom,qpnp-misc"
+
+#define REG_DIG_MAJOR_REV	0x01
+#define REG_SUBTYPE		0x05
+#define REG_PWM_SEL		0x49
+#define REG_GP_DRIVER_EN	0x4C
+
+#define PWM_SEL_MAX		0x03
+#define GP_DRIVER_EN_BIT	BIT(0)
+
+static DEFINE_MUTEX(qpnp_misc_dev_list_mutex);
+static LIST_HEAD(qpnp_misc_dev_list);
+
+struct qpnp_misc_version {
+	u8	subtype;
+	u8	dig_major_rev;
+};
+
+/**
+ * struct qpnp_misc_dev - holds controller device specific information
+ * @list:			Doubly-linked list parameter linking to other
+ *				qpnp_misc devices.
+ * @mutex:			Mutex lock that is used to ensure mutual
+ *				exclusion between probing and accessing misc
+ *				driver information
+ * @dev:			Device pointer to the misc device
+ * @regmap:			Regmap pointer to the misc device
+ * @version:			struct that holds the subtype and dig_major_rev
+ *				of the chip.
+ * @base:			Peripheral base address, read from the "reg"
+ *				device tree property.
+ * @pwm_sel:			PWM source selection value written to
+ *				REG_PWM_SEL when the GP driver is enabled.
+ * @enable_gp_driver:		true to set GP_DRIVER_EN_BIT in
+ *				REG_GP_DRIVER_EN during qpnp_misc_config().
+ */
+struct qpnp_misc_dev {
+	struct list_head		list;
+	struct mutex			mutex;
+	struct device			*dev;
+	struct regmap			*regmap;
+	struct qpnp_misc_version	version;
+
+	u32				base;
+	u8				pwm_sel;
+	bool				enable_gp_driver;
+};
+
+static const struct of_device_id qpnp_misc_match_table[] = {
+	{ .compatible = QPNP_MISC_DEV_NAME },
+	{}
+};
+
+enum qpnp_misc_version_name {
+	INVALID,
+	PM8941,
+	PM8226,
+	PMA8084,
+	PMDCALIFORNIUM,
+};
+
+static struct qpnp_misc_version irq_support_version[] = {
+	{0x00, 0x00}, /* INVALID */
+	{0x01, 0x02}, /* PM8941 */
+	{0x07, 0x00}, /* PM8226 */
+	{0x09, 0x00}, /* PMA8084 */
+	{0x16, 0x00}, /* PMDCALIFORNIUM */
+};
+
+static int qpnp_write_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 val)
+{
+	int rc;
+
+	rc = regmap_write(mdev->regmap, mdev->base + addr, val);
+	if (rc)
+		pr_err("regmap write failed rc=%d\n", rc);
+
+	return rc;
+}
+
+static int qpnp_read_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 *val)
+{
+	unsigned int temp;
+	int rc;
+
+	rc = regmap_read(mdev->regmap, mdev->base + addr, &temp);
+	if (rc) {
+		pr_err("regmap read failed rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = (u8)temp;
+	return rc;
+}
+
+static int get_qpnp_misc_version_name(struct qpnp_misc_dev *dev)
+{
+	int i;
+
+	for (i = 1; i < ARRAY_SIZE(irq_support_version); i++)
+		if (dev->version.subtype == irq_support_version[i].subtype &&
+		    dev->version.dig_major_rev >=
+					irq_support_version[i].dig_major_rev)
+			return i;
+
+	return INVALID;
+}
+
+static bool __misc_irqs_available(struct qpnp_misc_dev *dev)
+{
+	int version_name = get_qpnp_misc_version_name(dev);
+
+	if (version_name == INVALID)
+		return 0;
+	return 1;
+}
+
+int qpnp_misc_read_reg(struct device_node *node, u16 addr, u8 *val)
+{
+	struct qpnp_misc_dev *mdev = NULL;
+	struct qpnp_misc_dev *mdev_found = NULL;
+	int rc;
+	u8 temp;
+
+	if (IS_ERR_OR_NULL(node)) {
+		pr_err("Invalid device node pointer\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_for_each_entry(mdev, &qpnp_misc_dev_list, list) {
+		if (mdev->dev->of_node == node) {
+			mdev_found = mdev;
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	if (!mdev_found) {
+		/*
+		 * No MISC device was found. This API should only
+		 * be called by drivers which have specified the
+		 * misc phandle in their device tree node.
+		 */
+		pr_err("no probed misc device found\n");
+		return -EPROBE_DEFER;
+	}
+
+	rc = qpnp_read_byte(mdev, addr, &temp);
+	if (rc < 0) {
+		dev_err(mdev->dev, "Failed to read addr %x, rc=%d\n", addr, rc);
+		return rc;
+	}
+
+	*val = temp;
+	return 0;
+}
+
+int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+	struct device_node *misc_node = NULL;
+	struct qpnp_misc_dev *mdev = NULL;
+	struct qpnp_misc_dev *mdev_found = NULL;
+
+	if (IS_ERR_OR_NULL(consumer_dev)) {
+		pr_err("Invalid consumer device pointer\n");
+		return -EINVAL;
+	}
+
+	misc_node = of_parse_phandle(consumer_dev->of_node, "qcom,misc-ref", 0);
+	if (!misc_node) {
+		pr_debug("Could not find qcom,misc-ref property in %s\n",
+			consumer_dev->of_node->full_name);
+		return 0;
+	}
+
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_for_each_entry(mdev, &qpnp_misc_dev_list, list) {
+		if (mdev->dev->of_node == misc_node) {
+			mdev_found = mdev;
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	if (!mdev_found) {
+		/*
+		 * No MISC device was found. This API should only
+		 * be called by drivers which have specified the
+		 * misc phandle in their device tree node.
+		 */
+		pr_err("no probed misc device found\n");
+		return -EPROBE_DEFER;
+	}
+
+	return __misc_irqs_available(mdev_found);
+}
+
+static int qpnp_misc_dt_init(struct qpnp_misc_dev *mdev)
+{
+	struct device_node *node = mdev->dev->of_node;
+	u32 val;
+	int rc;
+
+	rc = of_property_read_u32(node, "reg", &mdev->base);
+	if (rc < 0 || !mdev->base) {
+		dev_err(mdev->dev, "Base address not defined or invalid\n");
+		return -EINVAL;
+	}
+
+	if (!of_property_read_u32(node, "qcom,pwm-sel", &val)) {
+		if (val > PWM_SEL_MAX) {
+			dev_err(mdev->dev, "Invalid value for pwm-sel\n");
+			return -EINVAL;
+		}
+		mdev->pwm_sel = (u8)val;
+	}
+	mdev->enable_gp_driver = of_property_read_bool(node,
+						"qcom,enable-gp-driver");
+
+	WARN((mdev->pwm_sel > 0 && !mdev->enable_gp_driver),
+			"Setting PWM source without enabling gp driver\n");
+	WARN((mdev->pwm_sel == 0 && mdev->enable_gp_driver),
+			"Enabling gp driver without setting PWM source\n");
+
+	return 0;
+}
+
+static int qpnp_misc_config(struct qpnp_misc_dev *mdev)
+{
+	int rc, version_name;
+
+	version_name = get_qpnp_misc_version_name(mdev);
+
+	switch (version_name) {
+	case PMDCALIFORNIUM:
+		if (mdev->pwm_sel > 0 && mdev->enable_gp_driver) {
+			rc = qpnp_write_byte(mdev, REG_PWM_SEL, mdev->pwm_sel);
+			if (rc < 0) {
+				dev_err(mdev->dev,
+					"Failed to write PWM_SEL reg\n");
+				return rc;
+			}
+
+			rc = qpnp_write_byte(mdev, REG_GP_DRIVER_EN,
+					GP_DRIVER_EN_BIT);
+			if (rc < 0) {
+				dev_err(mdev->dev,
+					"Failed to write GP_DRIVER_EN reg\n");
+				return rc;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int qpnp_misc_probe(struct platform_device *pdev)
+{
+	struct qpnp_misc_dev *mdev;
+	int rc;
+
+	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+
+	mdev->dev = &pdev->dev;
+	mdev->regmap = dev_get_regmap(mdev->dev->parent, NULL);
+	if (!mdev->regmap) {
+		dev_err(mdev->dev, "Parent regmap is unavailable\n");
+		return -ENXIO;
+	}
+
+	rc = qpnp_misc_dt_init(mdev);
+	if (rc < 0) {
+		dev_err(mdev->dev,
+			"Error reading device tree properties, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_read_byte(mdev, REG_SUBTYPE, &mdev->version.subtype);
+	if (rc < 0) {
+		dev_err(mdev->dev, "Failed to read subtype, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_read_byte(mdev, REG_DIG_MAJOR_REV,
+			&mdev->version.dig_major_rev);
+	if (rc < 0) {
+		dev_err(mdev->dev, "Failed to read dig_major_rev, rc=%d\n", rc);
+		return rc;
+	}
+
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_add_tail(&mdev->list, &qpnp_misc_dev_list);
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	rc = qpnp_misc_config(mdev);
+	if (rc < 0) {
+		dev_err(mdev->dev,
+			"Error configuring module registers, rc=%d\n", rc);
+		return rc;
+	}
+
+	dev_info(mdev->dev, "probe successful\n");
+	return 0;
+}
+
+static struct platform_driver qpnp_misc_driver = {
+	.probe	= qpnp_misc_probe,
+	.driver	= {
+		.name		= QPNP_MISC_DEV_NAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= qpnp_misc_match_table,
+	},
+};
+
+static int __init qpnp_misc_init(void)
+{
+	return platform_driver_register(&qpnp_misc_driver);
+}
+
+static void __exit qpnp_misc_exit(void)
+{
+	platform_driver_unregister(&qpnp_misc_driver);
+}
+
+subsys_initcall(qpnp_misc_init);
+module_exit(qpnp_misc_exit);
+
+MODULE_DESCRIPTION(QPNP_MISC_DEV_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_MISC_DEV_NAME);
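A minimal sketch of how a consumer driver might use the two helpers exported above; the consumer driver, its register offset and the header that carries the prototypes are assumptions, while the "qcom,misc-ref" phandle property and the -EPROBE_DEFER behaviour come from the code above:

	/* hypothetical consumer; only the qpnp_misc_*() calls are real */
	static int foo_consumer_probe(struct platform_device *pdev)
	{
		struct device_node *misc_node;
		int irqs, rc;
		u8 val;

		/* returns -EPROBE_DEFER until the referenced misc device has probed */
		irqs = qpnp_misc_irqs_available(&pdev->dev);
		if (irqs < 0)
			return irqs;

		misc_node = of_parse_phandle(pdev->dev.of_node, "qcom,misc-ref", 0);
		if (!misc_node)
			return -ENODEV;

		rc = qpnp_misc_read_reg(misc_node, 0x940, &val); /* offset is illustrative */
		if (rc < 0)
			return rc;

		dev_info(&pdev->dev, "misc reg=0x%02x, misc irqs %ssupported\n",
			 val, irqs ? "" : "not ");
		return 0;
	}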
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 817fcf8..ace47ae 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2324,10 +2324,7 @@
 			err = mmc_blk_reset(md, card->host, type);
 			if (!err)
 				break;
-			if (err == -ENODEV ||
-				mmc_packed_cmd(mq_rq->cmd_type))
-				goto cmd_abort;
-			/* Fall through */
+			goto cmd_abort;
 		}
 		case MMC_BLK_ECC_ERR:
 			if (brq->data.blocks > 1) {
@@ -2843,6 +2840,13 @@
 		  MMC_QUIRK_LONG_READ_TIME),
 
 	/*
+	 * Some Samsung MMC cards need longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
+	/*
 	 * On these Samsung MoviNAND parts, performing secure erase or
 	 * secure trim can result in unrecoverable corruption due to a
 	 * firmware bug.
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index daad32f..36d9d69 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -37,3 +37,13 @@
 	  about re-trying SD init requests. This can be a useful
 	  work-around for buggy controllers and hardware. Enable
 	  if you are experiencing issues with SD detection.
+
+config MMC_CLKGATE
+	bool "MMC host clock gating"
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This is done to save power due to gating off the logic and bus
+	  noise when the MMC card is not in use. Your host driver has to
+	  support handling this in order for it to be of any use.
+
+	  If unsure, say N.
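The last sentence of the help text is the contract a host driver must honour: with gating enabled the core issues extra set_ios() calls with ios->clock set to 0 (gate) and later back to the previous frequency (ungate). A minimal sketch of that handling, assuming a hypothetical foo host driver (mmc_priv() and the clk_* calls are standard kernel APIs; struct foo_host and its fields are illustrative):

	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	{
		struct foo_host *host = mmc_priv(mmc);

		if (ios->clock == 0) {
			/* core is gating: stop the controller bus clock */
			if (host->clk_on) {
				clk_disable_unprepare(host->bus_clk);
				host->clk_on = false;
			}
		} else {
			if (!host->clk_on) {
				clk_prepare_enable(host->bus_clk);
				host->clk_on = true;
			}
			clk_set_rate(host->bus_clk, ios->clock);
		}

		/* bus width, timing and vdd handling continue as usual */
	}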
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 40ddc3e..e19d912 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -226,6 +226,8 @@
 
 		if (mrq->done)
 			mrq->done(mrq);
+
+		mmc_host_clk_release(host);
 	}
 }
 
@@ -340,6 +342,7 @@
 			mrq->stop->mrq = mrq;
 		}
 	}
+	mmc_host_clk_hold(host);
 	led_trigger_event(host->led, LED_FULL);
 	__mmc_start_request(host, mrq);
 
@@ -634,8 +637,11 @@
 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
 		 bool is_first_req)
 {
-	if (host->ops->pre_req)
+	if (host->ops->pre_req) {
+		mmc_host_clk_hold(host);
 		host->ops->pre_req(host, mrq, is_first_req);
+		mmc_host_clk_release(host);
+	}
 }
 
 /**
@@ -650,8 +656,11 @@
 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
 			 int err)
 {
-	if (host->ops->post_req)
+	if (host->ops->post_req) {
+		mmc_host_clk_hold(host);
 		host->ops->post_req(host, mrq, err);
+		mmc_host_clk_release(host);
+	}
 }
 
 /**
@@ -949,9 +958,9 @@
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		if (card->host->ios.clock)
+		if (mmc_host_clk_rate(card->host))
 			timeout_us += data->timeout_clks * 1000 /
-				(card->host->ios.clock / 1000);
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -1080,6 +1089,9 @@
 	if (pm)
 		pm_runtime_get_sync(mmc_dev(host));
 
+	if (host->ops->enable && !stop && host->claim_cnt == 1)
+		host->ops->enable(host);
+
 	return stop;
 }
 EXPORT_SYMBOL(__mmc_claim_host);
@@ -1097,6 +1109,9 @@
 
 	WARN_ON(!host->claimed);
 
+	if (host->ops->disable && host->claim_cnt == 1)
+		host->ops->disable(host);
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (--host->claim_cnt) {
 		/* Release for nested claim */
@@ -1149,6 +1164,8 @@
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 1 << ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
 }
 
@@ -1157,15 +1174,17 @@
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.chip_select = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
 	WARN_ON(hz && hz < host->f_min);
 
@@ -1176,6 +1195,68 @@
 	mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+	mmc_host_clk_hold(host);
+	__mmc_set_clock(host, hz);
+	mmc_host_clk_release(host);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		BUG_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		__mmc_set_clock(host, host->clk_old);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
 int mmc_execute_tuning(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -1190,7 +1271,9 @@
 	else
 		opcode = MMC_SEND_TUNING_BLOCK;
 
+	mmc_host_clk_hold(host);
 	err = host->ops->execute_tuning(host, opcode);
+	mmc_host_clk_release(host);
 
 	if (err)
 		pr_err("%s: tuning execution failed: %d\n",
@@ -1206,8 +1289,10 @@
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_mode = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1215,8 +1300,10 @@
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_width = width;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1671,8 +1758,11 @@
 	int old_signal_voltage = host->ios.signal_voltage;
 
 	host->ios.signal_voltage = signal_voltage;
-	if (host->ops->start_signal_voltage_switch)
+	if (host->ops->start_signal_voltage_switch) {
+		mmc_host_clk_hold(host);
 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
+		mmc_host_clk_release(host);
+	}
 
 	if (err)
 		host->ios.signal_voltage = old_signal_voltage;
@@ -1706,17 +1796,20 @@
 		pr_warn("%s: cannot verify signal voltage switch\n",
 			mmc_hostname(host));
 
+	mmc_host_clk_hold(host);
+
 	cmd.opcode = SD_SWITCH_VOLTAGE;
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
 	err = mmc_wait_for_cmd(host, &cmd, 0);
 	if (err)
-		return err;
+		goto err_command;
 
-	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
-		return -EIO;
-
+	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
+		err = -EIO;
+		goto err_command;
+	}
 	/*
 	 * The card should drive cmd and dat[0:3] low immediately
 	 * after the response of cmd11, but wait 1 ms to be sure
@@ -1765,6 +1858,9 @@
 		mmc_power_cycle(host, ocr);
 	}
 
+err_command:
+	mmc_host_clk_release(host);
+
 	return err;
 }
 
@@ -1773,8 +1869,10 @@
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+	mmc_host_clk_hold(host);
 	host->ios.timing = timing;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1782,8 +1880,10 @@
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+	mmc_host_clk_hold(host);
 	host->ios.drv_type = drv_type;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -1791,6 +1891,7 @@
 {
 	struct mmc_host *host = card->host;
 	int host_drv_type = SD_DRIVER_TYPE_B;
+	int drive_strength;
 
 	*drv_type = 0;
 
@@ -1813,10 +1914,14 @@
 	 * information and let the hardware specific code
 	 * return what is possible given the options
 	 */
-	return host->ops->select_drive_strength(card, max_dtr,
-						host_drv_type,
-						card_drv_type,
-						drv_type);
+	mmc_host_clk_hold(host);
+	drive_strength = host->ops->select_drive_strength(card, max_dtr,
+							  host_drv_type,
+							  card_drv_type,
+							  drv_type);
+	mmc_host_clk_release(host);
+
+	return drive_strength;
 }
 
 /*
@@ -1835,6 +1940,8 @@
 	if (host->ios.power_mode == MMC_POWER_ON)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_pre_power_on(host);
 
 	host->ios.vdd = fls(ocr) - 1;
@@ -1868,6 +1975,8 @@
 	 * time required to reach a stable voltage.
 	 */
 	mmc_delay(10);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_off(struct mmc_host *host)
@@ -1875,6 +1984,8 @@
 	if (host->ios.power_mode == MMC_POWER_OFF)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_power_off(host);
 
 	host->ios.clock = 0;
@@ -1890,6 +2001,8 @@
 	 * can be successfully turned on again.
 	 */
 	mmc_delay(1);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -2103,7 +2216,7 @@
 		 */
 		timeout_clks <<= 1;
 		timeout_us += (timeout_clks * 1000) /
-			      (card->host->ios.clock / 1000);
+			      (mmc_host_clk_rate(card->host) / 1000);
 
 		erase_timeout = timeout_us / 1000;
 
@@ -2626,7 +2739,9 @@
 {
 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
 		return;
+	mmc_host_clk_hold(host);
 	host->ops->hw_reset(host);
+	mmc_host_clk_release(host);
 }
 
 int mmc_hw_reset(struct mmc_host *host)
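One concrete effect of the mmc_host_clk_rate() substitutions in the timeout paths above: while the clock is gated, host->ios.clock is 0, so the old expression would divide by zero, whereas mmc_host_clk_rate() returns the cached clk_old instead. With illustrative numbers, timeout_clks = 600 at a cached 50 MHz adds 600 * 1000 / (50000000 / 1000) = 12 us to timeout_us.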
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 0fa86a2..c975c7a 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -40,6 +40,9 @@
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index c8451ce..bf0f6ce 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -256,6 +256,11 @@
 			&mmc_clock_fops))
 		goto err_node;
 
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+				root, &host->clk_delay))
+		goto err_node;
+#endif
 #ifdef CONFIG_FAIL_MMC_REQUEST
 	if (fail_request)
 		setup_fault_attr(&fail_default_attr, fail_request);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 348b58b..f18105f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -58,6 +58,246 @@
 	class_unregister(&mmc_host_class);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clkgate_delay = value;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return count;
+}
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+	unsigned long tick_ns;
+	unsigned long freq = host->ios.clock;
+	unsigned long flags;
+
+	if (!freq) {
+		pr_debug("%s: frequency set to 0 in disable function, "
+			 "this means the clock is already disabled.\n",
+			 mmc_hostname(host));
+		return;
+	}
+	/*
+	 * New requests may have appeared while we were scheduling,
+	 * then there is no reason to delay the check before
+	 * clk_disable().
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+
+	/*
+	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
+	 * to disable the MCI block clock. The reference count may have
+	 * gone up again after this delay due to rescheduling!
+	 */
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		tick_ns = DIV_ROUND_UP(1000000000, freq);
+		ndelay(host->clk_delay * tick_ns);
+	} else {
+		/* New users appeared while waiting for this work */
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		return;
+	}
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		/* This will set host->ios.clock to 0 */
+		mmc_gate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+	}
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+	struct mmc_host *host = container_of(work, struct mmc_host,
+					      clk_gate_work.work);
+
+	mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ *	mmc_host_clk_hold - ungate hardware MCI clocks
+ *	@host: host to ungate.
+ *
+ *	Makes sure the host ios.clock is restored to a non-zero value
+ *	past this call.	Increase clock reference count and ungate clock
+ *	if we're the first user.
+ */
+void mmc_host_clk_hold(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
+	cancel_delayed_work_sync(&host->clk_gate_work);
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_ungate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+	}
+	host->clk_requests++;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_may_gate_card - check if this card may be gated
+ *	@card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	/* If there is no card we may gate it */
+	if (!card)
+		return true;
+	/*
+	 * Don't gate SDIO cards! These need to be clocked at all times
+	 * since they may be independent systems generating interrupts
+	 * and other events. The clock requests counter from the core will
+	 * go down to zero since the core does not need it, but we will not
+	 * gate the clock, because there is somebody out there that may still
+	 * be using it.
+	 */
+	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
+}
+
+/**
+ *	mmc_host_clk_release - gate off hardware MCI clocks
+ *	@host: host to gate.
+ *
+ *	Calls the host driver with ios.clock set to zero as often as possible
+ *	in order to gate off hardware MCI clocks. Decrease clock reference
+ *	count and schedule disabling of clock.
+ */
+void mmc_host_clk_release(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_requests--;
+	if (mmc_host_may_gate_card(host->card) &&
+	    !host->clk_requests)
+		schedule_delayed_work(&host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ *	mmc_host_clk_rate - get current clock frequency setting
+ *	@host: host to get the clock frequency for.
+ *
+ *	Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	unsigned long freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated)
+		freq = host->clk_old;
+	else
+		freq = host->ios.clock;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return freq;
+}
+
+/**
+ *	mmc_host_clk_init - set up clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+	host->clk_requests = 0;
+	/* Hold MCI clock for 8 cycles by default */
+	host->clk_delay = 8;
+	/*
+	 * Default clock gating delay is 0ms to avoid wasting power.
+	 * This value can be tuned by writing into sysfs entry.
+	 */
+	host->clkgate_delay = 0;
+	host->clk_gated = false;
+	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_clk_exit - shut down clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+	/*
+	 * Wait for any outstanding gate and then make sure we're
+	 * ungated before exiting.
+	 */
+	if (cancel_delayed_work_sync(&host->clk_gate_work))
+		mmc_host_clk_gate_delayed(host);
+	if (host->clk_gated)
+		mmc_host_clk_hold(host);
+	/* There should be only one user now */
+	WARN_ON(host->clk_requests > 1);
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+	host->clkgate_delay_attr.show = clkgate_delay_show;
+	host->clkgate_delay_attr.store = clkgate_delay_store;
+	sysfs_attr_init(&host->clkgate_delay_attr.attr);
+	host->clkgate_delay_attr.attr.name = "clkgate_delay";
+	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+				mmc_hostname(host));
+}
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
+#endif
+
 void mmc_retune_enable(struct mmc_host *host)
 {
 	host->can_retune = 1;
@@ -382,6 +622,8 @@
 		return NULL;
 	}
 
+	mmc_host_clk_init(host);
+
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -427,6 +669,7 @@
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_host_debugfs(host);
 #endif
+	mmc_host_clk_sysfs_init(host);
 
 #ifdef CONFIG_BLOCK
 	mmc_latency_hist_sysfs_init(host);
@@ -466,6 +709,8 @@
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
+
+	mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
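Two delays cooperate in the gating path added above (numbers are illustrative): clkgate_delay, in milliseconds and tunable through the new clkgate_delay sysfs attribute, only decides when mmc_host_clk_gate_work runs after the last mmc_host_clk_release(); once the work runs, mmc_host_clk_gate_delayed() still waits clk_delay bus cycles, i.e. ndelay(clk_delay * DIV_ROUND_UP(1000000000, freq)), before gating. At the 400 kHz identification clock that is 8 * 2500 ns = 20 us; at 50 MHz it is 8 * 20 ns = 160 ns.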
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f57700c..56e6355 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -617,6 +617,12 @@
 		card->ext_csd.ffu_capable =
 			(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
 			!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+
+		card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
+		card->ext_csd.device_life_time_est_typ_a =
+			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
+		card->ext_csd.device_life_time_est_typ_b =
+			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
 	}
 out:
 	return err;
@@ -746,6 +752,11 @@
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
+MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
+	card->ext_csd.device_life_time_est_typ_a,
+	card->ext_csd.device_life_time_est_typ_b);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
 		card->ext_csd.enhanced_area_offset);
@@ -799,6 +810,9 @@
 	&dev_attr_name.attr,
 	&dev_attr_oemid.attr,
 	&dev_attr_prv.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_pre_eol_info.attr,
+	&dev_attr_life_time.attr,
 	&dev_attr_serial.attr,
 	&dev_attr_enhanced_area_offset.attr,
 	&dev_attr_enhanced_area_size.attr,
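As a usage note on the new attributes (field encodings recalled from the eMMC 5.0 specification rather than taken from this patch): life_time prints DEVICE_LIFE_TIME_EST_TYP_A and _B, where 0x01..0x0a mean 0-10% up to 90-100% of the estimated device lifetime consumed and 0x0b means the estimate has been exceeded; pre_eol_info is 0x01 (normal), 0x02 (warning, roughly 80% of reserved blocks consumed) or 0x03 (urgent). A card reporting life_time "0x02 0x01" has therefore used about 10-20% of its type A area (typically SLC) and under 10% of its type B area (typically MLC).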
@@ -2082,11 +2096,13 @@
 
 	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
 	     mmc_can_reset(card)) {
+		mmc_host_clk_hold(host);
 		/* If the card accept RST_n signal, send it. */
 		mmc_set_clock(host, host->f_init);
 		host->ops->hw_reset(host);
 		/* Set initial state and call mmc_set_ios */
 		mmc_set_initial_state(host);
+		mmc_host_clk_release(host);
 	} else {
 		/* Do a brute force power cycle */
 		mmc_power_cycle(host, card->ocr);
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index ca9cade..4e65ea5 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -35,7 +35,25 @@
 #define SDIO_DEVICE_ID_MARVELL_8797_F0	0x9128
 #endif
 
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+	if (mmc_card_sdio(card))
+		card->quirks |= data;
+}
+
 static const struct mmc_fixup mmc_fixup_methods[] = {
+	/* by default sdio devices are considered CLK_GATING broken */
+	/* good cards will be whitelisted as they are tested */
+	SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
+		   add_quirk_for_sdio_devices,
+		   MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
 	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
 		   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
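A sketch of how another known-good SDIO card would be whitelisted against the blanket clock-gating quirk added above; the vendor and device macros below are placeholders, not identifiers from this patch:

	/* hypothetical IDs: card from vendor FOO is known to tolerate gating */
	SDIO_FIXUP(SDIO_VENDOR_ID_FOO, SDIO_DEVICE_ID_FOO_WLAN,
		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),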
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index a0aa64e..60542b2 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -838,7 +838,9 @@
 	if (!host->ops->get_ro)
 		return -1;
 
+	mmc_host_clk_hold(host);
 	ro = host->ops->get_ro(host);
+	mmc_host_clk_release(host);
 
 	return ro;
 }
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index b5ec3c8..8e10bdc 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -976,10 +976,13 @@
 	}
 
 	if (!err && host->sdio_irqs) {
-		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
+		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
 			wake_up_process(host->sdio_irq_thread);
-		else if (host->caps & MMC_CAP_SDIO_IRQ)
+		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
 	}
 
 	mmc_release_host(host);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 91bbbfb..09cc67d 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -168,15 +168,21 @@
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (host->caps & MMC_CAP_SDIO_IRQ)
+		if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
 		if (!kthread_should_stop())
 			schedule_timeout(period);
 		set_current_state(TASK_RUNNING);
 	} while (!kthread_should_stop());
 
-	if (host->caps & MMC_CAP_SDIO_IRQ)
+	if (host->caps & MMC_CAP_SDIO_IRQ) {
+		mmc_host_clk_hold(host);
 		host->ops->enable_sdio_irq(host, 0);
+		mmc_host_clk_release(host);
+	}
 
 	pr_debug("%s: IRQ thread exiting with code %d\n",
 		 mmc_hostname(host), ret);
@@ -202,7 +208,9 @@
 				return err;
 			}
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
 		}
 	}
 
@@ -221,7 +229,9 @@
 			atomic_set(&host->sdio_irq_thread_abort, 1);
 			kthread_stop(host->sdio_irq_thread);
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 0);
+			mmc_host_clk_release(host);
 		}
 	}
 
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 5274f50..defb66e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -396,18 +396,26 @@
 	  If unsure, say N.
 
 config MMC_SDHCI_MSM
-	tristate "Qualcomm SDHCI Controller Support"
-	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+	tristate "Qualcomm Technologies, Inc. SDHCI Controller Support"
+	depends on ARCH_QCOM || ARCH_MSM || (ARM && COMPILE_TEST)
 	depends on MMC_SDHCI_PLTFM
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
-	  support present in Qualcomm SOCs. The controller supports
-	  SD/MMC/SDIO devices.
+	  support present in Qualcomm Technologies, Inc. SOCs. The controller
+	  supports SD/MMC/SDIO devices.
 
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
 
+config MMC_MSM
+	tristate "Qualcomm SDCC Controller Support"
+	depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
+	help
+	  This provides support for the SD/MMC cell found in the
+	  MSM and QSD SOCs from Qualcomm. The controller also has
+	  support for SDIO devices.
+
 config MMC_MXC
 	tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
 	depends on ARCH_MXC || PPC_MPC512x
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e2bdaaf..ef56624 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -71,8 +71,8 @@
 obj-$(CONFIG_MMC_SDHCI_OF_ESDHC)	+= sdhci-of-esdhc.o
 obj-$(CONFIG_MMC_SDHCI_OF_HLWD)		+= sdhci-of-hlwd.o
 obj-$(CONFIG_MMC_SDHCI_BCM_KONA)	+= sdhci-bcm-kona.o
-obj-$(CONFIG_MMC_SDHCI_IPROC)		+= sdhci-iproc.o
 obj-$(CONFIG_MMC_SDHCI_MSM)		+= sdhci-msm.o
+obj-$(CONFIG_MMC_SDHCI_IPROC)		+= sdhci-iproc.o
 obj-$(CONFIG_MMC_SDHCI_ST)		+= sdhci-st.o
 obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32)	+= sdhci-pic32.o
 obj-$(CONFIG_MMC_SDHCI_BRCMSTB)		+= sdhci-brcmstb.o
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 90ed2e1..1861af0 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1,7 +1,8 @@
 /*
- * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
+ * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
+ * driver source file
  *
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,106 +16,218 @@
  */
 
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/io.h>
 #include <linux/delay.h>
-#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/mmc/mmc.h>
 
 #include "sdhci-pltfm.h"
 
-#define CORE_MCI_VERSION		0x50
-#define CORE_VERSION_MAJOR_SHIFT	28
-#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
-#define CORE_VERSION_MINOR_MASK		0xff
-
+#define SDHCI_VER_100		0x2B
 #define CORE_HC_MODE		0x78
 #define HC_MODE_EN		0x1
-#define CORE_POWER		0x0
-#define CORE_SW_RST		BIT(7)
 
-#define CORE_PWRCTL_STATUS	0xdc
-#define CORE_PWRCTL_MASK	0xe0
-#define CORE_PWRCTL_CLEAR	0xe4
-#define CORE_PWRCTL_CTL		0xe8
-#define CORE_PWRCTL_BUS_OFF	BIT(0)
-#define CORE_PWRCTL_BUS_ON	BIT(1)
-#define CORE_PWRCTL_IO_LOW	BIT(2)
-#define CORE_PWRCTL_IO_HIGH	BIT(3)
-#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
-#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
-#define REQ_BUS_OFF		BIT(0)
-#define REQ_BUS_ON		BIT(1)
-#define REQ_IO_LOW		BIT(2)
-#define REQ_IO_HIGH		BIT(3)
-#define INT_MASK		0xf
+#define CORE_POWER		0x0
+#define CORE_SW_RST		(1 << 7)
+
+#define CORE_PWRCTL_STATUS	0xDC
+#define CORE_PWRCTL_MASK	0xE0
+#define CORE_PWRCTL_CLEAR	0xE4
+#define CORE_PWRCTL_CTL		0xE8
+
+#define CORE_PWRCTL_BUS_OFF	0x01
+#define CORE_PWRCTL_BUS_ON	(1 << 1)
+#define CORE_PWRCTL_IO_LOW	(1 << 2)
+#define CORE_PWRCTL_IO_HIGH	(1 << 3)
+
+#define CORE_PWRCTL_BUS_SUCCESS	0x01
+#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
+#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
+#define CORE_PWRCTL_IO_FAIL	(1 << 3)
+
+#define INT_MASK		0xF
 #define MAX_PHASES		16
-#define CORE_DLL_LOCK		BIT(7)
-#define CORE_DLL_EN		BIT(16)
-#define CORE_CDR_EN		BIT(17)
-#define CORE_CK_OUT_EN		BIT(18)
-#define CORE_CDR_EXT_EN		BIT(19)
-#define CORE_DLL_PDN		BIT(29)
-#define CORE_DLL_RST		BIT(30)
+
+#define CORE_DLL_LOCK		(1 << 7)
+#define CORE_DLL_EN		(1 << 16)
+#define CORE_CDR_EN		(1 << 17)
+#define CORE_CK_OUT_EN		(1 << 18)
+#define CORE_CDR_EXT_EN		(1 << 19)
+#define CORE_DLL_PDN		(1 << 29)
+#define CORE_DLL_RST		(1 << 30)
 #define CORE_DLL_CONFIG		0x100
+#define CORE_DLL_TEST_CTL	0x104
 #define CORE_DLL_STATUS		0x108
 
-#define CORE_VENDOR_SPEC	0x10c
-#define CORE_CLK_PWRSAVE	BIT(1)
+#define CORE_VENDOR_SPEC	0x10C
+#define CORE_CLK_PWRSAVE	(1 << 1)
+#define CORE_IO_PAD_PWR_SWITCH	(1 << 16)
 
-#define CORE_VENDOR_SPEC_CAPABILITIES0	0x11c
+/* 8K descriptors */
+#define SDHCI_MSM_MAX_SEGMENTS  (1 << 13)
+#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */
 
-#define CDR_SELEXT_SHIFT	20
-#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
-#define CMUX_SHIFT_PHASE_SHIFT	24
-#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)
-
-struct sdhci_msm_host {
-	struct platform_device *pdev;
-	void __iomem *core_mem;	/* MSM SDCC mapped address */
-	int pwr_irq;		/* power irq */
-	struct clk *clk;	/* main SD/MMC bus clock */
-	struct clk *pclk;	/* SDHC peripheral bus clock */
-	struct clk *bus_clk;	/* SDHC bus voter clock */
-	struct mmc_host *mmc;
+static const u32 tuning_block_64[] = {
+	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
+	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
+	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
+	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
 };
 
-/* Platform specific tuning */
-static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
+static const u32 tuning_block_128[] = {
+	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
+	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
+	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
+	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
+	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
+	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
+	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
+	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
+};
+
+/* This structure keeps information per regulator */
+struct sdhci_msm_reg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage level to be set */
+	u32 low_vol_level;
+	u32 high_vol_level;
+	/* Load values for low power and high power mode */
+	u32 lpm_uA;
+	u32 hpm_uA;
+
+	/* is this regulator enabled? */
+	bool is_enabled;
+	/* does this regulator need to be always on? */
+	bool is_always_on;
+	/* is low power mode setting required for this regulator? */
+	bool lpm_sup;
+	bool set_voltage_sup;
+};
+
+/*
+ * This structure keeps information for all the
+ * regulators required for a SDCC slot.
+ */
+struct sdhci_msm_slot_reg_data {
+	/* keeps VDD/VCC regulator info */
+	struct sdhci_msm_reg_data *vdd_data;
+	 /* keeps VDD IO regulator info */
+	struct sdhci_msm_reg_data *vdd_io_data;
+};
+
+struct sdhci_msm_gpio {
+	u32 no;
+	const char *name;
+	bool is_enabled;
+};
+
+struct sdhci_msm_gpio_data {
+	struct sdhci_msm_gpio *gpio;
+	u8 size;
+};
+
+struct sdhci_msm_pin_data {
+	/*
+	 * = 1 if controller pins are using gpios
+	 * = 0 if controller has dedicated MSM pads
+	 */
+	bool cfg_sts;
+	struct sdhci_msm_gpio_data *gpio_data;
+};
+
+struct sdhci_msm_pltfm_data {
+	/* Supported UHS-I Modes */
+	u32 caps;
+
+	/* More capabilities */
+	u32 caps2;
+
+	unsigned long mmc_bus_width;
+	u32 max_clk;
+	struct sdhci_msm_slot_reg_data *vreg_data;
+	bool nonremovable;
+	struct sdhci_msm_pin_data *pin_data;
+};
+
+struct sdhci_msm_host {
+	void __iomem *core_mem;    /* MSM SDCC mapped address */
+	struct clk	 *clk;     /* main SD/MMC bus clock */
+	struct clk	 *pclk;    /* SDHC peripheral bus clock */
+	struct clk	 *bus_clk; /* SDHC bus voter clock */
+	atomic_t clks_on; /* Set if clocks are enabled */
+	struct sdhci_msm_pltfm_data *pdata;
+	struct mmc_host  *mmc;
+	struct sdhci_pltfm_data sdhci_msm_pdata;
+	wait_queue_head_t pwr_irq_wait;
+};
+
+enum vdd_io_level {
+	/* set vdd_io_data->low_vol_level */
+	VDD_IO_LOW,
+	/* set vdd_io_data->high_vol_level */
+	VDD_IO_HIGH,
+	/*
+	 * set to whatever voltage is passed in the voltage_level (third)
+	 * argument of sdhci_msm_set_vdd_io_vol().
+	 */
+	VDD_IO_SET_LEVEL,
+};
+
+/* MSM platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
+						u8 poll)
 {
+	int rc = 0;
 	u32 wait_cnt = 50;
-	u8 ck_out_en;
+	u8 ck_out_en = 0;
 	struct mmc_host *mmc = host->mmc;
 
-	/* Poll for CK_OUT_EN bit.  max. poll time = 50us */
+	/* poll for CK_OUT_EN bit.  max. poll time = 50us */
 	ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
 			CORE_CK_OUT_EN);
 
 	while (ck_out_en != poll) {
 		if (--wait_cnt == 0) {
-			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
-			       mmc_hostname(mmc), poll);
-			return -ETIMEDOUT;
+			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
+				mmc_hostname(mmc), __func__, poll);
+			rc = -ETIMEDOUT;
+			goto out;
 		}
 		udelay(1);
 
-		ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
-				CORE_CK_OUT_EN);
+		ck_out_en = !!(readl_relaxed(host->ioaddr +
+				CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
 	}
-
-	return 0;
+out:
+	return rc;
 }
 
 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
 {
-	int rc;
-	static const u8 grey_coded_phase_table[] = {
-		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
-		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
-	};
+	int rc = 0;
+	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
+					0x8};
 	unsigned long flags;
 	u32 config;
 	struct mmc_host *mmc = host->mmc;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	spin_lock_irqsave(&host->lock, flags);
 
 	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
@@ -131,10 +244,10 @@
 	 * Write the selected DLL clock output phase (0 ... 15)
 	 * to CDR_SELEXT bit field of DLL_CONFIG register.
 	 */
-	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
-	config &= ~CDR_SELEXT_MASK;
-	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
-	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+			& ~(0xF << 20))
+			| (grey_coded_phase_table[phase] << 20)),
+			host->ioaddr + CORE_DLL_CONFIG);
 
 	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
 	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
@@ -152,10 +265,11 @@
 	goto out;
 
 err_out:
-	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
-	       mmc_hostname(mmc), phase);
+	pr_err("%s: %s: Failed to set DLL phase: %d\n",
+		mmc_hostname(mmc), __func__, phase);
 out:
 	spin_unlock_irqrestore(&host->lock, flags);
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return rc;
 }
 
@@ -170,19 +284,20 @@
  */
 
 static int msm_find_most_appropriate_phase(struct sdhci_host *host,
-					   u8 *phase_table, u8 total_phases)
+				u8 *phase_table, u8 total_phases)
 {
 	int ret;
 	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
-	u8 phases_per_row[MAX_PHASES] = { 0 };
+	u8 phases_per_row[MAX_PHASES] = {0};
 	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
 	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
 	bool phase_0_found = false, phase_15_found = false;
 	struct mmc_host *mmc = host->mmc;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	if (!total_phases || (total_phases > MAX_PHASES)) {
-		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
-		       mmc_hostname(mmc), total_phases);
+		pr_err("%s: %s: invalid argument: total_phases=%d\n",
+			mmc_hostname(mmc), __func__, total_phases);
 		return -EINVAL;
 	}
 
@@ -240,7 +355,7 @@
 		i = phases_15;
 		for (cnt = 0; cnt < phases_0; cnt++) {
 			ranges[phase_15_raw_index][i] =
-			    ranges[phase_0_raw_index][cnt];
+				ranges[phase_0_raw_index][cnt];
 			if (++i >= MAX_PHASES)
 				break;
 		}
@@ -256,24 +371,25 @@
 		}
 	}
 
-	i = (curr_max * 3) / 4;
+	i = ((curr_max * 3) / 4);
 	if (i)
 		i--;
 
-	ret = ranges[selected_row_index][i];
+	ret = (int)ranges[selected_row_index][i];
 
 	if (ret >= MAX_PHASES) {
 		ret = -EINVAL;
-		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
-		       mmc_hostname(mmc), ret);
+		pr_err("%s: %s: invalid phase selected=%d\n",
+			mmc_hostname(mmc), __func__, ret);
 	}
 
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return ret;
 }
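A worked example of the selection rule at the end of msm_find_most_appropriate_phase() (phase numbers invented): if the consecutive phases {5, 6, 7, 8, 9} passed tuning, then curr_max = 5, i = (5 * 3) / 4 = 3, decremented to 2, so phase 7 is returned, a phase comfortably inside the passing window rather than at either edge.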
 
 static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
 {
-	u32 mclk_freq = 0, config;
+	u32 mclk_freq = 0;
 
 	/* Program the MCLK value to MCLK_FREQ bit field */
 	if (host->clock <= 112000000)
@@ -293,28 +409,31 @@
 	else if (host->clock <= 200000000)
 		mclk_freq = 7;
 
-	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
-	config &= ~CMUX_SHIFT_PHASE_MASK;
-	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
-	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+			& ~(7 << 24)) | (mclk_freq << 24)),
+			host->ioaddr + CORE_DLL_CONFIG);
 }
 
-/* Initialize the DLL (Programmable Delay Line) */
+/* Initialize the DLL (Programmable Delay Line ) */
 static int msm_init_cm_dll(struct sdhci_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
-	int wait_cnt = 50;
+	int rc = 0;
 	unsigned long flags;
+	u32 wait_cnt;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	spin_lock_irqsave(&host->lock, flags);
 
 	/*
 	 * Make sure that clock is always enabled when DLL
 	 * tuning is in progress. Keeping PWRSAVE ON may
-	 * turn off the clock.
+	 * turn off the clock. So let's disable the PWRSAVE
+	 * here and re-enable it once tuning is completed.
 	 */
 	writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
-			& ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+			& ~CORE_CLK_PWRSAVE),
+			host->ioaddr + CORE_VENDOR_SPEC);
 
 	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
 	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
@@ -341,69 +460,107 @@
 	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
 			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
 
+	wait_cnt = 50;
 	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
 	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
-		 CORE_DLL_LOCK)) {
+		CORE_DLL_LOCK)) {
 		/* max. wait for 50us sec for LOCK bit to be set */
 		if (--wait_cnt == 0) {
-			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
-			       mmc_hostname(mmc));
-			spin_unlock_irqrestore(&host->lock, flags);
-			return -ETIMEDOUT;
+			pr_err("%s: %s: DLL failed to LOCK\n",
+				mmc_hostname(mmc), __func__);
+			rc = -ETIMEDOUT;
+			goto out;
 		}
+		/* wait for 1us before polling again */
 		udelay(1);
 	}
 
+out:
+	/* re-enable PWRSAVE */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
+			CORE_CLK_PWRSAVE),
+			host->ioaddr + CORE_VENDOR_SPEC);
 	spin_unlock_irqrestore(&host->lock, flags);
-	return 0;
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
+	return rc;
 }
 
-static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
 {
-	int tuning_seq_cnt = 3;
-	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
+	unsigned long flags;
+	u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
+	const u32 *tuning_block_pattern = tuning_block_64;
+	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
 	int rc;
 	struct mmc_host *mmc = host->mmc;
-	struct mmc_ios ios = host->mmc->ios;
 
-	/*
-	 * Tuning is required for SDR104, HS200 and HS400 cards and
-	 * if clock frequency is greater than 100MHz in these modes.
-	 */
-	if (host->clock <= 100 * 1000 * 1000 ||
-	    !((ios.timing == MMC_TIMING_MMC_HS200) ||
-	      (ios.timing == MMC_TIMING_UHS_SDR104)))
-		return 0;
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
+	/* Tuning is required for SDR104 and HS200 modes */
+	spin_lock_irqsave(&host->lock, flags);
 
-retry:
-	/* First of all reset the tuning block */
+	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
+		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
+		tuning_block_pattern = tuning_block_128;
+		size = sizeof(tuning_block_128);
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/* first of all reset the tuning block */
 	rc = msm_init_cm_dll(host);
 	if (rc)
-		return rc;
+		goto out;
+
+	data_buf = kmalloc(size, GFP_KERNEL);
+	if (!data_buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
 
 	phase = 0;
 	do {
-		/* Set the phase in delay line hw block */
+		struct mmc_command cmd = {0};
+		struct mmc_data data = {0};
+		struct mmc_request mrq = {
+			.cmd = &cmd,
+			.data = &data
+		};
+		struct scatterlist sg;
+
+		/* set the phase in delay line hw block */
 		rc = msm_config_cm_dll_phase(host, phase);
 		if (rc)
-			return rc;
+			goto kfree;
 
-		rc = mmc_send_tuning(mmc, opcode, NULL);
-		if (!rc) {
-			/* Tuning is successful at this tuning point */
+		cmd.opcode = opcode;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+		data.blksz = size;
+		data.blocks = 1;
+		data.flags = MMC_DATA_READ;
+		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
+
+		data.sg = &sg;
+		data.sg_len = 1;
+		sg_init_one(&sg, data_buf, size);
+		memset(data_buf, 0, size);
+		mmc_wait_for_req(mmc, &mrq);
+
+		if (!cmd.error && !data.error &&
+			!memcmp(data_buf, tuning_block_pattern, size)) {
+			/* tuning is successful at this tuning point */
 			tuned_phases[tuned_phase_cnt++] = phase;
-			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
-				 mmc_hostname(mmc), phase);
+			pr_debug("%s: %s: found good phase = %d\n",
+				mmc_hostname(mmc), __func__, phase);
 		}
-	} while (++phase < ARRAY_SIZE(tuned_phases));
+	} while (++phase < 16);
 
 	if (tuned_phase_cnt) {
 		rc = msm_find_most_appropriate_phase(host, tuned_phases,
-						     tuned_phase_cnt);
+							tuned_phase_cnt);
 		if (rc < 0)
-			return rc;
+			goto kfree;
 		else
-			phase = rc;
+			phase = (u8)rc;
 
 		/*
 		 * Finally set the selected phase in delay
@@ -411,121 +568,737 @@
 		 */
 		rc = msm_config_cm_dll_phase(host, phase);
 		if (rc)
-			return rc;
-		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
-			 mmc_hostname(mmc), phase);
+			goto kfree;
+		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
+				mmc_hostname(mmc), __func__, phase);
 	} else {
-		if (--tuning_seq_cnt)
-			goto retry;
-		/* Tuning failed */
-		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
-		       mmc_hostname(mmc));
-		rc = -EIO;
+		/* tuning failed */
+		pr_err("%s: %s: no tuning point found\n",
+			mmc_hostname(mmc), __func__);
+		rc = -EAGAIN;
 	}
 
+kfree:
+	kfree(data_buf);
+out:
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return rc;
 }
 
-static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
-					unsigned int uhs)
+static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
 {
-	struct mmc_host *mmc = host->mmc;
-	u16 ctrl_2;
+	struct sdhci_msm_gpio_data *curr;
+	int i, ret = 0;
 
-	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-	/* Select Bus Speed Mode for host */
-	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-	switch (uhs) {
-	case MMC_TIMING_UHS_SDR12:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
-		break;
-	case MMC_TIMING_UHS_SDR25:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
-		break;
-	case MMC_TIMING_UHS_SDR50:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
-		break;
-	case MMC_TIMING_MMC_HS200:
-	case MMC_TIMING_UHS_SDR104:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
-		break;
-	case MMC_TIMING_UHS_DDR50:
-	case MMC_TIMING_MMC_DDR52:
-		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
-		break;
+	curr = pdata->pin_data->gpio_data;
+	for (i = 0; i < curr->size; i++) {
+		if (!gpio_is_valid(curr->gpio[i].no)) {
+			ret = -EINVAL;
+			pr_err("%s: Invalid gpio = %d\n", __func__,
+					curr->gpio[i].no);
+			goto free_gpios;
+		}
+		if (enable) {
+			ret = gpio_request(curr->gpio[i].no,
+						curr->gpio[i].name);
+			if (ret) {
+				pr_err("%s: gpio_request(%d, %s) failed %d\n",
+					__func__, curr->gpio[i].no,
+					curr->gpio[i].name, ret);
+				goto free_gpios;
+			}
+			curr->gpio[i].is_enabled = true;
+		} else {
+			gpio_free(curr->gpio[i].no);
+			curr->gpio[i].is_enabled = false;
+		}
 	}
+	return ret;
 
-	/*
-	 * When clock frequency is less than 100MHz, the feedback clock must be
-	 * provided and DLL must not be used so that tuning can be skipped. To
-	 * provide feedback clock, the mode selection can be any value less
-	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
-	 */
-	if (host->clock <= 100000000 &&
-	    (uhs == MMC_TIMING_MMC_HS400 ||
-	     uhs == MMC_TIMING_MMC_HS200 ||
-	     uhs == MMC_TIMING_UHS_SDR104))
-		ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-
-	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
-		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
-	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+free_gpios:
+	for (i--; i >= 0; i--) {
+		gpio_free(curr->gpio[i].no);
+		curr->gpio[i].is_enabled = false;
+	}
+	return ret;
 }
 
-static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
 {
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	u32 irq_status, irq_ack = 0;
+	int ret = 0;
 
-	irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
-	irq_status &= INT_MASK;
+	if (!pdata->pin_data || (pdata->pin_data->cfg_sts == enable))
+		return 0;
 
-	writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
+	ret = sdhci_msm_setup_gpio(pdata, enable);
+	if (!ret)
+		pdata->pin_data->cfg_sts = enable;
 
-	if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
-		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
-	if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
-		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+	return ret;
+}
+
+#define MAX_PROP_SIZE 32
+static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
+		struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	struct sdhci_msm_reg_data *vreg;
+	struct device_node *np = dev->of_node;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+	if (!of_parse_phandle(np, prop_name, 0)) {
+		dev_err(dev, "No vreg data found for %s\n", vreg_name);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg) {
+		dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	vreg->name = vreg_name;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-always-on", vreg_name);
+	if (of_get_property(np, prop_name, NULL))
+		vreg->is_always_on = true;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-lpm-sup", vreg_name);
+	if (of_get_property(np, prop_name, NULL))
+		vreg->lpm_sup = true;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-voltage-level", vreg_name);
+	prop = of_get_property(np, prop_name, &len);
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_warn(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+	} else {
+		vreg->low_vol_level = be32_to_cpup(&prop[0]);
+		vreg->high_vol_level = be32_to_cpup(&prop[1]);
+	}
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-current-level", vreg_name);
+	prop = of_get_property(np, prop_name, &len);
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_warn(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+	} else {
+		vreg->lpm_uA = be32_to_cpup(&prop[0]);
+		vreg->hpm_uA = be32_to_cpup(&prop[1]);
+	}
+
+	*vreg_data = vreg;
+	dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
+		vreg->name, vreg->is_always_on ? "always_on," : "",
+		vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
+		vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
+
+	return ret;
+}
+
+#define GPIO_NAME_MAX_LEN 32
+static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	int ret = 0, cnt, i;
+	struct sdhci_msm_pin_data *pin_data;
+	struct device_node *np = dev->of_node;
+
+	pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
+	if (!pin_data) {
+		dev_err(dev, "No memory for pin_data\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cnt = of_gpio_count(np);
+	if (cnt > 0) {
+		pin_data->gpio_data = devm_kzalloc(dev,
+				sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
+		if (!pin_data->gpio_data) {
+			dev_err(dev, "No memory for gpio_data\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		pin_data->gpio_data->size = cnt;
+		pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
+				sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
+
+		if (!pin_data->gpio_data->gpio) {
+			dev_err(dev, "No memory for gpio\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		for (i = 0; i < cnt; i++) {
+			const char *name = NULL;
+			pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
+			of_property_read_string_index(np,
+					"qcom,gpio-names", i, &name);
+
+			/*
+			 * Allocate the label with devm_kasprintf() so it
+			 * remains valid after this function returns; a stack
+			 * buffer here would leave gpio_request() holding a
+			 * dangling pointer.
+			 */
+			pin_data->gpio_data->gpio[i].name =
+				devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+					dev_name(dev), name ? name : "?");
+			if (!pin_data->gpio_data->gpio[i].name) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
+					pin_data->gpio_data->gpio[i].name,
+					pin_data->gpio_data->gpio[i].no);
+			pdata->pin_data = pin_data;
+		}
+	}
+
+out:
+	if (ret)
+		dev_err(dev, "%s failed with err %d\n", __func__, ret);
+	return ret;
+}
+
+/* Parse platform data */
+static struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev)
+{
+	struct sdhci_msm_pltfm_data *pdata = NULL;
+	struct device_node *np = dev->of_node;
+	u32 bus_width = 0;
+	int len, i;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "failed to allocate memory for platform data\n");
+		goto out;
+	}
+
+	of_property_read_u32(np, "qcom,bus-width", &bus_width);
+	if (bus_width == 8)
+		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
+	else if (bus_width == 4)
+		pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
+	else {
+		dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
+		pdata->mmc_bus_width = 0;
+	}
+
+	pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
+						    sdhci_msm_slot_reg_data),
+					GFP_KERNEL);
+	if (!pdata->vreg_data) {
+		dev_err(dev, "failed to allocate memory for vreg data\n");
+		goto out;
+	}
+
+	if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
+					 "vdd")) {
+		dev_err(dev, "failed parsing vdd data\n");
+		goto out;
+	}
+	if (sdhci_msm_dt_parse_vreg_info(dev,
+					 &pdata->vreg_data->vdd_io_data,
+					 "vdd-io")) {
+		dev_err(dev, "failed parsing vdd-io data\n");
+		goto out;
+	}
+
+	if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
+		dev_err(dev, "failed parsing gpio data\n");
+		goto out;
+	}
+
+	of_property_read_u32(np, "qcom,max-clk-rate", &pdata->max_clk);
+
+	len = of_property_count_strings(np, "qcom,bus-speed-mode");
+
+	for (i = 0; i < len; i++) {
+		const char *name = NULL;
+
+		of_property_read_string_index(np,
+			"qcom,bus-speed-mode", i, &name);
+		if (!name)
+			continue;
+
+		if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
+			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
+			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+		else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
+			pdata->caps |= MMC_CAP_1_8V_DDR
+						| MMC_CAP_UHS_DDR50;
+		else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
+			pdata->caps |= MMC_CAP_1_2V_DDR
+						| MMC_CAP_UHS_DDR50;
+	}
+
+	if (of_get_property(np, "qcom,nonremovable", NULL))
+		pdata->nonremovable = true;
+
+	return pdata;
+out:
+	return NULL;
+}
+
+/* Regulator utility functions */
+static int sdhci_msm_vreg_init_reg(struct device *dev,
+					struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* check if the regulator is already initialized */
+	if (vreg->reg)
+		goto out;
+
+	/* Get the regulator handle */
+	vreg->reg = devm_regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		ret = PTR_ERR(vreg->reg);
+		pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
+			__func__, vreg->name, ret);
+		goto out;
+	}
+
+	/* sanity check */
+	if (!vreg->high_vol_level || !vreg->hpm_uA) {
+		pr_err("%s: %s invalid constraints specified\n",
+		       __func__, vreg->name);
+		ret = -EINVAL;
+	}
+
+out:
+	return ret;
+}
+
+static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
+{
+	if (vreg->reg)
+		devm_regulator_put(vreg->reg);
+}
+
+static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
+						  *vreg, int uA_load)
+{
+	int ret = 0;
+
+	/*
+	 * Regulators that do not support regulator_set_voltage also
+	 * do not support regulator_set_load.
+	 */
+	if (vreg->set_voltage_sup) {
+		ret = regulator_set_load(vreg->reg, uA_load);
+		if (ret < 0)
+			pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
+			       __func__, vreg->name, uA_load, ret);
+		else
+			/*
+			 * regulator_set_load() can return non zero
+			 * value even for success case.
+			 */
+			ret = 0;
+	}
+	return ret;
+}
+
+static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
+					int min_uV, int max_uV)
+{
+	int ret = 0;
+
+	ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
+	if (ret) {
+		pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d,max_uV=%d,ret=%d\n",
+		       __func__, vreg->name, min_uV, max_uV, ret);
+	}
+
+	return ret;
+}
+
+static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* Put regulator in HPM (high power mode) */
+	ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
+	if (ret < 0)
+		return ret;
+
+	if (!vreg->is_enabled) {
+		/* Set voltage level */
+		ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
+						vreg->high_vol_level);
+		if (ret)
+			return ret;
+	}
+	ret = regulator_enable(vreg->reg);
+	if (ret) {
+		pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
+				__func__, vreg->name, ret);
+		return ret;
+	}
+	vreg->is_enabled = true;
+	return ret;
+}
+
+static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* Never disable regulator marked as always_on */
+	if (vreg->is_enabled && !vreg->is_always_on) {
+		ret = regulator_disable(vreg->reg);
+		if (ret) {
+			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
+				__func__, vreg->name, ret);
+			goto out;
+		}
+		vreg->is_enabled = false;
+
+		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
+		if (ret < 0)
+			goto out;
+
+		/* Set min. voltage level to 0 */
+		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
+		if (ret)
+			goto out;
+	} else if (vreg->is_enabled && vreg->is_always_on) {
+		if (vreg->lpm_sup) {
+			/* Put always_on regulator in LPM (low power mode) */
+			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
+							      vreg->lpm_uA);
+			if (ret < 0)
+				goto out;
+		}
+	}
+out:
+	return ret;
+}
+
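+/* Enable or disable both the VDD and VDD-IO supplies of the slot */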
+static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
+			bool enable, bool is_init)
+{
+	int ret = 0, i;
+	struct sdhci_msm_slot_reg_data *curr_slot;
+	struct sdhci_msm_reg_data *vreg_table[2];
+
+	curr_slot = pdata->vreg_data;
+	if (!curr_slot) {
+		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
+			 __func__);
+		goto out;
+	}
+
+	vreg_table[0] = curr_slot->vdd_data;
+	vreg_table[1] = curr_slot->vdd_io_data;
+
+	for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
+		if (vreg_table[i]) {
+			if (enable)
+				ret = sdhci_msm_vreg_enable(vreg_table[i]);
+			else
+				ret = sdhci_msm_vreg_disable(vreg_table[i]);
+			if (ret)
+				goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+/*
+ * Reset vreg by ensuring it is off during probe. The enable call is
+ * needed so that the subsequent disable is balanced and actually turns
+ * the regulator off.
+ */
+static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
+{
+	int ret;
+
+	ret = sdhci_msm_setup_vreg(pdata, true, true);
+	if (ret)
+		return ret;
+	ret = sdhci_msm_setup_vreg(pdata, false, true);
+	return ret;
+}
+
+/* This init function should be called only once for each SDHC slot */
+static int sdhci_msm_vreg_init(struct device *dev,
+				struct sdhci_msm_pltfm_data *pdata,
+				bool is_init)
+{
+	int ret = 0;
+	struct sdhci_msm_slot_reg_data *curr_slot;
+	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
+
+	curr_slot = pdata->vreg_data;
+	if (!curr_slot)
+		goto out;
+
+	curr_vdd_reg = curr_slot->vdd_data;
+	curr_vdd_io_reg = curr_slot->vdd_io_data;
+
+	if (!is_init)
+		/* Deregister all regulators from regulator framework */
+		goto vdd_io_reg_deinit;
 
 	/*
-	 * The driver has to acknowledge the interrupt, switch voltages and
-	 * report back if it succeded or not to this register. The voltage
-	 * switches are handled by the sdhci core, so just report success.
+	 * Get the regulator handle from voltage regulator framework
+	 * and then try to set the voltage level for the regulator
 	 */
-	writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+	if (curr_vdd_reg) {
+		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
+		if (ret)
+			goto out;
+	}
+	if (curr_vdd_io_reg) {
+		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
+		if (ret)
+			goto vdd_reg_deinit;
+	}
+	ret = sdhci_msm_vreg_reset(pdata);
+	if (ret)
+		dev_err(dev, "vreg reset failed (%d)\n", ret);
+	goto out;
+
+vdd_io_reg_deinit:
+	if (curr_vdd_io_reg)
+		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
+vdd_reg_deinit:
+	if (curr_vdd_reg)
+		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
+out:
+	return ret;
+}
+
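+/* Set the VDD-IO pad supply to the voltage implied by the requested level */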
+static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
+			enum vdd_io_level level,
+			unsigned int voltage_level)
+{
+	int ret = 0;
+	int set_level;
+	struct sdhci_msm_reg_data *vdd_io_reg;
+
+	if (!pdata->vreg_data)
+		return ret;
+
+	vdd_io_reg = pdata->vreg_data->vdd_io_data;
+	if (vdd_io_reg && vdd_io_reg->is_enabled) {
+		switch (level) {
+		case VDD_IO_LOW:
+			set_level = vdd_io_reg->low_vol_level;
+			break;
+		case VDD_IO_HIGH:
+			set_level = vdd_io_reg->high_vol_level;
+			break;
+		case VDD_IO_SET_LEVEL:
+			set_level = voltage_level;
+			break;
+		default:
+			pr_err("%s: invalid argument level = %d\n",
+					__func__, level);
+			ret = -EINVAL;
+			return ret;
+		}
+		ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
+				set_level);
+	}
+	return ret;
 }
 
 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
 {
 	struct sdhci_host *host = (struct sdhci_host *)data;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u8 irq_status = 0;
+	u8 irq_ack = 0;
+	int ret = 0;
 
-	sdhci_msm_voltage_switch(host);
+	irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
+	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
+		mmc_hostname(msm_host->mmc), irq, irq_status);
 
+	/* Clear the interrupt */
+	writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
+
+	/* Handle BUS ON/OFF */
+	if (irq_status & CORE_PWRCTL_BUS_ON) {
+		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
+		if (!ret)
+			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_BUS_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+	}
+	if (irq_status & CORE_PWRCTL_BUS_OFF) {
+		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
+		if (!ret)
+			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_BUS_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+	}
+	/* Handle IO LOW/HIGH */
+	if (irq_status & CORE_PWRCTL_IO_LOW) {
+		/* Switch voltage Low */
+		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_IO_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+	}
+	if (irq_status & CORE_PWRCTL_IO_HIGH) {
+		/* Switch voltage High */
+		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_IO_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+	}
+
+	/* ACK status to the core */
+	writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
+
+	if (irq_status & CORE_PWRCTL_IO_HIGH)
+		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
+				~CORE_IO_PAD_PWR_SWITCH),
+				host->ioaddr + CORE_VENDOR_SPEC);
+	if (irq_status & CORE_PWRCTL_IO_LOW)
+		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
+				CORE_IO_PAD_PWR_SWITCH),
+				host->ioaddr + CORE_VENDOR_SPEC);
+	mb();
+
+	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
+		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
+	wake_up_interruptible(&msm_host->pwr_irq_wait);
 	return IRQ_HANDLED;
 }
 
-static const struct of_device_id sdhci_msm_dt_match[] = {
-	{ .compatible = "qcom,sdhci-msm-v4" },
-	{},
-};
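+/* Wait until the power IRQ handler has acknowledged the pending request */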
+static void sdhci_msm_check_power_status(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
 
-MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+	pr_debug("%s: %s: power status before waiting 0x%x\n",
+		mmc_hostname(host->mmc), __func__,
+		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
 
-static const struct sdhci_ops sdhci_msm_ops = {
+	ret = wait_event_interruptible(msm_host->pwr_irq_wait,
+				       (readb_relaxed(msm_host->core_mem +
+						      CORE_PWRCTL_CTL)) != 0x0);
+	if (ret)
+		pr_warning("%s: %s: returned due to error %d\n",
+				mmc_hostname(host->mmc), __func__, ret);
+	pr_debug("%s: %s: ret %d power status after handling power IRQ 0x%x\n",
+		mmc_hostname(host->mmc), __func__, ret,
+		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+}
+
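+/* Enable or disable the Clock Data Recovery (CDR) circuit in DLL_CONFIG */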
+static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
+{
+	if (enable)
+		writel_relaxed((readl_relaxed(host->ioaddr +
+					      CORE_DLL_CONFIG) | CORE_CDR_EN),
+			       host->ioaddr + CORE_DLL_CONFIG);
+	else
+		writel_relaxed((readl_relaxed(host->ioaddr +
+					      CORE_DLL_CONFIG) & ~CORE_CDR_EN),
+			       host->ioaddr + CORE_DLL_CONFIG);
+}
+
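+/* Report the maximum number of scatter-gather segments supported */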
+static unsigned int sdhci_msm_max_segs(void)
+{
+	return SDHCI_MSM_MAX_SEGMENTS;
+}
+
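+/* Ungate the host clocks for a non-zero rate request, gate them otherwise */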
+void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	int rc;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned long flags;
+
+	if (clock && !atomic_read(&msm_host->clks_on)) {
+		pr_debug("%s: request to enable clock at rate %u\n",
+				mmc_hostname(host->mmc), clock);
+		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
+			rc = clk_prepare_enable(msm_host->bus_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto out;
+			}
+		}
+		if (!IS_ERR(msm_host->pclk)) {
+			rc = clk_prepare_enable(msm_host->pclk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the pclk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_bus_clk;
+			}
+		}
+		rc = clk_prepare_enable(msm_host->clk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+				mmc_hostname(host->mmc), __func__, rc);
+			goto disable_pclk;
+		}
+		mb();
+		atomic_set(&msm_host->clks_on, 1);
+
+	} else if (!clock && atomic_read(&msm_host->clks_on)) {
+		pr_debug("%s: request to disable clocks\n",
+				mmc_hostname(host->mmc));
+		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+		mb();
+		clk_disable_unprepare(msm_host->clk);
+		if (!IS_ERR(msm_host->pclk))
+			clk_disable_unprepare(msm_host->pclk);
+		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+			clk_disable_unprepare(msm_host->bus_clk);
+		atomic_set(&msm_host->clks_on, 0);
+	}
+	spin_lock_irqsave(&host->lock, flags);
+	host->clock = clock;
+	spin_unlock_irqrestore(&host->lock, flags);
+	goto out;
+disable_pclk:
+	if (!IS_ERR_OR_NULL(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
+disable_bus_clk:
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+		clk_disable_unprepare(msm_host->bus_clk);
+out:
+	return;
+}
+
+static struct sdhci_ops sdhci_msm_ops = {
+	.check_power_status = sdhci_msm_check_power_status,
 	.platform_execute_tuning = sdhci_msm_execute_tuning,
-	.reset = sdhci_reset,
-	.set_clock = sdhci_set_clock,
-	.set_bus_width = sdhci_set_bus_width,
-	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
-	.voltage_switch = sdhci_msm_voltage_switch,
-};
-
-static const struct sdhci_pltfm_data sdhci_msm_pdata = {
-	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
-		  SDHCI_QUIRK_NO_CARD_NO_RESET |
-		  SDHCI_QUIRK_SINGLE_POWER_WRITE,
-	.ops = &sdhci_msm_ops,
+	.toggle_cdr = sdhci_msm_toggle_cdr,
+	.get_max_segments = sdhci_msm_max_segs,
+	.set_clock = sdhci_msm_set_clock,
 };
 
 static int sdhci_msm_probe(struct platform_device *pdev)
@@ -533,30 +1306,47 @@
 	struct sdhci_host *host;
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_msm_host *msm_host;
-	struct resource *core_memres;
-	int ret;
-	u16 host_version, core_minor;
-	u32 core_version, caps;
-	u8 core_major;
+	struct resource *core_memres = NULL;
+	int ret = 0, pwr_irq = 0, dead = 0;
+	u32 host_version;
 
-	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
-	if (IS_ERR(host))
-		return PTR_ERR(host);
+	pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
+	msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
+				GFP_KERNEL);
+	if (!msm_host) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	init_waitqueue_head(&msm_host->pwr_irq_wait);
+
+	msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
+	host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
+	if (IS_ERR(host)) {
+		ret = PTR_ERR(host);
+		goto out;
+	}
 
 	pltfm_host = sdhci_priv(host);
-	msm_host = sdhci_pltfm_priv(pltfm_host);
+	pltfm_host->priv = msm_host;
 	msm_host->mmc = host->mmc;
-	msm_host->pdev = pdev;
 
-	ret = mmc_of_parse(host->mmc);
-	if (ret)
+	/* Extract platform data */
+	if (pdev->dev.of_node) {
+		msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev);
+		if (!msm_host->pdata) {
+			dev_err(&pdev->dev, "DT parsing error\n");
+			ret = -EINVAL;
+			goto pltfm_free;
+		}
+	} else {
+		dev_err(&pdev->dev, "No device tree node\n");
+		ret = -ENODEV;
 		goto pltfm_free;
+	}
 
-	sdhci_get_of_property(pdev);
+	/* Setup Clocks */
 
 	/* Setup SDCC bus voter clock. */
-	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
-	if (!IS_ERR(msm_host->bus_clk)) {
+	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
 		/* Vote for max. clk rate for max. performance */
 		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
 		if (ret)
@@ -567,113 +1357,146 @@
 	}
 
 	/* Setup main peripheral bus clock */
-	msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
-	if (IS_ERR(msm_host->pclk)) {
-		ret = PTR_ERR(msm_host->pclk);
-		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
-		goto bus_clk_disable;
+	msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (!IS_ERR(msm_host->pclk)) {
+		ret = clk_prepare_enable(msm_host->pclk);
+		if (ret)
+			goto bus_clk_disable;
 	}
 
-	ret = clk_prepare_enable(msm_host->pclk);
-	if (ret)
-		goto bus_clk_disable;
-
 	/* Setup SDC MMC clock */
-	msm_host->clk = devm_clk_get(&pdev->dev, "core");
+	msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(msm_host->clk)) {
 		ret = PTR_ERR(msm_host->clk);
-		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
 		goto pclk_disable;
 	}
 
-	/* Vote for maximum clock rate for maximum performance */
-	ret = clk_set_rate(msm_host->clk, INT_MAX);
-	if (ret)
-		dev_warn(&pdev->dev, "core clock boost failed\n");
-
 	ret = clk_prepare_enable(msm_host->clk);
 	if (ret)
 		goto pclk_disable;
 
-	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
-
-	if (IS_ERR(msm_host->core_mem)) {
-		dev_err(&pdev->dev, "Failed to remap registers\n");
-		ret = PTR_ERR(msm_host->core_mem);
+	atomic_set(&msm_host->clks_on, 1);
+	/* Setup regulators */
+	ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
+	if (ret) {
+		dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
 		goto clk_disable;
 	}
 
 	/* Reset the core and Enable SDHC mode */
-	writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
-		       CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+	core_memres = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "core_mem");
+	if (!core_memres) {
+		ret = -ENODEV;
+		goto vreg_deinit;
+	}
+	msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
+					resource_size(core_memres));
 
-	/* SW reset can take upto 10HCLK + 15MCLK cycles. (min 40us) */
-	usleep_range(1000, 5000);
-	if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
-		dev_err(&pdev->dev, "Stuck in reset\n");
-		ret = -ETIMEDOUT;
-		goto clk_disable;
+	if (!msm_host->core_mem) {
+		dev_err(&pdev->dev, "Failed to remap registers\n");
+		ret = -ENOMEM;
+		goto vreg_deinit;
 	}
 
+	/* Set SW_RST bit in POWER register (Offset 0x0) */
+	writel_relaxed(CORE_SW_RST, msm_host->core_mem + CORE_POWER);
 	/* Set HC_MODE_EN bit in HC_MODE register */
 	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
 
-	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
+	/*
+	 * Following are the deviations from SDHC spec v3.0 -
+	 * 1. Card detection is handled using a separate GPIO.
+	 * 2. Bus power control is handled by interacting with the PMIC.
+	 */
+	host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+	host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+
+	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
 	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
 		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
-			       SDHCI_VENDOR_VER_SHIFT));
-
-	core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
-	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
-		      CORE_VERSION_MAJOR_SHIFT;
-	core_minor = core_version & CORE_VERSION_MINOR_MASK;
-	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
-		core_version, core_major, core_minor);
-
-	/*
-	 * Support for some capabilities is not advertised by newer
-	 * controller versions and must be explicitly enabled.
-	 */
-	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
-		caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
-		caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
-		writel_relaxed(caps, host->ioaddr +
-			       CORE_VENDOR_SPEC_CAPABILITIES0);
+		  SDHCI_VENDOR_VER_SHIFT));
+	if (((host_version & SDHCI_VENDOR_VER_MASK) >>
+		SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
+		/*
+		 * Add a 40us delay in the interrupt handler when
+		 * operating at the initialization frequency (400KHz).
+		 */
+		host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
+		/*
+		 * Set Software Reset for DAT line in Software
+		 * Reset Register (Bit 2).
+		 */
+		host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
 	}
 
-	/* Setup IRQ for handling power/voltage tasks with PMIC */
-	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
-	if (msm_host->pwr_irq < 0) {
-		dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
-			msm_host->pwr_irq);
-		ret = msm_host->pwr_irq;
-		goto clk_disable;
+	/* Setup PWRCTL irq */
+	pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+	if (pwr_irq < 0) {
+		dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
+				pwr_irq);
+		ret = pwr_irq;
+		goto vreg_deinit;
 	}
-
-	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+	ret = devm_request_threaded_irq(&pdev->dev, pwr_irq, NULL,
 					sdhci_msm_pwr_irq, IRQF_ONESHOT,
 					dev_name(&pdev->dev), host);
 	if (ret) {
-		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
-		goto clk_disable;
+		dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
+				pwr_irq, ret);
+		goto vreg_deinit;
 	}
 
+	/* Enable pwr irq interrupts */
+	writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
+
+#ifdef CONFIG_MMC_CLKGATE
+	/* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
+	msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
+#endif
+
+	/* Set host capabilities */
+	msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
+	msm_host->mmc->caps |= msm_host->pdata->caps;
+	msm_host->mmc->caps |= MMC_CAP_HW_RESET;
+	msm_host->mmc->caps2 |= msm_host->pdata->caps2;
+	msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR;
+	msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
+
+	if (msm_host->pdata->nonremovable)
+		msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
 	ret = sdhci_add_host(host);
-	if (ret)
-		goto clk_disable;
+	if (ret) {
+		dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
+		goto vreg_deinit;
+	}
 
-	return 0;
+	/* Set core clk rate, optionally overridden from DT */
+	if (msm_host->pdata->max_clk)
+		host->max_clk = msm_host->pdata->max_clk;
+	ret = clk_set_rate(msm_host->clk, host->max_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
+		goto remove_host;
+	}
 
+	/* Successful initialization */
+	goto out;
+
+remove_host:
+	dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+	sdhci_remove_host(host, dead);
+vreg_deinit:
+	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
 clk_disable:
-	clk_disable_unprepare(msm_host->clk);
+	if (!IS_ERR(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
 pclk_disable:
-	clk_disable_unprepare(msm_host->pclk);
+	if (!IS_ERR(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
 bus_clk_disable:
-	if (!IS_ERR(msm_host->bus_clk))
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
 		clk_disable_unprepare(msm_host->bus_clk);
 pltfm_free:
 	sdhci_pltfm_free(pdev);
+out:
+	pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
 	return ret;
 }
 
@@ -681,29 +1504,37 @@
 {
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
 	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
-		    0xffffffff);
+			0xffffffff);
 
+	pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
 	sdhci_remove_host(host, dead);
-	clk_disable_unprepare(msm_host->clk);
-	clk_disable_unprepare(msm_host->pclk);
-	if (!IS_ERR(msm_host->bus_clk))
-		clk_disable_unprepare(msm_host->bus_clk);
 	sdhci_pltfm_free(pdev);
+	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+
+	if (pdata->pin_data)
+		sdhci_msm_setup_gpio(pdata, false);
 	return 0;
 }
 
+static const struct of_device_id sdhci_msm_dt_match[] = {
+	{.compatible = "qcom,sdhci-msm"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
 static struct platform_driver sdhci_msm_driver = {
-	.probe = sdhci_msm_probe,
-	.remove = sdhci_msm_remove,
-	.driver = {
-		   .name = "sdhci_msm",
-		   .of_match_table = sdhci_msm_dt_match,
+	.probe		= sdhci_msm_probe,
+	.remove		= sdhci_msm_remove,
+	.driver		= {
+		.name	= "sdhci_msm",
+		.owner	= THIS_MODULE,
+		.of_match_table = sdhci_msm_dt_match,
 	},
 };
 
 module_platform_driver(sdhci_msm_driver);
 
-MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 410a55b..1cfd7f9 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,13 +28,9 @@
 #include "sdhci-pltfm.h"
 #include <linux/of.h>
 
-#define SDHCI_ARASAN_CLK_CTRL_OFFSET	0x2c
 #define SDHCI_ARASAN_VENDOR_REGISTER	0x78
 
 #define VENDOR_ENHANCED_STROBE		BIT(0)
-#define CLK_CTRL_TIMEOUT_SHIFT		16
-#define CLK_CTRL_TIMEOUT_MASK		(0xf << CLK_CTRL_TIMEOUT_SHIFT)
-#define CLK_CTRL_TIMEOUT_MIN_EXP	13
 
 #define PHY_CLK_TOO_SLOW_HZ		400000
 
@@ -163,15 +159,15 @@
 
 static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
 {
-	u32 div;
 	unsigned long freq;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
-	div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
-	div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+	/* SDHCI timeout clock is in kHz */
+	freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
 
-	freq = clk_get_rate(pltfm_host->clk);
-	freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+	/* or in MHz */
+	if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
+		freq = DIV_ROUND_UP(freq, 1000);
 
 	return freq;
 }
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index a9b7fc0..387ae1c 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -85,11 +85,30 @@
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
 
+/*
+ * In this specific implementation of the SDHCI controller, the power register
+ * needs to have a valid voltage set even when the power supply is managed by
+ * an external regulator.
+ */
+static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
+		     unsigned short vdd)
+{
+	if (!IS_ERR(host->mmc->supply.vmmc)) {
+		struct mmc_host *mmc = host->mmc;
+
+		spin_unlock_irq(&host->lock);
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+		spin_lock_irq(&host->lock);
+	}
+	sdhci_set_power_noreg(host, mode, vdd);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 	.set_clock		= sdhci_at91_set_clock,
 	.set_bus_width		= sdhci_set_bus_width,
 	.reset			= sdhci_reset,
 	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+	.set_power		= sdhci_at91_set_power,
 };
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1d9e00a..b0b9ceb 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -412,6 +412,8 @@
 	if (mode == MMC_POWER_OFF)
 		return;
 
+	spin_unlock_irq(&host->lock);
+
 	/*
 	 * Bus power might not enable after D3 -> D0 transition due to the
 	 * present state not yet having propagated. Retry for up to 2ms.
@@ -424,6 +426,8 @@
 		reg |= SDHCI_POWER_ON;
 		sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
 	}
+
+	spin_lock_irq(&host->lock);
 }
 
 static const struct sdhci_ops sdhci_intel_byt_ops = {
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3280f20..33b4fa6 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -23,6 +23,7 @@
 
 struct sdhci_pltfm_host {
 	struct clk *clk;
+	void *priv; /* to handle quirks across io-accessor calls */
 
 	/* migrate from sdhci_of_host */
 	unsigned int clock;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ba637ff..e72d188 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -748,6 +748,17 @@
 	}
 }
 
+static void sdhci_set_blk_size_reg(struct sdhci_host *host, unsigned int blksz,
+				   unsigned int sdma_boundary)
+{
+	if (host->flags & SDHCI_USE_ADMA)
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(0, blksz),
+			     SDHCI_BLOCK_SIZE);
+	else
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(sdma_boundary, blksz),
+			     SDHCI_BLOCK_SIZE);
+}
+
 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 ctrl;
@@ -880,8 +891,7 @@
 	sdhci_set_transfer_irqs(host);
 
 	/* Set the DMA boundary value and block size */
-	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
-		data->blksz), SDHCI_BLOCK_SIZE);
+	sdhci_set_blk_size_reg(host, data->blksz, SDHCI_DEFAULT_BOUNDARY_ARG);
 	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
 }
 
@@ -1350,7 +1360,8 @@
 
 	host->mmc->actual_clock = 0;
 
-	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
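+	/* Avoid touching the clock register when the clock is already off */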
+	if (host->clock)
+		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
 
 	if (clock == 0)
 		return;
@@ -1371,7 +1382,9 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		spin_unlock_irq(&host->lock);
+		usleep_range(900, 1100);
+		spin_lock_irq(&host->lock);
 	}
 
 	clk |= SDHCI_CLOCK_CARD_EN;
@@ -1579,22 +1592,16 @@
 		return;
 	}
 
-	/*
-	 * Reset the chip on each power off.
-	 * Should clear out any weird states.
-	 */
-	if (ios->power_mode == MMC_POWER_OFF) {
-		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
-		sdhci_reinit(host);
-	}
-
 	if (host->version >= SDHCI_SPEC_300 &&
 		(ios->power_mode == MMC_POWER_UP) &&
 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
 		sdhci_enable_preset_value(host, false);
 
+	spin_lock_irqsave(&host->lock, flags);
 	if (!ios->clock || ios->clock != host->clock) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		host->ops->set_clock(host, ios->clock);
+		spin_lock_irqsave(&host->lock, flags);
 		host->clock = ios->clock;
 
 		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
@@ -1609,11 +1616,13 @@
 			host->mmc->max_busy_timeout /= host->timeout_clk;
 		}
 	}
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	if (host->ops->set_power)
 		host->ops->set_power(host, ios->power_mode, ios->vdd);
 	else
-		sdhci_set_power(host, ios->power_mode, ios->vdd);
+		if (ios->power_mode & (MMC_POWER_UP | MMC_POWER_ON))
+			sdhci_set_power(host, ios->power_mode, ios->vdd);
 
 	if (host->ops->platform_send_init_74_clocks)
 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1712,6 +1721,7 @@
 	} else
 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
+	spin_unlock_irqrestore(&host->lock, flags);
 	/*
 	 * Some (ENE) controllers go apeshit on some ios operation,
 	 * signalling timeout and CRC errors even on CMD0. Resetting
@@ -1720,8 +1730,19 @@
 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 
+	/*
+	 * Reset the chip on each power off.
+	 * Should clear out any weird states.
+	 */
+	if (ios->power_mode == MMC_POWER_OFF) {
+		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+		sdhci_reinit(host);
+		sdhci_set_power(host, ios->power_mode, ios->vdd);
+	}
+	if (!ios->clock)
+		sdhci_set_clock(host, ios->clock);
+
 	mmiowb();
-	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static int sdhci_get_cd(struct mmc_host *mmc)
@@ -2054,14 +2075,11 @@
 		 */
 		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
 			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
-				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
-					     SDHCI_BLOCK_SIZE);
+				sdhci_set_blk_size_reg(host, 128, 7);
 			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
-				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
-					     SDHCI_BLOCK_SIZE);
+				sdhci_set_blk_size_reg(host, 64, 7);
 		} else {
-			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
-				     SDHCI_BLOCK_SIZE);
+			sdhci_set_blk_size_reg(host, 64, 7);
 		}
 
 		/*
@@ -2360,6 +2378,9 @@
 		sdhci_do_reset(host, SDHCI_RESET_DATA);
 
 		host->pending_reset = false;
+	} else {
+		if (host->quirks2 & SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT)
+			sdhci_reset(host, SDHCI_RESET_DATA);
 	}
 
 	if (!sdhci_has_requests(host))
@@ -2706,11 +2727,19 @@
 			result = IRQ_WAKE_THREAD;
 		}
 
-		if (intmask & SDHCI_INT_CMD_MASK)
+		if (intmask & SDHCI_INT_CMD_MASK) {
+			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+			    (host->clock <= 400000))
+				udelay(40);
 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+		}
 
-		if (intmask & SDHCI_INT_DATA_MASK)
+		if (intmask & SDHCI_INT_DATA_MASK) {
+			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+			    (host->clock <= 400000))
+				udelay(40);
 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+		}
 
 		if (intmask & SDHCI_INT_BUS_POWER)
 			pr_err("%s: Card is consuming too much power!\n",
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2570455..b56b3c1 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -425,6 +425,17 @@
 #define SDHCI_QUIRK2_ACMD23_BROKEN			(1<<14)
 /* Broken Clock divider zero in controller */
 #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN		(1<<15)
+/*
+ * Read Transfer Active/Write Transfer Active may not be de-asserted
+ * after the end of a transaction. Issue a reset for the DAT line.
+ */
+#define SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT			(1<<17)
+/*
+ * Slow interrupt clearance at 400KHz may cause the host controller
+ * driver's interrupt handler to be called twice.
+ */
+#define SDHCI_QUIRK2_SLOW_INT_CLR			(1<<18)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -558,7 +569,10 @@
 	void	(*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
 	void	(*hw_reset)(struct sdhci_host *host);
 	void    (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+	unsigned int	(*get_max_segments)(void);
 	void    (*card_event)(struct sdhci_host *host);
+	void	(*toggle_cdr)(struct sdhci_host *host, bool enable);
+	void	(*check_power_status)(struct sdhci_host *host);
 	void	(*voltage_switch)(struct sdhci_host *host);
 	int	(*select_drive_strength)(struct sdhci_host *host,
 					 struct mmc_card *card,
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f..1d84335 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@
 	struct ushc_data *ushc;
 	int ret;
 
+	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
 	if (mmc == NULL)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index bbef959..1592e1c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -917,8 +917,8 @@
 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX	1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH	1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX	2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX		2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH		1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX	3
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX	4
@@ -927,6 +927,8 @@
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX	6
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX	7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH	1
 
 #define RX_NORMAL_DESC0_OVT_INDEX		0
 #define RX_NORMAL_DESC0_OVT_WIDTH		16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1babcc1..ca106d4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1721,10 +1721,15 @@
 
 	/* Get the header length */
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 1);
 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
 						      RX_NORMAL_DESC2, HL);
 		if (rdata->rx.hdr_len)
 			pdata->ext_stats.rx_split_header_packets++;
+	} else {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 0);
 	}
 
 	/* Get the RSS hash */
@@ -1747,19 +1752,16 @@
 		}
 	}
 
-	/* Get the packet length */
-	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
-	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
-		/* Not all the data has been transferred for this packet */
-		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-			       INCOMPLETE, 1);
+	/* Not all the data has been transferred for this packet */
+	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
 		return 0;
-	}
 
 	/* This is the last of the data for this packet */
 	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-		       INCOMPLETE, 0);
+		       LAST, 1);
+
+	/* Get the packet length */
+	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
 	/* Set checksum done indicator as appropriate */
 	if (netdev->features & NETIF_F_RXCSUM)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7f9216d..0f0f3014 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1752,13 +1752,12 @@
 {
 	struct sk_buff *skb;
 	u8 *packet;
-	unsigned int copy_len;
 
 	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
 	if (!skb)
 		return NULL;
 
-	/* Start with the header buffer which may contain just the header
+	/* Pull in the header buffer which may contain just the header
 	 * or the header plus data
 	 */
 	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1767,30 +1766,49 @@
 
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
-	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
-	skb_copy_to_linear_data(skb, packet, copy_len);
-	skb_put(skb, copy_len);
-
-	len -= copy_len;
-	if (len) {
-		/* Add the remaining data as a frag */
-		dma_sync_single_range_for_cpu(pdata->dev,
-					      rdata->rx.buf.dma_base,
-					      rdata->rx.buf.dma_off,
-					      rdata->rx.buf.dma_len,
-					      DMA_FROM_DEVICE);
-
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				rdata->rx.buf.pa.pages,
-				rdata->rx.buf.pa.pages_offset,
-				len, rdata->rx.buf.dma_len);
-		rdata->rx.buf.pa.pages = NULL;
-	}
+	skb_copy_to_linear_data(skb, packet, len);
+	skb_put(skb, len);
 
 	return skb;
 }
 
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet)
+{
+	/* Always zero if not the first descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+		return 0;
+
+	/* First descriptor with split header, return header length */
+	if (rdata->rx.hdr_len)
+		return rdata->rx.hdr_len;
+
+	/* First descriptor but not the last descriptor and no split header,
+	 * so the full buffer was used
+	 */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.hdr.dma_len;
+
+	/* First descriptor and last descriptor and no split header, so
+	 * calculate how much of the buffer was used
+	 */
+	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
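+/* Amount of data placed in the second (data) buffer for this descriptor */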
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet,
+				     unsigned int len)
+{
+	/* Always the full buffer if not the last descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.buf.dma_len;
+
+	/* Last descriptor so calculate how much of the buffer was used
+	 * for the last bit of data
+	 */
+	return rdata->rx.len - len;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1873,8 +1891,8 @@
 	struct napi_struct *napi;
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
-	unsigned int incomplete, error, context_next, context;
-	unsigned int len, rdesc_len, max_len;
+	unsigned int last, error, context_next, context;
+	unsigned int len, buf1_len, buf2_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;
 
@@ -1884,7 +1902,7 @@
 	if (!ring)
 		return 0;
 
-	incomplete = 0;
+	last = 0;
 	context_next = 0;
 
 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -1918,9 +1936,8 @@
 		received++;
 		ring->cur++;
 
-		incomplete = XGMAC_GET_BITS(packet->attributes,
-					    RX_PACKET_ATTRIBUTES,
-					    INCOMPLETE);
+		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				      LAST);
 		context_next = XGMAC_GET_BITS(packet->attributes,
 					      RX_PACKET_ATTRIBUTES,
 					      CONTEXT_NEXT);
@@ -1929,7 +1946,7 @@
 					 CONTEXT);
 
 		/* Earlier error, just drain the remaining data */
-		if ((incomplete || context_next) && error)
+		if ((!last || context_next) && error)
 			goto read_again;
 
 		if (error || packet->errors) {
@@ -1941,16 +1958,22 @@
 		}
 
 		if (!context) {
-			/* Length is cumulative, get this descriptor's length */
-			rdesc_len = rdata->rx.len - len;
-			len += rdesc_len;
+			/* Get the data length in the descriptor buffers */
+			buf1_len = xgbe_rx_buf1_len(rdata, packet);
+			len += buf1_len;
+			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+			len += buf2_len;
 
-			if (rdesc_len && !skb) {
+			if (!skb) {
 				skb = xgbe_create_skb(pdata, napi, rdata,
-						      rdesc_len);
-				if (!skb)
+						      buf1_len);
+				if (!skb) {
 					error = 1;
-			} else if (rdesc_len) {
+					goto skip_data;
+				}
+			}
+
+			if (buf2_len) {
 				dma_sync_single_range_for_cpu(pdata->dev,
 							rdata->rx.buf.dma_base,
 							rdata->rx.buf.dma_off,
@@ -1960,13 +1983,14 @@
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 						rdata->rx.buf.pa.pages,
 						rdata->rx.buf.pa.pages_offset,
-						rdesc_len,
+						buf2_len,
 						rdata->rx.buf.dma_len);
 				rdata->rx.buf.pa.pages = NULL;
 			}
 		}
 
-		if (incomplete || context_next)
+skip_data:
+		if (!last || context_next)
 			goto read_again;
 
 		if (!skb)
@@ -2024,7 +2048,7 @@
 	}
 
 	/* Check if we need to save state before leaving */
-	if (received && (incomplete || context_next)) {
+	if (received && (!last || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
 		rdata->state.skb = skb;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a4e60e5..0975af2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3402,7 +3402,8 @@
 
 	bcmgenet_netif_stop(dev);
 
-	phy_suspend(priv->phydev);
+	if (!device_may_wakeup(d))
+		phy_suspend(priv->phydev);
 
 	netif_device_detach(dev);
 
@@ -3499,7 +3500,8 @@
 
 	netif_device_attach(dev);
 
-	phy_resume(priv->phydev);
+	if (!device_may_wakeup(d))
+		phy_resume(priv->phydev);
 
 	if (priv->eee.eee_enabled)
 		bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e876076..2f92819 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -220,20 +220,6 @@
 	udelay(60);
 }
 
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	u32 reg;
-
-	/* Power up PHY */
-	bcmgenet_phy_power_set(dev, true);
-	/* enable APD */
-	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-	reg |= EXT_PWR_DN_EN_LD;
-	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	bcmgenet_mii_reset(dev);
-}
-
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 {
 	u32 reg;
@@ -281,7 +267,6 @@
 
 		if (priv->internal_phy) {
 			phy_name = "internal PHY";
-			bcmgenet_internal_phy_setup(dev);
 		} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 			phy_name = "MoCA";
 			bcmgenet_moca_phy_setup(priv);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a36022b..03dca73 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1181,7 +1181,9 @@
 
 static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
 {
+	struct tcphdr *tcph;
 	int offset = 0;
+	int hdr_len;
 
 	/* only TCP packets will be aggregated */
 	if (skb->protocol == htons(ETH_P_IP)) {
@@ -1208,14 +1210,20 @@
 	/* if mss is not set through Large Packet bit/mss in rx buffer,
 	 * expect that the mss will be written to the tcp header checksum.
 	 */
+	tcph = (struct tcphdr *)(skb->data + offset);
 	if (lrg_pkt) {
 		skb_shinfo(skb)->gso_size = mss;
 	} else if (offset) {
-		struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
-
 		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
 		tcph->check = 0;
 	}
+
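+	/* With a recovered MSS, derive gso_segs from the aggregated length */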
+	if (skb_shinfo(skb)->gso_size) {
+		hdr_len = offset + tcph->doff * 4;
+		skb_shinfo(skb)->gso_segs =
+				DIV_ROUND_UP(skb->len - hdr_len,
+					     skb_shinfo(skb)->gso_size);
+	}
 }
 
 static int ibmveth_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 5b54254..2788a54 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -77,6 +77,10 @@
 	s32 ret_val = 0;
 	u16 phy_id;
 
+	/* ensure PHY page selection to fix misconfigured i210 */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+		phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
 	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
 	if (ret_val)
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index bfe410e..3f51a44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -367,6 +367,8 @@
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_ALLOC_PD:
 	case MLX5_CMD_OP_ALLOC_UAR:
 	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -500,6 +502,8 @@
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
 	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b306713..d4fa851 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -81,6 +81,7 @@
 static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
 {
 	priv->params.rq_wq_type = rq_type;
+	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 	switch (priv->params.rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
@@ -92,6 +93,10 @@
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
 		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+		/* Extra room needed for build_skb */
+		priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	}
 	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
 					       BIT(priv->params.log_rq_size));
@@ -3473,12 +3478,6 @@
 	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
 				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
 
-	priv->params.lro_wqe_sz =
-		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
-		/* Extra room needed for build_skb */
-		MLX5_RX_HEADROOM -
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
 	/* Initialize pflags */
 	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
 			    priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
@@ -3936,6 +3935,19 @@
 	}
 }
 
+static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+	int vport;
+
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return;
+
+	for (vport = 1; vport < total_vfs; vport++)
+		mlx5_eswitch_unregister_vport_rep(esw, vport);
+}
+
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3983,6 +3995,7 @@
 		return err;
 	}
 
+	mlx5e_register_vport_rep(mdev);
 	return 0;
 }
 
@@ -3994,6 +4007,7 @@
 	if (!netif_device_present(netdev))
 		return;
 
+	mlx5e_unregister_vport_rep(mdev);
 	mlx5e_detach_netdev(mdev, netdev);
 	mlx5e_destroy_mdev_resources(mdev);
 }
@@ -4012,8 +4026,6 @@
 	if (err)
 		return NULL;
 
-	mlx5e_register_vport_rep(mdev);
-
 	if (MLX5_CAP_GEN(mdev, vport_group_manager))
 		ppriv = &esw->offloads.vport_reps[0];
 
@@ -4065,13 +4077,7 @@
 
 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 {
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	struct mlx5e_priv *priv = vpriv;
-	int vport;
-
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
 
 	unregister_netdev(priv->netdev);
 	mlx5e_detach(mdev, vpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e7b2158..7309ae3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -92,19 +92,18 @@
 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 					struct mlx5e_cq *cq, u32 cqcc)
 {
-	u16 wqe_cnt_step;
-
 	cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
 	cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
 	cq->title.op_own      &= 0xf0;
 	cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.log_sz);
 	cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);
 
-	wqe_cnt_step =
-		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-		mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
-	cq->decmprs_wqe_counter =
-		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		cq->decmprs_wqe_counter +=
+			mpwrq_get_cqe_consumed_strides(&cq->title);
+	else
+		cq->decmprs_wqe_counter =
+			(cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
 }
 
 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
@@ -603,6 +602,10 @@
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+		/* Subtract one since we already counted this as one
+		 * "regular" packet in mlx5e_complete_rx_cqe()
+		 */
+		rq->stats.packets += lro_num_seg - 1;
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index a543ea6..3fd471a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -427,14 +427,16 @@
 		}
 
 		if (is_tcf_vlan(a)) {
-			if (tcf_vlan_action(a) == VLAN_F_POP) {
+			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
 					return -EOPNOTSUPP;
 
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
 				attr->vlan = tcf_vlan_push_vid(a);
+			} else { /* action is TCA_VLAN_ACT_MODIFY */
+				return -EOPNOTSUPP;
 			}
 			continue;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cfb6837..5743110 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -272,15 +272,18 @@
 			sq->stats.tso_bytes += skb->len - ihs;
 		}
 
+		sq->stats.packets += skb_shinfo(skb)->gso_segs;
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
 		     !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		sq->stats.packets++;
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
+	sq->stats.bytes += num_bytes;
 	wi->num_bytes = num_bytes;
 
 	if (skb_vlan_tag_present(skb)) {
@@ -377,8 +380,6 @@
 	if (bf)
 		sq->bf_budget--;
 
-	sq->stats.packets++;
-	sq->stats.bytes += num_bytes;
 	return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0c9ef87..7a196a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -87,7 +87,7 @@
 	[2] = {
 		.mask		= MLX5_PROF_MASK_QP_SIZE |
 				  MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp	= 17,
+		.log_max_qp	= 18,
 		.mr_cache[0]	= {
 			.size	= 500,
 			.limit	= 250
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e83072d..6905630 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -500,30 +500,40 @@
 mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
 			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
 {
-	struct mlxsw_sp_lpm_tree *lpm_tree;
+	struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
+	struct mlxsw_sp_lpm_tree *new_tree;
+	int err;
 
-	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
-				     &vr->lpm_tree->prefix_usage))
+	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
 		return 0;
 
-	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
 					 vr->proto, false);
-	if (IS_ERR(lpm_tree)) {
+	if (IS_ERR(new_tree)) {
 		/* We failed to get a tree according to the required
 		 * prefix usage. However, the current tree might be still good
 		 * for us if our requirement is subset of the prefixes used
 		 * in the tree.
 		 */
 		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
-						 &vr->lpm_tree->prefix_usage))
+						 &lpm_tree->prefix_usage))
 			return 0;
-		return PTR_ERR(lpm_tree);
+		return PTR_ERR(new_tree);
 	}
 
-	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
-	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+	/* Prevent packet loss by overwriting existing binding */
+	vr->lpm_tree = new_tree;
+	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+	if (err)
+		goto err_tree_bind;
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+	return 0;
+
+err_tree_bind:
 	vr->lpm_tree = lpm_tree;
-	return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+	return err;
 }
 
 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 8b4822a..3c1f89a 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,16 +1039,22 @@
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct ip_tunnel_info *info = NULL;
+	int err;
 
 	if (geneve->collect_md)
 		info = skb_tunnel_info(skb);
 
+	rcu_read_lock();
 #if IS_ENABLED(CONFIG_IPV6)
 	if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
 	    (!info && geneve->remote.sa.sa_family == AF_INET6))
-		return geneve6_xmit_skb(skb, dev, info);
+		err = geneve6_xmit_skb(skb, dev, info);
+	else
 #endif
-	return geneve_xmit_skb(skb, dev, info);
+		err = geneve_xmit_skb(skb, dev, info);
+	rcu_read_unlock();
+
+	return err;
 }
 
 static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f424b86..201ffa5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -611,14 +611,18 @@
  * phy_trigger_machine - trigger the state machine to run
  *
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: There has been a change in state which requires that the
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
-	cancel_delayed_work_sync(&phydev->state_queue);
+	if (sync)
+		cancel_delayed_work_sync(&phydev->state_queue);
+	else
+		cancel_delayed_work(&phydev->state_queue);
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 }
 
@@ -655,7 +659,7 @@
 	phydev->state = PHY_HALTED;
 	mutex_unlock(&phydev->lock);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, false);
 }
 
 /**
@@ -817,7 +821,7 @@
 	}
 
 	/* reschedule state queue work to run as soon as possible */
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 	return;
 
 ignore:
@@ -907,7 +911,7 @@
 	if (do_resume)
 		phy_resume(phydev);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e686b70..4b7a363 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -819,7 +819,18 @@
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
+	struct tun_struct *tun = netdev_priv(dev);
+	int i;
+
 	netif_tx_start_all_queues(dev);
+
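+	/* Wake any writers waiting for POLLOUT now that the device is up */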
+	for (i = 0; i < tun->numqueues; i++) {
+		struct tun_file *tfile;
+
+		tfile = rtnl_dereference(tun->tfiles[i]);
+		tfile->socket.sk->sk_write_space(tfile->socket.sk);
+	}
+
 	return 0;
 }
 
@@ -1116,9 +1127,10 @@
 	if (!skb_array_empty(&tfile->tx_array))
 		mask |= POLLIN | POLLRDNORM;
 
-	if (sock_writeable(sk) ||
-	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-	     sock_writeable(sk)))
+	if (tun->dev->flags & IFF_UP &&
+	    (sock_writeable(sk) ||
+	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+	      sock_writeable(sk))))
 		mask |= POLLOUT | POLLWRNORM;
 
 	if (tun->dev->reg_state != NETREG_REGISTERED)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 24d5272..0d519a9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -924,6 +924,8 @@
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95cf1d8..a2afb8e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -346,6 +346,7 @@
 
 static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	int len = skb->len;
 	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -353,7 +354,7 @@
 
 		u64_stats_update_begin(&dstats->syncp);
 		dstats->tx_pkts++;
-		dstats->tx_bytes += skb->len;
+		dstats->tx_bytes += len;
 		u64_stats_update_end(&dstats->syncp);
 	} else {
 		this_cpu_inc(dev->dstats->tx_drps);
@@ -466,8 +467,10 @@
 	}
 
 	if (rt6_local) {
-		if (rt6_local->rt6i_idev)
+		if (rt6_local->rt6i_idev) {
 			in6_dev_put(rt6_local->rt6i_idev);
+			rt6_local->rt6i_idev = NULL;
+		}
 
 		dst = &rt6_local->dst;
 		dev_put(dst->dev);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d4f495b..3c4c2cf 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1942,7 +1942,6 @@
 	const struct iphdr *old_iph;
 	union vxlan_addr *dst;
 	union vxlan_addr remote_ip, local_ip;
-	union vxlan_addr *src;
 	struct vxlan_metadata _md;
 	struct vxlan_metadata *md = &_md;
 	__be16 src_port = 0, dst_port;
@@ -1956,11 +1955,12 @@
 
 	info = skb_tunnel_info(skb);
 
+	rcu_read_lock();
 	if (rdst) {
 		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
 		vni = rdst->remote_vni;
 		dst = &rdst->remote_ip;
-		src = &vxlan->cfg.saddr;
+		local_ip = vxlan->cfg.saddr;
 		dst_cache = &rdst->dst_cache;
 	} else {
 		if (!info) {
@@ -1979,7 +1979,6 @@
 			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
 		}
 		dst = &remote_ip;
-		src = &local_ip;
 		dst_cache = &info->dst_cache;
 	}
 
@@ -1987,7 +1986,7 @@
 		if (did_rsc) {
 			/* short-circuited back to local bridge */
 			vxlan_encap_bypass(skb, vxlan, vxlan);
-			return;
+			goto out_unlock;
 		}
 		goto drop;
 	}
@@ -2028,7 +2027,7 @@
 		rt = vxlan_get_route(vxlan, skb,
 				     rdst ? rdst->remote_ifindex : 0, tos,
 				     dst->sin.sin_addr.s_addr,
-				     &src->sin.sin_addr.s_addr,
+				     &local_ip.sin.sin_addr.s_addr,
 				     dst_cache, info);
 		if (IS_ERR(rt)) {
 			netdev_dbg(dev, "no route to %pI4\n",
@@ -2056,7 +2055,7 @@
 			if (!dst_vxlan)
 				goto tx_error;
 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
-			return;
+			goto out_unlock;
 		}
 
 		if (!info)
@@ -2071,7 +2070,7 @@
 		if (err < 0)
 			goto xmit_tx_error;
 
-		udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
+		udp_tunnel_xmit_skb(rt, sk, skb, local_ip.sin.sin_addr.s_addr,
 				    dst->sin.sin_addr.s_addr, tos, ttl, df,
 				    src_port, dst_port, xnet, !udp_sum);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2087,7 +2086,7 @@
 		ndst = vxlan6_get_route(vxlan, skb,
 					rdst ? rdst->remote_ifindex : 0, tos,
 					label, &dst->sin6.sin6_addr,
-					&src->sin6.sin6_addr,
+					&local_ip.sin6.sin6_addr,
 					dst_cache, info);
 		if (IS_ERR(ndst)) {
 			netdev_dbg(dev, "no route to %pI6\n",
@@ -2117,7 +2116,7 @@
 			if (!dst_vxlan)
 				goto tx_error;
 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
-			return;
+			goto out_unlock;
 		}
 
 		if (!info)
@@ -2131,15 +2130,16 @@
 		if (err < 0) {
 			dst_release(ndst);
 			dev->stats.tx_errors++;
-			return;
+			goto out_unlock;
 		}
 		udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
-				     &src->sin6.sin6_addr,
+				     &local_ip.sin6.sin6_addr,
 				     &dst->sin6.sin6_addr, tos, ttl,
 				     label, src_port, dst_port, !udp_sum);
 #endif
 	}
-
+out_unlock:
+	rcu_read_unlock();
 	return;
 
 drop:
@@ -2155,6 +2155,7 @@
 	dev->stats.tx_errors++;
 tx_free:
 	dev_kfree_skb(skb);
+	rcu_read_unlock();
 }
 
 /* Transmit local packets over Vxlan
@@ -2637,7 +2638,7 @@
 
 	if (data[IFLA_VXLAN_ID]) {
 		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
-		if (id >= VXLAN_VID_MASK)
+		if (id >= VXLAN_N_VID)
 			return -ERANGE;
 	}
 
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index f00d429..91594de 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -25,3 +25,5 @@
 obj-$(CONFIG_USB_NET_RNDIS_WLAN)	+= rndis_wlan.o
 
 obj-$(CONFIG_MAC80211_HWSIM)	+= mac80211_hwsim.o
+
+obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 133f6b5..2c48419 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -887,6 +887,10 @@
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
+	wil_dbg_misc(wil, "Prevent DS in BL & mark FW to set T_POWER_ON=0\n");
+	wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP |
+	      BIT_USER_SUPPORT_T_POWER_ON_0);
+
 	if (wil->platform_ops.notify) {
 		rc = wil->platform_ops.notify(wil->platform_handle,
 					      WIL_PLATFORM_EVT_PRE_RESET);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index bfffc0e..4bccef3 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -140,6 +140,9 @@
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
 	#define BIT_USER_OOB_MODE		BIT(31)
+#define RGF_USER_USAGE_8		(0x880020)
+	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
+	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index 636f466..61de231 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012,2014-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012,2014-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -207,6 +207,8 @@
 		print_stack_trace(&wcnss_allocs[i].trace, 1);
 	}
 }
+#else
+void wcnss_prealloc_check_memory_leak(void) {}
 #endif
 
 int wcnss_pre_alloc_reset(void)
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 3c3c4f1..7a310c4 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2700,6 +2700,21 @@
 	schedule_work(&pcie_work);
 }
 
+static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
+{
+	struct pcie_service_card *card = adapter->card;
+	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+	if (reg->sleep_cookie)
+		mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
+	mwifiex_pcie_delete_cmdrsp_buf(adapter);
+	mwifiex_pcie_delete_evtbd_ring(adapter);
+	mwifiex_pcie_delete_rxbd_ring(adapter);
+	mwifiex_pcie_delete_txbd_ring(adapter);
+	card->cmdrsp_buf = NULL;
+}
+
 /*
  * This function initializes the PCI-E host memory space, WCB rings, etc.
  *
@@ -2812,13 +2827,6 @@
 
 /*
  * This function cleans up the allocated card buffers.
- *
- * The following are freed by this function -
- *      - TXBD ring buffers
- *      - RXBD ring buffers
- *      - Event BD ring buffers
- *      - Command response ring buffer
- *      - Sleep cookie buffer
  */
 static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
 {
@@ -2834,6 +2842,8 @@
 				    "Failed to write driver not-ready signature\n");
 	}
 
+	mwifiex_pcie_free_buffers(adapter);
+
 	if (pdev) {
 		pci_iounmap(pdev, card->pci_mmap);
 		pci_iounmap(pdev, card->pci_mmap1);
@@ -3080,10 +3090,7 @@
 	pci_iounmap(pdev, card->pci_mmap1);
 }
 
-/* This function cleans up the PCI-E host memory space.
- * Some code is extracted from mwifiex_unregister_dev()
- *
- */
+/* This function cleans up the PCI-E host memory space. */
 static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 {
 	struct pcie_service_card *card = adapter->card;
@@ -3095,16 +3102,8 @@
 	adapter->seq_num = 0;
 	adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
 
-	if (card) {
-		if (reg->sleep_cookie)
-			mwifiex_pcie_delete_sleep_cookie_buf(adapter);
-
-		mwifiex_pcie_delete_cmdrsp_buf(adapter);
-		mwifiex_pcie_delete_evtbd_ring(adapter);
-		mwifiex_pcie_delete_rxbd_ring(adapter);
-		mwifiex_pcie_delete_txbd_ring(adapter);
-		card->cmdrsp_buf = NULL;
-	}
+	if (card)
+		mwifiex_pcie_free_buffers(adapter);
 
 	return;
 }
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 7810bad..52a297d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -118,4 +118,9 @@
 config OF_NUMA
 	bool
 
+config OF_BATTERYDATA
+	def_bool y
+	help
+	  OpenFirmware battery data accessors, used to read battery profile
+	  data (lookup tables and charging parameters) from the device tree.
+
 endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 4b8dabe..b2f474a 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -15,5 +15,6 @@
 obj-$(CONFIG_OF_OVERLAY) += overlay.o
 obj-$(CONFIG_OF_SLIMBUS)        += of_slimbus.o
 obj-$(CONFIG_OF_NUMA) += of_numa.o
+obj-$(CONFIG_OF_BATTERYDATA) += of_batterydata.o
 
 obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
new file mode 100644
index 0000000..43417b2
--- /dev/null
+++ b/drivers/of/of_batterydata.c
@@ -0,0 +1,457 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/batterydata-lib.h>
+#include <linux/power_supply.h>
+
+static int of_batterydata_read_lut(const struct device_node *np,
+			int max_cols, int max_rows, int *ncols, int *nrows,
+			int *col_legend_data, int *row_legend_data,
+			int *lut_data)
+{
+	struct property *prop;
+	const __be32 *data;
+	int cols, rows, size, i, j, *out_values;
+
+	prop = of_find_property(np, "qcom,lut-col-legend", NULL);
+	if (!prop) {
+		pr_err("%s: No col legend found\n", np->name);
+		return -EINVAL;
+	} else if (!prop->value) {
+		pr_err("%s: No col legend value found, np->name\n", np->name);
+		return -ENODATA;
+	} else if (prop->length > max_cols * sizeof(int)) {
+		pr_err("%s: Too many columns\n", np->name);
+		return -EINVAL;
+	}
+
+	cols = prop->length/sizeof(int);
+	*ncols = cols;
+	data = prop->value;
+	for (i = 0; i < cols; i++)
+		*col_legend_data++ = be32_to_cpup(data++);
+
+	rows = 0;
+
+	prop = of_find_property(np, "qcom,lut-row-legend", NULL);
+	if (!prop || row_legend_data == NULL) {
+		/* single row lut */
+		rows = 1;
+	} else if (!prop->value) {
+		pr_err("%s: No row legend value found\n", np->name);
+		return -ENODATA;
+	} else if (prop->length > max_rows * sizeof(int)) {
+		pr_err("%s: Too many rows\n", np->name);
+		return -EINVAL;
+	}
+
+	if (rows != 1) {
+		rows = prop->length/sizeof(int);
+		*nrows = rows;
+		data = prop->value;
+		for (i = 0; i < rows; i++)
+			*row_legend_data++ = be32_to_cpup(data++);
+	}
+
+	prop = of_find_property(np, "qcom,lut-data", NULL);
+	if (!prop) {
+		pr_err("prop 'qcom,lut-data' not found\n");
+		return -EINVAL;
+	}
+	data = prop->value;
+	size = prop->length/sizeof(int);
+	if (size != cols * rows) {
+		pr_err("%s: data size mismatch, %dx%d != %d\n",
+				np->name, cols, rows, size);
+		return -EINVAL;
+	}
+	for (i = 0; i < rows; i++) {
+		out_values = lut_data + (max_cols * i);
+		for (j = 0; j < cols; j++) {
+			*out_values++ = be32_to_cpup(data++);
+			pr_debug("Value = %d\n", *(out_values-1));
+		}
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_sf_lut(struct device_node *data_node,
+				const char *name, struct sf_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+
+	rc = of_batterydata_read_lut(node, PC_CC_COLS, PC_CC_ROWS,
+			&lut->cols, &lut->rows, lut->row_entries,
+			lut->percent, *lut->sf);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_pc_temp_ocv_lut(struct device_node *data_node,
+				const char *name, struct pc_temp_ocv_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+	rc = of_batterydata_read_lut(node, PC_TEMP_COLS, PC_TEMP_ROWS,
+			&lut->cols, &lut->rows, lut->temp, lut->percent,
+			*lut->ocv);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_ibat_temp_acc_lut(struct device_node *data_node,
+			const char *name, struct ibat_temp_acc_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_debug("Couldn't find %s node.\n", name);
+		return 0;
+	}
+	rc = of_batterydata_read_lut(node, ACC_TEMP_COLS, ACC_IBAT_ROWS,
+			&lut->cols, &lut->rows, lut->temp, lut->ibat,
+			*lut->acc);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_single_row_lut(struct device_node *data_node,
+				const char *name, struct single_row_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+
+	rc = of_batterydata_read_lut(node, MAX_SINGLE_LUT_COLS, 1,
+			&lut->cols, NULL, lut->x, NULL, lut->y);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_batt_id_kohm(const struct device_node *np,
+				const char *propname, struct batt_ids *batt_ids)
+{
+	struct property *prop;
+	const __be32 *data;
+	int num, i, *id_kohm = batt_ids->kohm;
+
+	prop = of_find_property(np, "qcom,batt-id-kohm", NULL);
+	if (!prop) {
+		pr_err("%s: No battery id resistor found\n", np->name);
+		return -EINVAL;
+	} else if (!prop->value) {
+		pr_err("%s: No battery id resistor value found, np->name\n",
+						np->name);
+		return -ENODATA;
+	} else if (prop->length > MAX_BATT_ID_NUM * sizeof(__be32)) {
+		pr_err("%s: Too many battery id resistors\n", np->name);
+		return -EINVAL;
+	}
+
+	num = prop->length/sizeof(__be32);
+	batt_ids->num = num;
+	data = prop->value;
+	for (i = 0; i < num; i++)
+		*id_kohm++ = be32_to_cpup(data++);
+
+	return 0;
+}
+
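+/*
+ * Read the u32 property "qcom,<qpnp_dt_property>" from 'node' into 'property'.
+ * A missing property is tolerated when 'optional' is true (the value is left
+ * as -EINVAL); any other failure is reported and kept in 'rc', which also
+ * short-circuits subsequent OF_PROP_READ() invocations.
+ */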
+#define OF_PROP_READ(property, qpnp_dt_property, node, rc, optional)	\
+do {									\
+	if (rc)								\
+		break;							\
+	rc = of_property_read_u32(node, "qcom," qpnp_dt_property,	\
+					&property);			\
+									\
+	if ((rc == -EINVAL) && optional) {				\
+		property = -EINVAL;					\
+		rc = 0;							\
+	} else if (rc) {						\
+		pr_err("Error reading " #qpnp_dt_property		\
+				" property rc = %d\n", rc);		\
+	}								\
+} while (0)
+
+static int of_batterydata_load_battery_data(struct device_node *node,
+				int best_id_kohm,
+				struct bms_battery_data *batt_data)
+{
+	int rc;
+
+	rc = of_batterydata_read_single_row_lut(node, "qcom,fcc-temp-lut",
+			batt_data->fcc_temp_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_pc_temp_ocv_lut(node,
+			"qcom,pc-temp-ocv-lut",
+			batt_data->pc_temp_ocv_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_sf_lut(node, "qcom,rbatt-sf-lut",
+			batt_data->rbatt_sf_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_ibat_temp_acc_lut(node, "qcom,ibat-acc-lut",
+						batt_data->ibat_acc_lut);
+	if (rc)
+		return rc;
+
+	rc = of_property_read_string(node, "qcom,battery-type",
+					&batt_data->battery_type);
+	if (rc) {
+		pr_err("Error reading qcom,battery-type property rc=%d\n", rc);
+		batt_data->battery_type = NULL;
+		return rc;
+	}
+
+	OF_PROP_READ(batt_data->fcc, "fcc-mah", node, rc, false);
+	OF_PROP_READ(batt_data->default_rbatt_mohm,
+			"default-rbatt-mohm", node, rc, false);
+	OF_PROP_READ(batt_data->rbatt_capacitive_mohm,
+			"rbatt-capacitive-mohm", node, rc, false);
+	OF_PROP_READ(batt_data->flat_ocv_threshold_uv,
+			"flat-ocv-threshold-uv", node, rc, true);
+	OF_PROP_READ(batt_data->max_voltage_uv,
+			"max-voltage-uv", node, rc, true);
+	OF_PROP_READ(batt_data->cutoff_uv, "v-cutoff-uv", node, rc, true);
+	OF_PROP_READ(batt_data->iterm_ua, "chg-term-ua", node, rc, true);
+	OF_PROP_READ(batt_data->fastchg_current_ma,
+			"fastchg-current-ma", node, rc, true);
+	OF_PROP_READ(batt_data->fg_cc_cv_threshold_mv,
+			"fg-cc-cv-threshold-mv", node, rc, true);
+
+	batt_data->batt_id_kohm = best_id_kohm;
+
+	return rc;
+}
+
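+/*
+ * The battery id pin is read through a divider: rpull_up (kohm) from the ADC
+ * reference vadc_vdd down to the measured node, with the battery id resistor
+ * to ground, so R_id = rpull_up * V_id / (V_vdd - V_id).  The code below
+ * evaluates the same expression in 64-bit fixed point (scaled by 1000000)
+ * with rounding.
+ */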
+static int64_t of_batterydata_convert_battery_id_kohm(int batt_id_uv,
+				int rpull_up, int vadc_vdd)
+{
+	int64_t resistor_value_kohm, denom;
+
+	if (batt_id_uv == 0) {
+		/* vadc not correct or batt id line grounded, report 0 kohms */
+		return 0;
+	}
+	/* calculate the battery id resistance reported via ADC */
+	denom = div64_s64(vadc_vdd * 1000000LL, batt_id_uv) - 1000000LL;
+
+	if (denom == 0) {
+		/* batt id connector might be open, return 0 kohms */
+		return 0;
+	}
+	resistor_value_kohm = div64_s64(rpull_up * 1000000LL + denom/2, denom);
+
+	pr_debug("batt id voltage = %d, resistor value = %lld\n",
+			batt_id_uv, resistor_value_kohm);
+
+	return resistor_value_kohm;
+}
+
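+/*
+ * Pick the best matching battery profile child node: an exact
+ * "qcom,battery-type" match when batt_type is supplied, otherwise the node
+ * whose "qcom,batt-id-kohm" entry is closest to batt_id_kohm while staying
+ * within "qcom,batt-id-range-pct" percent of it.
+ */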
+struct device_node *of_batterydata_get_best_profile(
+		const struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type)
+{
+	struct batt_ids batt_ids;
+	struct device_node *node, *best_node = NULL;
+	const char *battery_type = NULL;
+	int delta = 0, best_delta = 0, best_id_kohm = 0, id_range_pct,
+		i = 0, rc = 0, limit = 0;
+	bool in_range = false;
+
+	/* read battery id range percentage for best profile */
+	rc = of_property_read_u32(batterydata_container_node,
+			"qcom,batt-id-range-pct", &id_range_pct);
+
+	if (rc) {
+		if (rc == -EINVAL) {
+			id_range_pct = 0;
+		} else {
+			pr_err("failed to read battery id range\n");
+			return ERR_PTR(-ENXIO);
+		}
+	}
+
+	/*
+	 * Find the battery data with a battery id resistor closest to this one
+	 */
+	for_each_child_of_node(batterydata_container_node, node) {
+		if (batt_type != NULL) {
+			rc = of_property_read_string(node, "qcom,battery-type",
+							&battery_type);
+			if (!rc && strcmp(battery_type, batt_type) == 0) {
+				best_node = node;
+				best_id_kohm = batt_id_kohm;
+				break;
+			}
+		} else {
+			rc = of_batterydata_read_batt_id_kohm(node,
+							"qcom,batt-id-kohm",
+							&batt_ids);
+			if (rc)
+				continue;
+			for (i = 0; i < batt_ids.num; i++) {
+				delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+				limit = (batt_ids.kohm[i] * id_range_pct) / 100;
+				in_range = (delta <= limit);
+				/*
+				 * Check if the delta is the lowest one
+				 * and also if the limits are in range
+				 * before selecting the best node.
+				 */
+				if ((delta < best_delta || !best_node)
+					&& in_range) {
+					best_node = node;
+					best_delta = delta;
+					best_id_kohm = batt_ids.kohm[i];
+				}
+			}
+		}
+	}
+
+	if (best_node == NULL) {
+		pr_err("No battery data found\n");
+		return best_node;
+	}
+
+	/* check that profile id is in range of the measured batt_id */
+	if (abs(best_id_kohm - batt_id_kohm) >
+			((best_id_kohm * id_range_pct) / 100)) {
+		pr_err("out of range: profile id %d batt id %d pct %d",
+			best_id_kohm, batt_id_kohm, id_range_pct);
+		return NULL;
+	}
+
+	rc = of_property_read_string(best_node, "qcom,battery-type",
+							&battery_type);
+	if (!rc)
+		pr_info("%s found\n", battery_type);
+	else
+		pr_info("%s found\n", best_node->name);
+
+	return best_node;
+}
+
+int of_batterydata_read_data(struct device_node *batterydata_container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv)
+{
+	struct device_node *node, *best_node;
+	struct batt_ids batt_ids;
+	const char *battery_type = NULL;
+	int delta, best_delta, batt_id_kohm, rpull_up_kohm,
+		vadc_vdd_uv, best_id_kohm, i, rc = 0;
+
+	node = batterydata_container_node;
+	OF_PROP_READ(rpull_up_kohm, "rpull-up-kohm", node, rc, false);
+	OF_PROP_READ(vadc_vdd_uv, "vref-batt-therm", node, rc, false);
+	if (rc)
+		return rc;
+
+	batt_id_kohm = of_batterydata_convert_battery_id_kohm(batt_id_uv,
+					rpull_up_kohm, vadc_vdd_uv);
+	best_node = NULL;
+	best_delta = 0;
+	best_id_kohm = 0;
+
+	/*
+	 * Find the battery data with a battery id resistor closest to this one
+	 */
+	for_each_child_of_node(batterydata_container_node, node) {
+		rc = of_batterydata_read_batt_id_kohm(node,
+						"qcom,batt-id-kohm",
+						&batt_ids);
+		if (rc)
+			continue;
+		for (i = 0; i < batt_ids.num; i++) {
+			delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+			if (delta < best_delta || !best_node) {
+				best_node = node;
+				best_delta = delta;
+				best_id_kohm = batt_ids.kohm[i];
+			}
+		}
+	}
+
+	if (best_node == NULL) {
+		pr_err("No battery data found\n");
+		return -ENODATA;
+	}
+	rc = of_property_read_string(best_node, "qcom,battery-type",
+							&battery_type);
+	if (!rc)
+		pr_info("%s loaded\n", battery_type);
+	else
+		pr_info("%s loaded\n", best_node->name);
+
+	return of_batterydata_load_battery_data(best_node,
+					best_id_kohm, batt_data);
+}
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 3308427..4399de34 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -939,8 +939,10 @@
 	 * pardevice fields. -arca
 	 */
 	port->ops->init_state(par_dev, par_dev->state);
-	port->proc_device = par_dev;
-	parport_device_proc_register(par_dev);
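+	/* Register the proc entry only once per port. */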
+	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+		port->proc_device = par_dev;
+		parport_device_proc_register(par_dev);
+	}
 
 	return par_dev;
 
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e30f05c..4722782 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -306,13 +306,6 @@
 			return rc;
 	}
 
-	pci_iov_set_numvfs(dev, nr_virtfn);
-	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
-	pci_cfg_access_lock(dev);
-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-	msleep(100);
-	pci_cfg_access_unlock(dev);
-
 	iov->initial_VFs = initial;
 	if (nr_virtfn < initial)
 		initial = nr_virtfn;
@@ -323,6 +316,13 @@
 		goto err_pcibios;
 	}
 
+	pci_iov_set_numvfs(dev, nr_virtfn);
+	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+	pci_cfg_access_lock(dev);
+	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+	msleep(100);
+	pci_cfg_access_unlock(dev);
+
 	for (i = 0; i < initial; i++) {
 		rc = pci_iov_add_virtfn(dev, i, 0);
 		if (rc)
@@ -554,21 +554,61 @@
 }
 
 /**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
  * @dev: the PCI device
  * @resno: the resource number
  *
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
  */
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
 {
-	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
-		return 0;
+	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+	struct resource *res = dev->resource + resno;
+	int vf_bar = resno - PCI_IOV_RESOURCES;
+	struct pci_bus_region region;
+	u16 cmd;
+	u32 new;
+	int reg;
 
-	BUG_ON(!dev->is_physfn);
+	/*
+	 * The generic pci_restore_bars() path calls this for all devices,
+	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
+	 * have nothing to do.
+	 */
+	if (!iov)
+		return;
 
-	return dev->sriov->pos + PCI_SRIOV_BAR +
-		4 * (resno - PCI_IOV_RESOURCES);
+	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+			 vf_bar, res);
+		return;
+	}
+
+	/*
+	 * Ignore unimplemented BARs, unused resource slots for 64-bit
+	 * BARs, and non-movable resources, e.g., those described via
+	 * Enhanced Allocation.
+	 */
+	if (!res->flags)
+		return;
+
+	if (res->flags & IORESOURCE_UNSET)
+		return;
+
+	if (res->flags & IORESOURCE_PCI_FIXED)
+		return;
+
+	pcibios_resource_to_bus(dev->bus, &region, res);
+	new = region.start;
+	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+
+	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+	pci_write_config_dword(dev, reg, new);
+	if (res->flags & IORESOURCE_MEM_64) {
+		new = region.start >> 16 >> 16;
+		pci_write_config_dword(dev, reg + 4, new);
+	}
 }
 
 resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index eda6a7c..6922964 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -564,10 +564,6 @@
 {
 	int i;
 
-	/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
-	if (dev->is_virtfn)
-		return;
-
 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 		pci_update_resource(dev, i);
 }
@@ -4835,36 +4831,6 @@
 }
 EXPORT_SYMBOL(pci_select_bars);
 
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
-	int reg;
-
-	if (resno < PCI_ROM_RESOURCE) {
-		*type = pci_bar_unknown;
-		return PCI_BASE_ADDRESS_0 + 4 * resno;
-	} else if (resno == PCI_ROM_RESOURCE) {
-		*type = pci_bar_mem32;
-		return dev->rom_base_reg;
-	} else if (resno < PCI_BRIDGE_RESOURCES) {
-		/* device specific resource */
-		*type = pci_bar_unknown;
-		reg = pci_iov_resource_bar(dev, resno);
-		if (reg)
-			return reg;
-	}
-
-	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
-	return 0;
-}
-
 /* Some architectures require additional programming to enable VGA */
 static arch_set_vga_state_t arch_set_vga_state;
 
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4518562..a5d37f6 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -245,7 +245,6 @@
 int pci_setup_device(struct pci_dev *dev);
 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		    struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
 void pci_configure_ari(struct pci_dev *dev);
 void __pci_bus_size_bridges(struct pci_bus *bus,
 			struct list_head *realloc_head);
@@ -289,7 +288,7 @@
 #ifdef CONFIG_PCI_IOV
 int pci_iov_init(struct pci_dev *dev);
 void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 void pci_restore_iov_state(struct pci_dev *dev);
 int pci_iov_bus_range(struct pci_bus *bus);
@@ -303,10 +302,6 @@
 
 {
 }
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
-	return 0;
-}
 static inline void pci_restore_iov_state(struct pci_dev *dev)
 {
 }
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 300770c..d266d80 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -227,7 +227,8 @@
 			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
 		}
 	} else {
-		res->flags |= (l & IORESOURCE_ROM_ENABLE);
+		if (l & PCI_ROM_ADDRESS_ENABLE)
+			res->flags |= IORESOURCE_ROM_ENABLE;
 		l64 = l & PCI_ROM_ADDRESS_MASK;
 		sz64 = sz & PCI_ROM_ADDRESS_MASK;
 		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 06663d3..b6edb18 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -35,6 +35,11 @@
 	if (res->flags & IORESOURCE_ROM_SHADOW)
 		return 0;
 
+	/*
+	 * Ideally pci_update_resource() would update the ROM BAR address,
+	 * and we would only set the enable bit here.  But apparently some
+	 * devices have buggy ROM BARs that read as zero when disabled.
+	 */
 	pcibios_resource_to_bus(pdev->bus, &region, res);
 	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
 	rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 9526e34..4bc589e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
 #include <linux/slab.h>
 #include "pci.h"
 
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
 {
 	struct pci_bus_region region;
 	bool disable;
 	u16 cmd;
 	u32 new, check, mask;
 	int reg;
-	enum pci_bar_type type;
 	struct resource *res = dev->resource + resno;
 
-	if (dev->is_virtfn) {
-		dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+	/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+	if (dev->is_virtfn)
 		return;
-	}
 
 	/*
 	 * Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@
 		return;
 
 	pcibios_resource_to_bus(dev->bus, &region, res);
+	new = region.start;
 
-	new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
-	if (res->flags & IORESOURCE_IO)
+	if (res->flags & IORESOURCE_IO) {
 		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
-	else
+		new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+	} else if (resno == PCI_ROM_RESOURCE) {
+		mask = (u32)PCI_ROM_ADDRESS_MASK;
+	} else {
 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+		new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+	}
 
-	reg = pci_resource_bar(dev, resno, &type);
-	if (!reg)
-		return;
-	if (type != pci_bar_unknown) {
+	if (resno < PCI_ROM_RESOURCE) {
+		reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+	} else if (resno == PCI_ROM_RESOURCE) {
+
+		/*
+		 * Apparently some Matrox devices have ROM BARs that read
+		 * as zero when disabled, so don't update ROM BARs unless
+		 * they're enabled.  See https://lkml.org/lkml/2005/8/30/138.
+		 */
 		if (!(res->flags & IORESOURCE_ROM_ENABLE))
 			return;
+
+		reg = dev->rom_base_reg;
 		new |= PCI_ROM_ADDRESS_ENABLE;
-	}
+	} else
+		return;
 
 	/*
 	 * We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 }
 
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+	if (resno <= PCI_ROM_RESOURCE)
+		pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+	else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+		pci_iov_update_resource(dev, resno);
+#endif
+}
+
 int pci_claim_resource(struct pci_dev *dev, int resource)
 {
 	struct resource *res = &dev->resource[resource];
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b572..ad3e1e7 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -27,6 +27,9 @@
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
 
+#define USE_CPUHP_STATE CPUHP_AP_PERF_ARM_STARTING
+#define USE_CPUHP_STR "AP_PERF_ARM_STARTING"
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -366,6 +369,8 @@
 		return err;
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
 	return 0;
 }
 
@@ -568,6 +573,7 @@
 		.read		= armpmu_read,
 		.filter_match	= armpmu_filter_match,
 		.attr_groups	= armpmu->attr_groups,
+		.events_across_hotplug = 1,
 	};
 	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
 		&armpmu_common_attr_group;
@@ -620,6 +626,8 @@
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
 	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
+	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	irq = platform_get_irq(pmu_device, 0);
@@ -627,6 +635,7 @@
 		on_each_cpu_mask(&cpu_pmu->supported_cpus,
 				 cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
+		cpu_pmu->percpu_irq = -1;
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -641,6 +650,7 @@
 				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
+	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
 }
 
 static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -670,6 +680,7 @@
 
 		on_each_cpu_mask(&cpu_pmu->supported_cpus,
 				 cpu_pmu_enable_percpu_irq, &irq, 1);
+		cpu_pmu->percpu_irq = irq;
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -709,22 +720,12 @@
 	return 0;
 }
 
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
-{
-	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return 0;
-	if (pmu->reset)
-		pmu->reset(pmu);
-	return 0;
-}
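+/*
+ * Arguments for cpu_pm_pmu_common(), which lets the same body be shared by
+ * the CPU PM notifier and the hotplug starting/stopping callbacks below.
+ */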
+struct cpu_pm_pmu_args {
+	struct arm_pmu	*armpmu;
+	unsigned long	cmd;
+	int		cpu;
+	int		ret;
+};
 
 #ifdef CONFIG_CPU_PM
 static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
@@ -772,15 +773,19 @@
 	}
 }
 
-static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
-			     void *v)
+static void cpu_pm_pmu_common(void *info)
 {
-	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+	struct cpu_pm_pmu_args *data	= info;
+	struct arm_pmu *armpmu		= data->armpmu;
+	unsigned long cmd		= data->cmd;
+	int cpu				= data->cpu;
 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return NOTIFY_DONE;
+	if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
+		data->ret = NOTIFY_DONE;
+		return;
+	}
 
 	/*
 	 * Always reset the PMU registers on power-up even if
@@ -789,8 +794,12 @@
 	if (cmd == CPU_PM_EXIT && armpmu->reset)
 		armpmu->reset(armpmu);
 
-	if (!enabled)
-		return NOTIFY_OK;
+	if (!enabled) {
+		data->ret = NOTIFY_OK;
+		return;
+	}
+
+	data->ret = NOTIFY_OK;
 
 	switch (cmd) {
 	case CPU_PM_ENTER:
@@ -798,15 +807,29 @@
 		cpu_pm_pmu_setup(armpmu, cmd);
 		break;
 	case CPU_PM_EXIT:
-		cpu_pm_pmu_setup(armpmu, cmd);
 	case CPU_PM_ENTER_FAILED:
+		cpu_pm_pmu_setup(armpmu, cmd);
 		armpmu->start(armpmu);
 		break;
 	default:
-		return NOTIFY_DONE;
+		data->ret = NOTIFY_DONE;
+		break;
 	}
 
-	return NOTIFY_OK;
+	return;
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+			     void *v)
+{
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= container_of(b, struct arm_pmu, cpu_pm_nb),
+		.cmd	= cmd,
+		.cpu	= smp_processor_id(),
+	};
+
+	cpu_pm_pmu_common(&data);
+	return data.ret;
 }
 
 static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
@@ -819,11 +842,75 @@
 {
 	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
 }
+
 #else
 static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
 static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+static void cpu_pm_pmu_common(void *info) { }
 #endif
 
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= pmu,
+		.cpu	= (int)cpu,
+	};
+
+	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return 0;
+
+	data.cmd    = CPU_PM_EXIT;
+	cpu_pm_pmu_common(&data);
+	if (data.ret == NOTIFY_DONE)
+		return 0;
+
+	if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
+		data.armpmu->plat_device) {
+		int irq = data.armpmu->percpu_irq;
+
+		if (irq > 0 && irq_is_percpu(irq))
+			cpu_pmu_enable_percpu_irq(&irq);
+
+	}
+
+	return 0;
+}
+
+static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= pmu,
+		.cpu	= (int)cpu,
+	};
+
+	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return 0;
+
+	data.cmd = CPU_PM_ENTER;
+	cpu_pm_pmu_common(&data);
+	/* Disarm the PMU IRQ before disappearing. */
+	if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
+		data.armpmu->plat_device) {
+		int irq = data.armpmu->percpu_irq;
+
+		if (irq > 0 && irq_is_percpu(irq))
+			cpu_pmu_disable_percpu_irq(&irq);
+
+	}
+
+	return 0;
+}
+
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int err;
@@ -834,14 +921,14 @@
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+	err = cpuhp_state_add_instance_nocalls(USE_CPUHP_STATE,
 					       &cpu_pmu->node);
 	if (err)
 		goto out_free;
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
-		goto out_unregister;
+		goto out_unreg_perf_starting;
 
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
@@ -872,8 +959,8 @@
 
 	return 0;
 
-out_unregister:
-	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+out_unreg_perf_starting:
+	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
 					    &cpu_pmu->node);
 out_free:
 	free_percpu(cpu_hw_events);
@@ -883,7 +970,7 @@
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
 					    &cpu_pmu->node);
 	free_percpu(cpu_pmu->hw_events);
 }
@@ -1064,6 +1151,9 @@
 	if (!__oprofile_cpu_pmu)
 		__oprofile_cpu_pmu = pmu;
 
+	pmu->pmu_state  = ARM_PMU_STATE_OFF;
+	pmu->percpu_irq = -1;
+
 	pr_info("enabled with %s PMU driver, %d counters available\n",
 			pmu->name, pmu->num_events);
 
@@ -1083,11 +1173,12 @@
 {
 	int ret;
 
-	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
-				      "AP_PERF_ARM_STARTING",
-				      arm_perf_starting_cpu, NULL);
+	ret = cpuhp_setup_state_multi(USE_CPUHP_STATE,
+					USE_CPUHP_STR,
+					arm_perf_starting_cpu,
+					arm_perf_stopping_cpu);
 	if (ret)
-		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+		pr_err("CPU hotplug ARM PMU STOPPING registering failed: %d\n",
 		       ret);
 	return ret;
 }
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 92fd916..f6b99d0 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -55,6 +55,7 @@
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-14nm.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-v3.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qrbtc-sdm845.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-v3-660.o
 obj-$(CONFIG_PHY_TUSB1210)		+= phy-tusb1210.o
 obj-$(CONFIG_PHY_BRCM_SATA)		+= phy-brcm-sata.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 35179c8..b92bc89 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -97,6 +97,10 @@
 	struct ufs_qcom_phy_vreg vdda_pll;
 	struct ufs_qcom_phy_vreg vdda_phy;
 	struct ufs_qcom_phy_vreg vddp_ref_clk;
+
+	/* Number of lanes available (1 or 2) for Rx/Tx */
+	u32 lanes_per_direction;
+
 	unsigned int quirks;
 
 	/*
@@ -152,6 +156,7 @@
  * and writes to QSERDES_RX_SIGDET_CNTRL attribute
  * @configure_lpm: pointer to a function that configures the phy
  * for low power mode.
+ * @dbg_register_dump: pointer to a function that dumps phy registers for debug.
  */
 struct ufs_qcom_phy_specific_ops {
 	int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
@@ -161,6 +166,7 @@
 	void (*ctrl_rx_linecfg)(struct ufs_qcom_phy *phy, bool ctrl);
 	void (*power_control)(struct ufs_qcom_phy *phy, bool val);
 	int (*configure_lpm)(struct ufs_qcom_phy *phy, bool enable);
+	void (*dbg_register_dump)(struct ufs_qcom_phy *phy);
 };
 
 struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
@@ -184,5 +190,6 @@
 void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
 				struct ufs_qcom_phy_calibration *tbl,
 				int tbl_size);
-
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy,
+			    int offset, int len, char *prefix);
 #endif
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3-660.c b/drivers/phy/phy-qcom-ufs-qmp-v3-660.c
new file mode 100644
index 0000000..9450e18
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3-660.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3-660.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3_660"
+
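+/*
+ * Program the rate A calibration table matching the UFS host controller
+ * revision (only 3.1.1 is known here) together with the common rate B table.
+ */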
+static
+int ufs_qcom_phy_qmp_v3_660_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	if ((major == 0x3) && (minor == 0x001) && (step == 0x001)) {
+		tbl_A = phy_cal_table_rate_A_3_1_1;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_1);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v3_660 *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transactions involving PHY, ensure PHY knows
+		 * that its analog rail is powered ON.
+		 */
+		mb();
+	}
+}
+
+static inline
+void ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable(struct ufs_qcom_phy *phy,
+						   u32 val)
+{
+	/*
+	 * v3 PHY does not have TX_LANE_ENABLE register.
+	 * Implement this function so as not to propagate error to caller.
+	 */
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
+						bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* Make sure that RX LineCfg config applied before we return */
+	mb();
+}
+
+static inline void ufs_qcom_phy_qmp_v3_660_start_serdes(
+					struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qmp_v3_660_is_pcs_ready(
+				struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+		val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+	return err;
+}
+
+static void ufs_qcom_phy_qmp_v3_660_dbg_register_dump(
+					struct ufs_qcom_phy *phy)
+{
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE, RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE, TX_SIZE,
+					"PHY TX0 Registers ");
+}
+
+struct phy_ops ufs_qcom_phy_qmp_v3_660_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v3_660_init,
+	.exit		= ufs_qcom_phy_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_v3_660_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v3_660_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v3_660_start_serdes,
+	.is_physical_coding_sublayer_ready =
+				ufs_qcom_phy_qmp_v3_660_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v3_660_power_control,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v3_660_dbg_register_dump,
+};
+
+static int ufs_qcom_phy_qmp_v3_660_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v3_660 *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v3_660_phy_ops,
+				&phy_v3_660_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = to_phy(dev);
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int err = 0;
+
+	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+	if (err)
+		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v3_660_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v3-660"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_660_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_660_driver = {
+	.probe = ufs_qcom_phy_qmp_v3_660_probe,
+	.remove = ufs_qcom_phy_qmp_v3_660_remove,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v3_660_of_match,
+		.name = "ufs_qcom_phy_qmp_v3_660",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_660_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3 660");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3-660.h b/drivers/phy/phy-qcom-ufs-qmp-v3-660.h
new file mode 100644
index 0000000..89fa5d3
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3-660.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_660_H_
+#define UFS_QCOM_PHY_QMP_V3_660_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE	0x000
+#define COM_OFF(x)	(COM_BASE + x)
+#define COM_SIZE	0x1C0
+
+#define TX_BASE		0x400
+#define TX_OFF(x)	(TX_BASE + x)
+#define TX_SIZE		0x128
+
+#define RX_BASE		0x600
+#define RX_OFF(x)	(RX_BASE + x)
+#define RX_SIZE		0x1FC
+
+#define PHY_BASE	0xC00
+#define PHY_OFF(x)	(PHY_BASE + x)
+#define PHY_SIZE	0x1B4
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1			COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2			COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE			COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER		COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1		COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2		COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1			COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2			COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1		COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2		COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV			COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX		COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1			COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN			COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
+#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0X4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0X50)
+#define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0X54)
+#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0X58)
+#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0X5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0X60)
+#define QSERDES_COM_CMD_RSVD0			COM_OFF(0x64)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL	COM_OFF(0x68)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS	COM_OFF(0x6C)
+#define QSERDES_COM_BG_TRIM			COM_OFF(0x70)
+#define QSERDES_COM_CLK_EP_DIV			COM_OFF(0x74)
+#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x7C)
+#define QSERDES_COM_CMN_RSVD1			COM_OFF(0x80)
+#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x88)
+#define QSERDES_COM_CMN_RSVD2			COM_OFF(0x8C)
+#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x94)
+#define QSERDES_COM_CMN_RSVD3			COM_OFF(0x98)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x9C)
+#define QSERDES_COM_PHASE_SEL_CTRL		COM_OFF(0xA0)
+#define QSERDES_COM_PHASE_SEL_DC		COM_OFF(0xA4)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		COM_OFF(0xA8)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0xAC)
+#define QSERDES_COM_CML_SYSCLK_SEL		COM_OFF(0xB0)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0xB4)
+#define QSERDES_COM_RESETSM_CNTRL2		COM_OFF(0xB8)
+#define QSERDES_COM_RESTRIM_CTRL		COM_OFF(0xBC)
+#define QSERDES_COM_RESTRIM_CTRL2		COM_OFF(0xC0)
+#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xD4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		COM_OFF(0xD8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE_MINVAL1		COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE_MINVAL2		COM_OFF(0xF8)
+#define QSERDES_COM_CMN_RSVD4			COM_OFF(0xFC)
+#define QSERDES_COM_INTEGLOOP_INITVAL		COM_OFF(0x100)
+#define QSERDES_COM_INTEGLOOP_EN		COM_OFF(0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1		COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2		COM_OFF(0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2		COM_OFF(0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL		COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0		COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x148)
+#define QSERDES_COM_SAR				COM_OFF(0x14C)
+#define QSERDES_COM_SAR_CLK			COM_OFF(0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS		COM_OFF(0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS	COM_OFF(0x158)
+#define QSERDES_COM_CMN_STATUS			COM_OFF(0x15C)
+#define QSERDES_COM_RESET_SM_STATUS		COM_OFF(0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS		COM_OFF(0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS		COM_OFF(0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS		COM_OFF(0x16C)
+#define QSERDES_COM_BG_CTRL			COM_OFF(0x170)
+#define QSERDES_COM_CLK_SELECT			COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS	COM_OFF(0x17C)
+#define QSERDES_COM_PLL_ANALOG			COM_OFF(0x180)
+#define QSERDES_COM_CORECLK_DIV			COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x188)
+#define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x18C)
+#define QSERDES_COM_C_READY_STATUS		COM_OFF(0x190)
+#define QSERDES_COM_CMN_CONFIG			COM_OFF(0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE		COM_OFF(0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC1			COM_OFF(0x1B4)
+#define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x1BC)
+#define QSERDES_COM_CMN_RSVD5			COM_OFF(0x1C0)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP	PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x13C)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x154)
+#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN	TX_OFF(0x68)
+#define	QSERDES_TX_LANE_MODE				TX_OFF(0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF	RX_OFF(0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER	RX_OFF(0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH	RX_OFF(0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN		RX_OFF(0x3C)
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN	RX_OFF(0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE	RX_OFF(0x48)
+#define QSERDES_RX_RX_TERM_BW			RX_OFF(0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB		RX_OFF(0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB		RX_OFF(0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB		RX_OFF(0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB		RX_OFF(0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2	RX_OFF(0xD8)
+#define QSERDES_RX_SIGDET_CNTRL			RX_OFF(0x114)
+#define QSERDES_RX_SIGDET_LVL			RX_OFF(0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL	RX_OFF(0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE		RX_OFF(0x12C)
+
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
+/*
+ * This structure represents the v3 660 specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v3_660 {
+	struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_1[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
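+/* Calibration overrides for Rate B operation */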
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+#endif
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.c b/drivers/phy/phy-qcom-ufs-qmp-v3.c
index 6b8dbc2..0bfde0c7 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,26 +20,24 @@
 int ufs_qcom_phy_qmp_v3_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 					bool is_rate_B)
 {
-	int err;
-	int tbl_size_A, tbl_size_B;
-	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	/*
+	 * Writing PHY calibration in this order:
+	 * 1. Write Rate-A calibration first (1-lane mode).
+	 * 2. Write 2nd lane configuration if needed.
+	 * 3. Write Rate-B calibration overrides.
+	 */
+	ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A,
+			       ARRAY_SIZE(phy_cal_table_rate_A));
+	if (ufs_qcom_phy->lanes_per_direction == 2)
+		ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_2nd_lane,
+				       ARRAY_SIZE(phy_cal_table_2nd_lane));
+	if (is_rate_B)
+		ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_B,
+				       ARRAY_SIZE(phy_cal_table_rate_B));
+	/* flush buffered writes */
+	mb();
 
-	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
-	tbl_B = phy_cal_table_rate_B;
-
-	tbl_A = phy_cal_table_rate_A;
-	tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
-
-	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
-				     tbl_A, tbl_size_A,
-				     tbl_B, tbl_size_B,
-				     is_rate_B);
-
-	if (err)
-		dev_err(ufs_qcom_phy->dev,
-			"%s: ufs_qcom_phy_calibrate() failed %d\n",
-			__func__, err);
-	return err;
+	return 0;
 }
 
 static int ufs_qcom_phy_qmp_v3_init(struct phy *generic_phy)
@@ -145,37 +143,20 @@
 	return err;
 }
 
-static
-int ufs_qcom_phy_qmp_v3_configure_lpm(struct ufs_qcom_phy *ufs_qcom_phy,
-					bool enable)
+static void ufs_qcom_phy_qmp_v3_dbg_register_dump(struct ufs_qcom_phy *phy)
 {
-	int err = 0;
-	int tbl_size;
-	struct ufs_qcom_phy_calibration *tbl = NULL;
-
-	/* The default low power mode configuration is SVS2 */
-	if (enable) {
-		tbl_size = ARRAY_SIZE(phy_cal_table_svs2_enable);
-		tbl = phy_cal_table_svs2_enable;
-	} else {
-		tbl_size = ARRAY_SIZE(phy_cal_table_svs2_disable);
-		tbl = phy_cal_table_svs2_disable;
-	}
-
-	if (!tbl) {
-		dev_err(ufs_qcom_phy->dev, "%s: tbl for SVS2 %s is NULL",
-			__func__, enable ? "enable" : "disable");
-		err = -EINVAL;
-		goto out;
-	}
-
-	ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl, tbl_size);
-
-	/* flush buffered writes */
-	mb();
-
-out:
-	return err;
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE(0), RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE(0), TX_SIZE,
+					"PHY TX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE(1), RX_SIZE,
+					"PHY RX1 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE(1), TX_SIZE,
+					"PHY TX1 Registers ");
 }
 
 struct phy_ops ufs_qcom_phy_qmp_v3_phy_ops = {
@@ -193,7 +174,7 @@
 	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_set_tx_lane_enable,
 	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_ctrl_rx_linecfg,
 	.power_control		= ufs_qcom_phy_qmp_v3_power_control,
-	.configure_lpm		= ufs_qcom_phy_qmp_v3_configure_lpm,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v3_dbg_register_dump,
 };
 
 static int ufs_qcom_phy_qmp_v3_probe(struct platform_device *pdev)
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index e9ac76b..4851aac 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,10 +18,18 @@
 #include "phy-qcom-ufs-i.h"
 
 /* QCOM UFS PHY control registers */
-#define COM_OFF(x)	(0x000 + x)
-#define PHY_OFF(x)	(0xC00 + x)
-#define TX_OFF(n, x)	(0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x)	(0x600 + (0x400 * n) + x)
+#define COM_BASE	0x000
+#define COM_SIZE	0x18C
+#define PHY_BASE	0xC00
+#define PHY_SIZE	0x1DC
+#define TX_BASE(n)	(0x400 + (0x400 * n))
+#define TX_SIZE		0x128
+#define RX_BASE(n)	(0x600 + (0x400 * n))
+#define RX_SIZE		0x1FC
+#define COM_OFF(x)	(COM_BASE + x)
+#define PHY_OFF(x)	(PHY_BASE + x)
+#define TX_OFF(n, x)	(TX_BASE(n) + x)
+#define RX_OFF(n, x)	(RX_BASE(n) + x)
 
 /* UFS PHY QSERDES COM registers */
 #define QSERDES_COM_ATB_SEL1			COM_OFF(0x00)
@@ -133,9 +141,13 @@
 #define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x34)
 #define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x130)
 #define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x134)
+#define UFS_PHY_RX_MIN_HIBERN8_TIME		PHY_OFF(0x138)
+#define UFS_PHY_RX_SIGDET_CTRL1			PHY_OFF(0x13C)
 #define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x140)
 #define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x14C)
 #define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x160)
+#define UFS_PHY_TX_MID_TERM_CTRL1		PHY_OFF(0x1BC)
+#define UFS_PHY_MULTI_LANE_CTRL1		PHY_OFF(0x1C4)
 
 /* UFS PHY TX registers */
 #define QSERDES_TX0_TRANSCEIVER_BIAS_EN		TX_OFF(0, 0x5C)
@@ -143,6 +155,9 @@
 #define QSERDES_TX0_LANE_MODE_2			TX_OFF(0, 0x90)
 #define QSERDES_TX0_LANE_MODE_3			TX_OFF(0, 0x94)
 
+#define QSERDES_TX1_LANE_MODE_1			TX_OFF(1, 0x8C)
+
+
 /* UFS PHY RX registers */
 #define QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF		RX_OFF(0, 0x24)
 #define QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER		RX_OFF(0, 0x28)
@@ -163,6 +178,22 @@
 #define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL		RX_OFF(0, 0x10C)
 #define QSERDES_RX0_RX_INTERFACE_MODE			RX_OFF(0, 0x11C)
 
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF		RX_OFF(1, 0x24)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER		RX_OFF(1, 0x28)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN			RX_OFF(1, 0x2C)
+#define QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN		RX_OFF(1, 0x30)
+#define QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE	RX_OFF(1, 0x34)
+#define QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW		RX_OFF(1, 0x3C)
+#define QSERDES_RX1_UCDR_PI_CONTROLS			RX_OFF(1, 0x44)
+#define QSERDES_RX1_RX_TERM_BW				RX_OFF(1, 0x7C)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2		RX_OFF(1, 0xD4)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3		RX_OFF(1, 0xD8)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4		RX_OFF(1, 0xDC)
+#define QSERDES_RX1_SIGDET_CNTRL			RX_OFF(1, 0x104)
+#define QSERDES_RX1_SIGDET_LVL				RX_OFF(1, 0x108)
+#define QSERDES_RX1_SIGDET_DEGLITCH_CNTRL		RX_OFF(1, 0x10C)
+#define QSERDES_RX1_RX_INTERFACE_MODE			RX_OFF(1, 0x11C)
+
 #define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
 
 /*
@@ -181,6 +212,7 @@
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x06),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD5),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
@@ -195,22 +227,22 @@
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x06),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x34),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x36),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xCB),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xDA),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x06),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x34),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x36),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xB2),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xC1),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
@@ -234,42 +266,33 @@
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_MID_TERM_CTRL1, 0x43),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
+};
+
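+/* Second-lane calibration, written when lanes_per_direction is 2 */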
+static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0xF1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
 };
 
 static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
 };
 
-static struct ufs_qcom_phy_calibration phy_cal_table_svs2_enable[] = {
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x14),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x14),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x0a),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x7e),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0x7f),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x06),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x7e),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x99),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x07),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x0b),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0x66),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_svs2_disable[] = {
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x0a),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0xcc),
-};
-
 #endif
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index b8b9080..d18929f 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -15,13 +15,15 @@
 #include "phy-qcom-ufs-i.h"
 
 #define MAX_PROP_NAME              32
-#define VDDA_PHY_MIN_UV            1000000
-#define VDDA_PHY_MAX_UV            1000000
+#define VDDA_PHY_MIN_UV            800000
+#define VDDA_PHY_MAX_UV            925000
 #define VDDA_PLL_MIN_UV            1200000
 #define VDDA_PLL_MAX_UV            1800000
 #define VDDP_REF_CLK_MIN_UV        1200000
 #define VDDP_REF_CLK_MAX_UV        1200000
 
+#define UFS_PHY_DEFAULT_LANES_PER_DIRECTION	1
+
 static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
 				    const char *, bool);
 static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
@@ -113,6 +115,19 @@
 		goto out;
 	}
 
+	if (of_property_read_u32(dev->of_node, "lanes-per-direction",
+				 &common_cfg->lanes_per_direction))
+		common_cfg->lanes_per_direction =
+			UFS_PHY_DEFAULT_LANES_PER_DIRECTION;
+
+	/*
+	 * UFS PHY power management is managed by its parent (the UFS host
+	 * controller), hence set the 'no runtime PM callbacks' flag on the
+	 * UFS PHY device to avoid any accidental attempt to invoke the PM
+	 * callbacks for the PHY device.
+	 */
+	pm_runtime_no_callbacks(&generic_phy->dev);
+
 	common_cfg->phy_spec_ops = phy_spec_ops;
 	common_cfg->dev = dev;
 
@@ -191,27 +206,20 @@
 		       struct ufs_qcom_phy *phy_common)
 {
 	int err;
-	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
-				   &phy_common->tx_iface_clk);
 	/*
 	 * tx_iface_clk does not exist in newer version of ufs-phy HW,
 	 * so don't return error if it is not found
 	 */
-	if (err)
-		dev_dbg(phy->dev, "%s: failed to get tx_iface_clk\n",
-			__func__);
+	__ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+				   &phy_common->tx_iface_clk, false);
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
-				   &phy_common->rx_iface_clk);
 	/*
 	 * rx_iface_clk does not exist in newer version of ufs-phy HW,
 	 * so don't return error if it is not found
 	 */
-	if (err)
-		dev_dbg(phy->dev, "%s: failed to get rx_iface_clk\n",
-			__func__);
+	__ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+				   &phy_common->rx_iface_clk, false);
 
 	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
 				   &phy_common->ref_clk_src);
@@ -246,7 +254,6 @@
 			      struct ufs_qcom_phy *phy_common)
 {
 	int err;
-	int vdda_phy_uV;
 
 	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
 		"vdda-pll");
@@ -258,10 +265,6 @@
 	if (err)
 		goto out;
 
-	vdda_phy_uV = regulator_get_voltage(phy_common->vdda_phy.reg);
-	phy_common->vdda_phy.max_uV = vdda_phy_uV;
-	phy_common->vdda_phy.min_uV = vdda_phy_uV;
-
 	/* vddp-ref-clk-* properties are optional */
 	__ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
 				 "vddp-ref-clk", true);
@@ -279,6 +282,14 @@
 
 	char prop_name[MAX_PROP_NAME];
 
+	if (dev->of_node) {
+		snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
+		if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
+			dev_dbg(dev, "No vreg data found for %s\n", prop_name);
+			return optional ? err : -ENODATA;
+		}
+	}
+
 	vreg->name = kstrdup(name, GFP_KERNEL);
 	if (!vreg->name) {
 		err = -ENOMEM;
@@ -786,3 +797,21 @@
 	return ret;
 }
 EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);
+
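+/* Dump a region of PHY registers to the kernel log as 32-bit hex words */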
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy, int offset,
+				int len, char *prefix)
+{
+	print_hex_dump(KERN_ERR, prefix,
+			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+			16, 4, phy->mmio + offset, len, false);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dump_regs);
+
+void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+	if (ufs_qcom_phy->phy_spec_ops->dbg_register_dump)
+		ufs_qcom_phy->phy_spec_ops->dbg_register_dump(ufs_qcom_phy);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_dump);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 522c724..ba6d5ce 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -146,4 +146,11 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  WCD gpio controller block.
 
+config PINCTRL_LPI
+	tristate "Qualcomm Technologies, Inc LPI pin controller driver"
+	depends on GPIOLIB && OF
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  LPI gpio controller block.
+
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 7c74288..5e05e897 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -18,3 +18,4 @@
 obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
 obj-$(CONFIG_PINCTRL_SDM830) += pinctrl-sdm830.o
 obj-$(CONFIG_PINCTRL_WCD)	+= pinctrl-wcd.o
+obj-$(CONFIG_PINCTRL_LPI)	+= pinctrl-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpi.c
new file mode 100644
index 0000000..009e27bf
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-lpi.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define LPI_ADDRESS_SIZE			0xC000
+
+#define LPI_GPIO_REG_VAL_CTL			0x00
+#define LPI_GPIO_REG_DIR_CTL			0x04
+
+#define LPI_GPIO_REG_PULL_SHIFT			0x0
+#define LPI_GPIO_REG_PULL_MASK			0x3
+
+#define LPI_GPIO_REG_FUNCTION_SHIFT		0x2
+#define LPI_GPIO_REG_FUNCTION_MASK		0x3C
+
+#define LPI_GPIO_REG_OUT_STRENGTH_SHIFT		0x6
+#define LPI_GPIO_REG_OUT_STRENGTH_MASK		0x1C0
+
+#define LPI_GPIO_REG_OE_SHIFT			0x9
+#define LPI_GPIO_REG_OE_MASK			0x200
+
+#define LPI_GPIO_REG_DIR_SHIFT			0x1
+#define LPI_GPIO_REG_DIR_MASK			0x2
+
+#define LPI_GPIO_BIAS_DISABLE			0x0
+#define LPI_GPIO_PULL_DOWN			0x1
+#define LPI_GPIO_KEEPER				0x2
+#define LPI_GPIO_PULL_UP			0x3
+
+#define LPI_GPIO_FUNC_GPIO			"gpio"
+#define LPI_GPIO_FUNC_FUNC1			"func1"
+#define LPI_GPIO_FUNC_FUNC2			"func2"
+#define LPI_GPIO_FUNC_FUNC3			"func3"
+#define LPI_GPIO_FUNC_FUNC4			"func4"
+#define LPI_GPIO_FUNC_FUNC5			"func5"
+
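+/* Set while the ADSP is up; cleared on SSR so register accesses are skipped */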
+static bool lpi_dev_up;
+
+/* The index of each function in lpi_gpio_functions[] array */
+enum lpi_gpio_func_index {
+	LPI_GPIO_FUNC_INDEX_GPIO	= 0x00,
+	LPI_GPIO_FUNC_INDEX_FUNC1	= 0x01,
+	LPI_GPIO_FUNC_INDEX_FUNC2	= 0x02,
+	LPI_GPIO_FUNC_INDEX_FUNC3	= 0x03,
+	LPI_GPIO_FUNC_INDEX_FUNC4	= 0x04,
+	LPI_GPIO_FUNC_INDEX_FUNC5	= 0x05,
+};
+
+/**
+ * struct lpi_gpio_pad - keep current GPIO settings
+ * @offset: Nth GPIO in supported GPIOs.
+ * @output_enabled: Set to true if GPIO output logic is enabled.
+ * @value: value of a pin
+ * @base: Address base of LPI GPIO PAD.
+ * @pullup: Constant current which flows through the GPIO output buffer.
+ * @strength: No, Low, Medium, High
+ * @function: See lpi_gpio_functions[]
+ */
+struct lpi_gpio_pad {
+	u16		offset;
+	bool		output_enabled;
+	bool		value;
+	char __iomem	*base;
+	unsigned int	pullup;
+	unsigned int	strength;
+	unsigned int	function;
+};
+
+struct lpi_gpio_state {
+	struct device	*dev;
+	struct pinctrl_dev *ctrl;
+	struct gpio_chip chip;
+	char __iomem	*base;
+};
+
+static const char *const lpi_gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31",
+};
+
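+/* Register offset of each LPI GPIO pad, indexed by pin number */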
+static const u32 lpi_offset[] = {
+	0x00000000,
+	0x00001000,
+	0x00002000,
+	0x00002010,
+	0x00003000,
+	0x00003010,
+	0x00004000,
+	0x00004010,
+	0x00005000,
+	0x00005010,
+	0x00005020,
+	0x00005030,
+	0x00005040,
+	0x00005050,
+	0x00006000,
+	0x00006010,
+	0x00007000,
+	0x00007010,
+	0x00008000,
+	0x00008010,
+	0x00008020,
+	0x00008030,
+	0x00008040,
+	0x00008050,
+	0x00008060,
+	0x00008070,
+	0x00009000,
+	0x00009010,
+	0x0000A000,
+	0x0000A010,
+	0x0000B000,
+	0x0000B010,
+};
+
+static const char *const lpi_gpio_functions[] = {
+	[LPI_GPIO_FUNC_INDEX_GPIO]	= LPI_GPIO_FUNC_GPIO,
+	[LPI_GPIO_FUNC_INDEX_FUNC1]	= LPI_GPIO_FUNC_FUNC1,
+	[LPI_GPIO_FUNC_INDEX_FUNC2]	= LPI_GPIO_FUNC_FUNC2,
+	[LPI_GPIO_FUNC_INDEX_FUNC3]	= LPI_GPIO_FUNC_FUNC3,
+	[LPI_GPIO_FUNC_INDEX_FUNC4]	= LPI_GPIO_FUNC_FUNC4,
+	[LPI_GPIO_FUNC_INDEX_FUNC5]	= LPI_GPIO_FUNC_FUNC5,
+};
+
+static int lpi_gpio_read(struct lpi_gpio_pad *pad, unsigned int addr)
+{
+	int ret;
+
+	if (!lpi_dev_up) {
+		pr_err_ratelimited("%s: ADSP is down due to SSR, return\n",
+				   __func__);
+		return 0;
+	}
+
+	ret = ioread32(pad->base + pad->offset + addr);
+	if (ret < 0)
+		pr_err("%s: read 0x%x failed\n", __func__, addr);
+
+	return ret;
+}
+
+static int lpi_gpio_write(struct lpi_gpio_pad *pad, unsigned int addr,
+			  unsigned int val)
+{
+	if (!lpi_dev_up) {
+		pr_err_ratelimited("%s: ADSP is down due to SSR, return\n",
+				   __func__);
+		return 0;
+	}
+
+	iowrite32(val, pad->base + pad->offset + addr);
+	return 0;
+}
+
+static int lpi_gpio_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	/* Every PIN is a group */
+	return pctldev->desc->npins;
+}
+
+static const char *lpi_gpio_get_group_name(struct pinctrl_dev *pctldev,
+					   unsigned int pin)
+{
+	return pctldev->desc->pins[pin].name;
+}
+
+static int lpi_gpio_get_group_pins(struct pinctrl_dev *pctldev,
+				   unsigned int pin,
+				   const unsigned int **pins,
+				   unsigned int *num_pins)
+{
+	*pins = &pctldev->desc->pins[pin].number;
+	*num_pins = 1;
+	return 0;
+}
+
+static const struct pinctrl_ops lpi_gpio_pinctrl_ops = {
+	.get_groups_count	= lpi_gpio_get_groups_count,
+	.get_group_name		= lpi_gpio_get_group_name,
+	.get_group_pins		= lpi_gpio_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
+	.dt_free_map		= pinctrl_utils_free_map,
+};
+
+static int lpi_gpio_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(lpi_gpio_functions);
+}
+
+static const char *lpi_gpio_get_function_name(struct pinctrl_dev *pctldev,
+					      unsigned int function)
+{
+	return lpi_gpio_functions[function];
+}
+
+static int lpi_gpio_get_function_groups(struct pinctrl_dev *pctldev,
+					unsigned int function,
+					const char *const **groups,
+					unsigned *const num_qgroups)
+{
+	*groups = lpi_gpio_groups;
+	*num_qgroups = pctldev->desc->npins;
+	return 0;
+}
+
+static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+			    unsigned int pin)
+{
+	struct lpi_gpio_pad *pad;
+	unsigned int val;
+
+	pad = pctldev->desc->pins[pin].drv_data;
+
+	pad->function = function;
+
+	val = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);
+	val &= ~(LPI_GPIO_REG_FUNCTION_MASK);
+	val |= pad->function << LPI_GPIO_REG_FUNCTION_SHIFT;
+	lpi_gpio_write(pad, LPI_GPIO_REG_VAL_CTL, val);
+	return 0;
+}
+
+static const struct pinmux_ops lpi_gpio_pinmux_ops = {
+	.get_functions_count	= lpi_gpio_get_functions_count,
+	.get_function_name	= lpi_gpio_get_function_name,
+	.get_function_groups	= lpi_gpio_get_function_groups,
+	.set_mux		= lpi_gpio_set_mux,
+};
+
+static int lpi_config_get(struct pinctrl_dev *pctldev,
+			  unsigned int pin, unsigned long *config)
+{
+	unsigned int param = pinconf_to_config_param(*config);
+	struct lpi_gpio_pad *pad;
+	unsigned int arg;
+
+	pad = pctldev->desc->pins[pin].drv_data;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		arg = pad->pullup == LPI_GPIO_BIAS_DISABLE;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		arg = pad->pullup == LPI_GPIO_PULL_DOWN;
+		break;
+	case PIN_CONFIG_BIAS_BUS_HOLD:
+		arg = pad->pullup == LPI_GPIO_KEEPER;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pad->pullup == LPI_GPIO_PULL_UP;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+	case PIN_CONFIG_OUTPUT:
+		arg = pad->output_enabled;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+	return 0;
+}
+
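+/* Convert a drive strength in mA (2..16) to its 3-bit register encoding */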
+static unsigned int lpi_drive_to_regval(u32 arg)
+{
+	return (arg/2 - 1);
+}
+
+static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+			  unsigned long *configs, unsigned int nconfs)
+{
+	struct lpi_gpio_pad *pad;
+	unsigned int param, arg;
+	int i, ret = 0, val;
+
+	pad = pctldev->desc->pins[pin].drv_data;
+
+	for (i = 0; i < nconfs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		dev_dbg(pctldev->dev, "%s: param: %d arg: %d pin: %d\n",
+			__func__, param, arg, pin);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			pad->pullup = LPI_GPIO_BIAS_DISABLE;
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			pad->pullup = LPI_GPIO_PULL_DOWN;
+			break;
+		case PIN_CONFIG_BIAS_BUS_HOLD:
+			pad->pullup = LPI_GPIO_KEEPER;
+			break;
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pad->pullup = LPI_GPIO_PULL_UP;
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pad->output_enabled = false;
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pad->output_enabled = true;
+			pad->value = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pad->strength = arg;
+			break;
+		default:
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	val = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);
+	val &= ~(LPI_GPIO_REG_PULL_MASK | LPI_GPIO_REG_OUT_STRENGTH_MASK |
+		 LPI_GPIO_REG_OE_MASK);
+	val |= pad->pullup << LPI_GPIO_REG_PULL_SHIFT;
+	val |= lpi_drive_to_regval(pad->strength) <<
+		LPI_GPIO_REG_OUT_STRENGTH_SHIFT;
+	if (pad->output_enabled)
+		val |= pad->value << LPI_GPIO_REG_OE_SHIFT;
+
+	lpi_gpio_write(pad, LPI_GPIO_REG_VAL_CTL, val);
+	lpi_gpio_write(pad, LPI_GPIO_REG_DIR_CTL,
+		       pad->output_enabled << LPI_GPIO_REG_DIR_SHIFT);
+done:
+	return ret;
+}
+
+static const struct pinconf_ops lpi_gpio_pinconf_ops = {
+	.is_generic			= true,
+	.pin_config_group_get		= lpi_config_get,
+	.pin_config_group_set		= lpi_config_set,
+};
+
+static int lpi_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+{
+	struct lpi_gpio_state *state = gpiochip_get_data(chip);
+	unsigned long config;
+
+	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
+
+	return lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int lpi_gpio_direction_output(struct gpio_chip *chip,
+				     unsigned int pin, int val)
+{
+	struct lpi_gpio_state *state = gpiochip_get_data(chip);
+	unsigned long config;
+
+	config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+
+	return lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int lpi_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+	struct lpi_gpio_state *state = gpiochip_get_data(chip);
+	struct lpi_gpio_pad *pad;
+	int value;
+
+	pad = state->ctrl->desc->pins[pin].drv_data;
+
+	value = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);
+	return value;
+}
+
+static void lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+{
+	struct lpi_gpio_state *state = gpiochip_get_data(chip);
+	unsigned long config;
+
+	config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+
+	lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int lpi_notifier_service_cb(struct notifier_block *this,
+				   unsigned long opcode, void *ptr)
+{
+	pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
+
+	switch (opcode) {
+	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		lpi_dev_up = false;
+		break;
+	case AUDIO_NOTIFIER_SERVICE_UP:
+		lpi_dev_up = true;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block service_nb = {
+	.notifier_call  = lpi_notifier_service_cb,
+	.priority = -INT_MAX,
+};
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static unsigned int lpi_regval_to_drive(u32 val)
+{
+	return (val + 1) * 2;
+}
+
+static void lpi_gpio_dbg_show_one(struct seq_file *s,
+				  struct pinctrl_dev *pctldev,
+				  struct gpio_chip *chip,
+				  unsigned int offset,
+				  unsigned int gpio)
+{
+	struct pinctrl_pin_desc pindesc;
+	struct lpi_gpio_pad *pad;
+	unsigned int func;
+	int is_out;
+	int drive;
+	int pull;
+	u32 ctl_reg;
+
+	static const char * const pulls[] = {
+		"no pull",
+		"pull down",
+		"keeper",
+		"pull up"
+	};
+
+	pindesc = pctldev->desc->pins[offset];
+	pad = pctldev->desc->pins[offset].drv_data;
+	ctl_reg = lpi_gpio_read(pad, LPI_GPIO_REG_DIR_CTL);
+	is_out = (ctl_reg & LPI_GPIO_REG_DIR_MASK) >> LPI_GPIO_REG_DIR_SHIFT;
+	ctl_reg = lpi_gpio_read(pad, LPI_GPIO_REG_VAL_CTL);
+
+	func = (ctl_reg & LPI_GPIO_REG_FUNCTION_MASK) >>
+		LPI_GPIO_REG_FUNCTION_SHIFT;
+	drive = (ctl_reg & LPI_GPIO_REG_OUT_STRENGTH_MASK) >>
+		 LPI_GPIO_REG_OUT_STRENGTH_SHIFT;
+	pull = (ctl_reg & LPI_GPIO_REG_PULL_MASK) >> LPI_GPIO_REG_PULL_SHIFT;
+
+	seq_printf(s, " %-8s: %-3s %d",
+		   pindesc.name, is_out ? "out" : "in", func);
+	seq_printf(s, " %dmA", lpi_regval_to_drive(drive));
+	seq_printf(s, " %s", pulls[pull]);
+}
+
+static void lpi_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	struct lpi_gpio_state *state = gpiochip_get_data(chip);
+	unsigned int gpio = chip->base;
+	unsigned int i;
+
+	for (i = 0; i < chip->ngpio; i++, gpio++) {
+		lpi_gpio_dbg_show_one(s, state->ctrl, chip, i, gpio);
+		seq_puts(s, "\n");
+	}
+}
+
+#else
+#define lpi_gpio_dbg_show NULL
+#endif
+
+static const struct gpio_chip lpi_gpio_template = {
+	.direction_input	= lpi_gpio_direction_input,
+	.direction_output	= lpi_gpio_direction_output,
+	.get			= lpi_gpio_get,
+	.set			= lpi_gpio_set,
+	.request		= gpiochip_generic_request,
+	.free			= gpiochip_generic_free,
+	.dbg_show		= lpi_gpio_dbg_show,
+};
+
+static int lpi_pinctrl_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct pinctrl_pin_desc *pindesc;
+	struct pinctrl_desc *pctrldesc;
+	struct lpi_gpio_pad *pad, *pads;
+	struct lpi_gpio_state *state;
+	int ret, npins, i;
+	char __iomem *lpi_base;
+	u32 reg;
+
+	ret = of_property_read_u32(dev->of_node, "reg", &reg);
+	if (ret < 0) {
+		dev_err(dev, "missing base address\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "qcom,num-gpios", &npins);
+	if (ret < 0)
+		return ret;
+
+	WARN_ON(npins > ARRAY_SIZE(lpi_gpio_groups));
+
+	state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, state);
+
+	state->dev = &pdev->dev;
+
+	pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
+	if (!pindesc)
+		return -ENOMEM;
+
+	pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
+	if (!pads)
+		return -ENOMEM;
+
+	pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
+	if (!pctrldesc)
+		return -ENOMEM;
+
+	pctrldesc->pctlops = &lpi_gpio_pinctrl_ops;
+	pctrldesc->pmxops = &lpi_gpio_pinmux_ops;
+	pctrldesc->confops = &lpi_gpio_pinconf_ops;
+	pctrldesc->owner = THIS_MODULE;
+	pctrldesc->name = dev_name(dev);
+	pctrldesc->pins = pindesc;
+	pctrldesc->npins = npins;
+
+	lpi_base = devm_ioremap(dev, reg, LPI_ADDRESS_SIZE);
+	if (lpi_base == NULL) {
+		dev_err(dev, "%s devm_ioremap failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	state->base = lpi_base;
+
+	for (i = 0; i < npins; i++, pindesc++) {
+		pad = &pads[i];
+		pindesc->drv_data = pad;
+		pindesc->number = i;
+		pindesc->name = lpi_gpio_groups[i];
+
+		pad->base = lpi_base;
+		pad->offset = lpi_offset[i];
+	}
+
+	state->chip = lpi_gpio_template;
+	state->chip.parent = dev;
+	state->chip.base = -1;
+	state->chip.ngpio = npins;
+	state->chip.label = dev_name(dev);
+	state->chip.of_gpio_n_cells = 2;
+	state->chip.can_sleep = false;
+
+	state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
+	if (IS_ERR(state->ctrl))
+		return PTR_ERR(state->ctrl);
+
+	ret = gpiochip_add_data(&state->chip, state);
+	if (ret) {
+		dev_err(state->dev, "can't add gpio chip\n");
+		goto err_chip;
+	}
+
+	ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
+	if (ret) {
+		dev_err(dev, "failed to add pin range\n");
+		goto err_range;
+	}
+
+	lpi_dev_up = true;
+	ret = audio_notifier_register("lpi_tlmm", AUDIO_NOTIFIER_ADSP_DOMAIN,
+				      &service_nb);
+	if (ret < 0) {
+		pr_err("%s: Audio notifier register failed ret = %d\n",
+			__func__, ret);
+		goto err_range;
+	}
+
+	return 0;
+
+err_range:
+	gpiochip_remove(&state->chip);
+err_chip:
+	return ret;
+}
+
+static int lpi_pinctrl_remove(struct platform_device *pdev)
+{
+	struct lpi_gpio_state *state = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&state->chip);
+	return 0;
+}
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+	{ .compatible = "qcom,lpi-pinctrl" }, /* Generic */
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+	.driver = {
+		   .name = "qcom-lpi-pinctrl",
+		   .of_match_table = lpi_pinctrl_of_match,
+	},
+	.probe = lpi_pinctrl_probe,
+	.remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+
+MODULE_DESCRIPTION("QTI LPI GPIO pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f9d483b..2a1367e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -597,10 +597,6 @@
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + g->intr_status_reg);
-	val &= ~BIT(g->intr_status_bit);
-	writel(val, pctrl->regs + g->intr_status_reg);
-
 	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_enable_bit);
 	writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index b237a6d..67adf58 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -270,6 +270,7 @@
 	PINCTRL_PIN(150, "SDC2_CLK"),
 	PINCTRL_PIN(151, "SDC2_CMD"),
 	PINCTRL_PIN(152, "SDC2_DATA"),
+	PINCTRL_PIN(153, "UFS_RESET"),
 };
 
 #define DECLARE_MSM_GPIO_PINS(pin) \
@@ -428,6 +429,7 @@
 static const unsigned int sdc2_clk_pins[] = { 150 };
 static const unsigned int sdc2_cmd_pins[] = { 151 };
 static const unsigned int sdc2_data_pins[] = { 152 };
+static const unsigned int ufs_reset_pins[] = { 153 };
 
 enum sdm845_functions {
 	msm_mux_gpio,
@@ -725,6 +727,15 @@
 	msm_mux_reserved79,
 	msm_mux_reserved80,
 	msm_mux_qup15,
+	msm_mux_reserved81,
+	msm_mux_reserved82,
+	msm_mux_reserved83,
+	msm_mux_reserved84,
+	msm_mux_pcie1_pwrfault,
+	msm_mux_qup5,
+	msm_mux_reserved85,
+	msm_mux_pcie1_mrl,
+	msm_mux_reserved86,
 	msm_mux_reserved87,
 	msm_mux_reserved88,
 	msm_mux_tsif1_clk,
@@ -749,15 +760,6 @@
 	msm_mux_vfr_1,
 	msm_mux_tgu_ch2,
 	msm_mux_reserved92,
-	msm_mux_reserved81,
-	msm_mux_reserved82,
-	msm_mux_reserved83,
-	msm_mux_reserved84,
-	msm_mux_pcie1_pwrfault,
-	msm_mux_qup5,
-	msm_mux_reserved85,
-	msm_mux_pcie1_mrl,
-	msm_mux_reserved86,
 	msm_mux_tsif2_clk,
 	msm_mux_sdc4_clk,
 	msm_mux_qup7,
@@ -1679,6 +1681,33 @@
 static const char * const qup15_groups[] = {
 	"gpio81", "gpio82", "gpio83", "gpio84",
 };
+static const char * const reserved81_groups[] = {
+	"gpio81",
+};
+static const char * const reserved82_groups[] = {
+	"gpio82",
+};
+static const char * const reserved83_groups[] = {
+	"gpio83",
+};
+static const char * const reserved84_groups[] = {
+	"gpio84",
+};
+static const char * const pcie1_pwrfault_groups[] = {
+	"gpio85",
+};
+static const char * const qup5_groups[] = {
+	"gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const reserved85_groups[] = {
+	"gpio85",
+};
+static const char * const pcie1_mrl_groups[] = {
+	"gpio86",
+};
+static const char * const reserved86_groups[] = {
+	"gpio86",
+};
 static const char * const reserved87_groups[] = {
 	"gpio87",
 };
@@ -1751,33 +1780,6 @@
 static const char * const reserved92_groups[] = {
 	"gpio92",
 };
-static const char * const reserved81_groups[] = {
-	"gpio81",
-};
-static const char * const reserved82_groups[] = {
-	"gpio82",
-};
-static const char * const reserved83_groups[] = {
-	"gpio83",
-};
-static const char * const reserved84_groups[] = {
-	"gpio84",
-};
-static const char * const pcie1_pwrfault_groups[] = {
-	"gpio85",
-};
-static const char * const qup5_groups[] = {
-	"gpio85", "gpio86", "gpio87", "gpio88",
-};
-static const char * const reserved85_groups[] = {
-	"gpio85",
-};
-static const char * const pcie1_mrl_groups[] = {
-	"gpio86",
-};
-static const char * const reserved86_groups[] = {
-	"gpio86",
-};
 static const char * const tsif2_clk_groups[] = {
 	"gpio93",
 };
@@ -2111,6 +2113,15 @@
 	FUNCTION(reserved79),
 	FUNCTION(reserved80),
 	FUNCTION(qup15),
+	FUNCTION(reserved81),
+	FUNCTION(reserved82),
+	FUNCTION(reserved83),
+	FUNCTION(reserved84),
+	FUNCTION(pcie1_pwrfault),
+	FUNCTION(qup5),
+	FUNCTION(reserved85),
+	FUNCTION(pcie1_mrl),
+	FUNCTION(reserved86),
 	FUNCTION(reserved87),
 	FUNCTION(reserved88),
 	FUNCTION(tsif1_clk),
@@ -2135,15 +2146,6 @@
 	FUNCTION(vfr_1),
 	FUNCTION(tgu_ch2),
 	FUNCTION(reserved92),
-	FUNCTION(reserved81),
-	FUNCTION(reserved82),
-	FUNCTION(reserved83),
-	FUNCTION(reserved84),
-	FUNCTION(pcie1_pwrfault),
-	FUNCTION(qup5),
-	FUNCTION(reserved85),
-	FUNCTION(pcie1_mrl),
-	FUNCTION(reserved86),
 	FUNCTION(tsif2_clk),
 	FUNCTION(sdc4_clk),
 	FUNCTION(qup7),
@@ -2416,9 +2418,10 @@
 	PINGROUP(147, NORTH, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
 	PINGROUP(148, NORTH, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
 	PINGROUP(149, NORTH, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
-	SDC_QDSD_PINGROUP(sdc2_clk, 0x59a000, 14, 6),
-	SDC_QDSD_PINGROUP(sdc2_cmd, 0x59a000, 11, 3),
-	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+	UFS_RESET(ufs_reset, 0x99f000),
 };
 
 static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 77e9dd7..f06fb1f 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -138,6 +138,7 @@
  * struct pmic_gpio_pad - keep current GPIO settings
  * @base: Address base in SPMI device.
  * @irq: IRQ number which this GPIO generate.
+ * @gpio_idx: The index in GPIO's hardware number space (1-based)
  * @is_enabled: Set to false when GPIO should be put in high Z state.
  * @out_value: Cached pin output value
  * @have_buffer: Set to true if GPIO output could be configured in push-pull,
@@ -158,6 +159,7 @@
 struct pmic_gpio_pad {
 	u16		base;
 	int		irq;
+	int		gpio_idx;
 	bool		is_enabled;
 	bool		out_value;
 	bool		have_buffer;
@@ -179,6 +181,7 @@
 	struct regmap	*map;
 	struct pinctrl_dev *ctrl;
 	struct gpio_chip chip;
+	const char **gpio_groups;
 };
 
 static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -297,7 +300,9 @@
 					 const char *const **groups,
 					 unsigned *const num_qgroups)
 {
-	*groups = pmic_gpio_groups;
+	struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = state->gpio_groups;
 	*num_qgroups = pctldev->desc->npins;
 	return 0;
 }
@@ -637,7 +642,7 @@
 
 	pad = pctldev->desc->pins[pin].drv_data;
 
-	seq_printf(s, " gpio%-2d:", pin + PMIC_GPIO_PHYSICAL_OFFSET);
+	seq_printf(s, " gpio%-2d:", pad->gpio_idx);
 
 	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_EN_CTL);
 
@@ -742,13 +747,29 @@
 			      const struct of_phandle_args *gpio_desc,
 			      u32 *flags)
 {
+	int i;
+	struct pmic_gpio_state *state = gpiochip_get_data(chip);
+	struct pinctrl_desc *desc = state->ctrl->desc;
+	struct pmic_gpio_pad *pad;
+
 	if (chip->of_gpio_n_cells < 2)
 		return -EINVAL;
 
 	if (flags)
 		*flags = gpio_desc->args[1];
 
-	return gpio_desc->args[0] - PMIC_GPIO_PHYSICAL_OFFSET;
+	for (i = 0; i < chip->ngpio; i++) {
+		pad = desc->pins[i].drv_data;
+		if (pad->gpio_idx == gpio_desc->args[0]) {
+			dev_dbg(state->dev, "gpio%-2d xlate to pin%-2d\n",
+						gpio_desc->args[0], i);
+			return i;
+		}
+	}
+
+	dev_err(state->dev, "Couldn't find pin for gpio %d\n",
+				gpio_desc->args[0]);
+	return -ENODEV;
 }
 
 static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
@@ -934,43 +955,124 @@
 	struct pinctrl_desc *pctrldesc;
 	struct pmic_gpio_pad *pad, *pads;
 	struct pmic_gpio_state *state;
-	int ret, npins, i;
-	u32 reg;
+	int ret, npins, ngpios, i, j, pin_idx;
+	int disallowed_count = 0;
+	u32 reg[2], start, size;
+	u32 *disallowed = NULL;
 
-	ret = of_property_read_u32(dev->of_node, "reg", &reg);
+	ret = of_property_read_u32_array(dev->of_node, "reg", reg, 2);
 	if (ret < 0) {
-		dev_err(dev, "missing base address");
+		dev_err(dev, "reg property reading failed\n");
 		return ret;
 	}
+	start = reg[0];
+	size = reg[1];
 
-	npins = platform_irq_count(pdev);
-	if (!npins)
+	ngpios = size / PMIC_GPIO_ADDRESS_RANGE;
+	if (ngpios == 0) {
+		dev_err(dev, "no gpios assigned\n");
+		return -ENODEV;
+	}
+
+	if (ngpios > ARRAY_SIZE(pmic_gpio_groups)) {
+		dev_err(dev, "reg property defines %d gpios, but only %d are allowed\n",
+				ngpios, (int)ARRAY_SIZE(pmic_gpio_groups));
 		return -EINVAL;
-	if (npins < 0)
-		return npins;
+	}
 
-	BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups));
+	if (of_find_property(dev->of_node, "qcom,gpios-disallowed",
+					&disallowed_count)) {
+		disallowed_count /= sizeof(u32);
+		if (disallowed_count == 0) {
+			dev_err(dev, "No data in gpios-disallowed\n");
+			return -EINVAL;
+		}
+
+		disallowed = kcalloc(disallowed_count, sizeof(u32), GFP_KERNEL);
+		if (disallowed == NULL)
+			return -ENOMEM;
+
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,gpios-disallowed",
+				disallowed, disallowed_count);
+		if (ret < 0) {
+			dev_err(dev, "qcom,gpios-disallowed property reading failed, ret=%d\n",
+								ret);
+			goto err_free;
+		}
+
+		for (i = 0; i < disallowed_count; i++) {
+			if (disallowed[i] >= ngpios + PMIC_GPIO_PHYSICAL_OFFSET
+				|| disallowed[i] < PMIC_GPIO_PHYSICAL_OFFSET) {
+				dev_err(dev, "invalid gpio = %d specified in qcom,gpios-disallowed, supported values: %d to %d\n",
+					disallowed[i],
+					PMIC_GPIO_PHYSICAL_OFFSET,
+					ngpios - 1 + PMIC_GPIO_PHYSICAL_OFFSET);
+				ret = -EINVAL;
+				goto err_free;
+			}
+			for (j = 0; j < i; j++) {
+				if (disallowed[i] == disallowed[j]) {
+					dev_err(dev, "duplicate gpio = %d listed in qcom,gpios-disallowed\n",
+							disallowed[i]);
+					ret = -EINVAL;
+					goto err_free;
+				}
+			}
+			dev_dbg(dev, "gpio %d NOT supported\n", disallowed[i]);
+		}
+	} else {
+		disallowed_count = 0;
+	}
+
+	npins = ngpios - disallowed_count;
+	if (npins <= 0) {
+		dev_err(dev, "No pins assigned\n");
+		ret = -ENODEV;
+		goto err_free;
+	}
+	if (platform_irq_count(pdev) != npins) {
+		dev_err(dev, "%d IRQs defined but %d expected\n",
+				platform_irq_count(pdev), npins);
+		ret = -EINVAL;
+		goto err_free;
+	}
 
 	state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
-	if (!state)
-		return -ENOMEM;
+	if (!state) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	platform_set_drvdata(pdev, state);
 
 	state->dev = &pdev->dev;
 	state->map = dev_get_regmap(dev->parent, NULL);
 
+	state->gpio_groups = devm_kcalloc(dev, sizeof(*state->gpio_groups),
+						npins, GFP_KERNEL);
+	if (!state->gpio_groups) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
 	pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
-	if (!pindesc)
-		return -ENOMEM;
+	if (!pindesc) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
-	if (!pads)
-		return -ENOMEM;
+	if (!pads) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
-	if (!pctrldesc)
-		return -ENOMEM;
+	if (!pctrldesc) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	pctrldesc->pctlops = &pmic_gpio_pinctrl_ops;
 	pctrldesc->pmxops = &pmic_gpio_pinmux_ops;
@@ -984,22 +1086,42 @@
 #ifdef CONFIG_DEBUG_FS
 	pctrldesc->custom_conf_items = pmic_conf_items;
 #endif
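+	/*
+	 * Walk all GPIOs and skip the ones listed in qcom,gpios-disallowed;
+	 * the remaining pins are numbered contiguously via pin_idx.
+	 */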
+	for (pin_idx = 0, i = 0; i < ngpios; i++) {
+		for (j = 0; j < disallowed_count; j++) {
+			if (i + PMIC_GPIO_PHYSICAL_OFFSET == disallowed[j])
+				break;
+		}
+		if (j != disallowed_count)
+			continue;
 
-	for (i = 0; i < npins; i++, pindesc++) {
-		pad = &pads[i];
+		pad = &pads[pin_idx];
 		pindesc->drv_data = pad;
-		pindesc->number = i;
+		pindesc->number = pin_idx;
 		pindesc->name = pmic_gpio_groups[i];
 
-		pad->irq = platform_get_irq(pdev, i);
-		if (pad->irq < 0)
-			return pad->irq;
+		pad->gpio_idx = i + PMIC_GPIO_PHYSICAL_OFFSET;
+		pad->irq = platform_get_irq(pdev, pin_idx);
+		if (pad->irq < 0) {
+			dev_err(state->dev,
+				"failed to get irq for gpio %d (pin %d), ret=%d\n",
+					pad->gpio_idx, pin_idx, pad->irq);
+			ret = pad->irq;
+			goto err_free;
+		}
+		/* Every pin is a group */
+		state->gpio_groups[pin_idx] = pmic_gpio_groups[i];
 
-		pad->base = reg + i * PMIC_GPIO_ADDRESS_RANGE;
+		pad->base = start + i * PMIC_GPIO_ADDRESS_RANGE;
 
 		ret = pmic_gpio_populate(state, pad);
-		if (ret < 0)
-			return ret;
+		if (ret < 0) {
+			dev_err(state->dev,
+				"failed to populate gpio %d, ret=%d\n",
+							i, ret);
+			goto err_free;
+		}
+		pindesc++;
+		pin_idx++;
 	}
 
 	state->chip = pmic_gpio_gpio_template;
@@ -1011,25 +1133,29 @@
 	state->chip.can_sleep = false;
 
 	state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
-	if (IS_ERR(state->ctrl))
-		return PTR_ERR(state->ctrl);
+	if (IS_ERR(state->ctrl)) {
+		ret = PTR_ERR(state->ctrl);
+		dev_err(state->dev, "failed to register pinctrl device, ret=%d\n",
+							ret);
+		goto err_free;
+	}
 
 	ret = gpiochip_add_data(&state->chip, state);
 	if (ret) {
-		dev_err(state->dev, "can't add gpio chip\n");
-		return ret;
+		dev_err(state->dev, "can't add gpio chip, ret=%d\n", ret);
+		goto err_free;
 	}
 
 	ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
 	if (ret) {
-		dev_err(dev, "failed to add pin range\n");
-		goto err_range;
+		dev_err(dev, "failed to add pin range\n, ret=%d\n", ret);
+		gpiochip_remove(&state->chip);
+		goto err_free;
 	}
 
-	return 0;
+err_free:
+	kfree(disallowed);
 
-err_range:
-	gpiochip_remove(&state->chip);
 	return ret;
 }
 
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index fff8966..5fdb4e9 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -415,6 +415,16 @@
 {
 	uint32_t val;
 
+	/*
+	 * Allocate new events for this channel first,
+	 * before submitting the new TREs.
+	 * For TO_GSI channels the event ring doorbell is rung as part of
+	 * interrupt handling.
+	 */
+	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
+		gsi_ring_evt_doorbell(ctx->evtr);
+	ctx->ring.wp = ctx->ring.wp_local;
+
 	/* write order MUST be MSB followed by LSB */
 	val = ((ctx->ring.wp_local >> 32) &
 			GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
@@ -470,8 +480,8 @@
 			cntr = 0;
 			rp = gsi_readl(gsi_ctx->base +
 				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
-			rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-				GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(i, ee))) << 32;
+			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
 			ctx->ring.rp = rp;
 			while (ctx->ring.rp_local != rp) {
 				++cntr;
@@ -1529,6 +1539,7 @@
 static int gsi_validate_channel_props(struct gsi_chan_props *props)
 {
 	uint64_t ra;
+	uint64_t last;
 
 	if (props->ch_id >= gsi_ctx->max_ch) {
 		GSIERR("ch_id %u invalid\n", props->ch_id);
@@ -1556,6 +1567,17 @@
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
+	last = props->ring_base_addr + props->ring_len - props->re_size;
+
+	/* MSB should stay same within the ring */
+	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
+	    (last & 0xFFFFFFFF00000000ULL)) {
+		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
+			props->ring_base_addr,
+			props->ring_len);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
 	if (props->prot == GSI_CHAN_PROT_GPI &&
 			!props->ring_base_vaddr) {
 		GSIERR("protocol %u requires ring base VA\n", props->prot);
@@ -2128,29 +2150,22 @@
 		uint16_t *num_free_re)
 {
 	uint16_t start;
-	uint16_t start_hw;
 	uint16_t end;
 	uint64_t rp;
-	uint64_t rp_hw;
 	int ee = gsi_ctx->per.ee;
 	uint16_t used;
-	uint16_t used_hw;
-
-	rp_hw = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
-	rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
-		<< 32;
 
 	if (!ctx->evtr) {
-		rp = rp_hw;
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
 		ctx->ring.rp = rp;
 	} else {
 		rp = ctx->ring.rp_local;
 	}
 
 	start = gsi_find_idx_from_addr(&ctx->ring, rp);
-	start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
 	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
 
 	if (end >= start)
@@ -2158,13 +2173,7 @@
 	else
 		used = ctx->ring.max_num_elem + 1 - (start - end);
 
-	if (end >= start_hw)
-		used_hw = end - start_hw;
-	else
-		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end);
-
 	*num_free_re = ctx->ring.max_num_elem - used;
-	gsi_update_ch_dp_stats(ctx, used_hw);
 }
 
 int gsi_query_channel_info(unsigned long chan_hdl,
@@ -2274,14 +2283,12 @@
 
 	rp = gsi_readl(gsi_ctx->base +
 		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
-	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
+	rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
 	ctx->ring.rp = rp;
 
 	wp = gsi_readl(gsi_ctx->base +
 		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
-	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
+	wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
 	ctx->ring.wp = wp;
 
 	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
@@ -2353,6 +2360,8 @@
 			tre.re_type = GSI_RE_XFER;
 		} else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) {
 			tre.re_type = GSI_RE_IMMD_CMD;
+		} else if (xfer[i].type == GSI_XFER_ELEM_NOP) {
+			tre.re_type = GSI_RE_NOP;
 		} else {
 			GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl,
 				xfer[i].type);
@@ -2420,6 +2429,9 @@
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
+	if (ctx->ring.wp == ctx->ring.wp_local)
+		return GSI_STATUS_SUCCESS;
+
 	gsi_ring_chan_doorbell(ctx);
 
 	return GSI_STATUS_SUCCESS;
@@ -2457,19 +2469,22 @@
 	}
 
 	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
-	rp = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
-	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee))) << 32;
-	ctx->evtr->ring.rp = rp;
-	if (rp == ctx->evtr->ring.rp_local) {
+	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
+		/* update rp to see if we have anything new to process */
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
+		ctx->evtr->ring.rp = rp;
+	}
+
+	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
 		spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
 		ctx->stats.poll_empty++;
 		return GSI_STATUS_POLL_EMPTY;
 	}
 
 	gsi_process_evt_re(ctx->evtr, notify, false);
-	gsi_ring_evt_doorbell(ctx->evtr);
 	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
 	ctx->stats.poll_ok++;
 
@@ -2839,10 +2854,8 @@
 
 	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
 		"gsi", 0);
-	if (gsi_ctx->ipc_logbuf == NULL) {
-		GSIERR("failed to get ipc_logbuf\n");
-		return -ENOMEM;
-	}
+	if (gsi_ctx->ipc_logbuf == NULL)
+		GSIERR("failed to create IPC log, continue...\n");
 
 	gsi_ctx->dev = dev;
 	init_completion(&gsi_ctx->gen_ee_cmd_compl);
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index f53a4bd..32fb178 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -209,6 +209,7 @@
 enum gsi_re_type {
 	GSI_RE_XFER = 0x2,
 	GSI_RE_IMMD_CMD = 0x3,
+	GSI_RE_NOP = 0x4,
 };
 
 struct __packed gsi_tre {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 717c8917..b1d1dfa 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -490,11 +490,6 @@
 		goto error;
 	}
 
-	if (gsi_ctx->chan[ch_id].props.prot == GSI_CHAN_PROT_GPI) {
-		TERR("valid for non GPI channels only\n");
-		goto error;
-	}
-
 	if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) {
 		TERR("ch_%d: already enabled/disabled\n", ch_id);
 		return -EFAULT;
@@ -631,7 +626,7 @@
 	else
 		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw);
 
-	TERR("ch %d used %d\n", ctx->props.ch_id, used_hw);
+	TDBG("ch %d used %d\n", ctx->props.ch_id, used_hw);
 	gsi_update_ch_dp_stats(ctx, used_hw);
 }
 
@@ -641,7 +636,6 @@
 
 	for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
 		if (gsi_ctx->chan[ch_id].allocated &&
-		    gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
 		    gsi_ctx->chan[ch_id].enable_dp_stats)
 			gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]);
 	}
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 6dd371e..d45fa51 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1635,6 +1635,25 @@
 EXPORT_SYMBOL(ipa_get_smem_restr_bytes);
 
 /**
+ * ipa_broadcast_wdi_quota_reach_ind() - broadcast a quota reached indication
+ * @fid: [in] netdev ID
+ * @num_bytes: [in] number of bytes used
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+		uint64_t num_bytes)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_broadcast_wdi_quota_reach_ind,
+		fid, num_bytes);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind);
+
+/**
  * ipa_uc_wdi_get_dbpa() - To retrieve
  * doorbell physical address of wlan pipes
  * @param:  [in/out] input/output parameters
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 1b8e3d6..bfe1608 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -183,6 +183,9 @@
 
 	u16 (*ipa_get_smem_restr_bytes)(void);
 
+	int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
+		uint64_t num_bytes);
+
 	int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
 
 	int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 964d6c8..3dca3e6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -18,6 +18,7 @@
 #include "ipa_i.h"
 #include "ipa_trace.h"
 
+#define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_LAST_DESC_CNT 0xFFFF
 #define POLLING_INACTIVITY_RX 40
 #define POLLING_INACTIVITY_TX 40
@@ -1099,16 +1100,18 @@
 			break;
 
 		ipa_wq_rx_common(ep->sys, iov.size);
-		cnt += 5;
+		cnt += IPA_WAN_AGGR_PKT_CNT;
 	};
 
-	if (cnt == 0) {
+	if (cnt == 0 || cnt < weight) {
 		ep->inactive_cycles++;
 		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
 
 		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
 			ep->switch_to_intr = true;
 			delay = 0;
+		} else if (cnt < weight) {
+			delay = 0;
 		}
 		queue_delayed_work(ep->sys->wq,
 			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
@@ -3176,14 +3179,9 @@
 				sys->repl_hdlr =
 				   ipa_replenish_rx_cache;
 			}
-			if (in->napi_enabled) {
-				sys->rx_pool_sz =
-					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
-				if (in->recycle_enabled) {
-					sys->repl_hdlr =
-					   ipa_replenish_rx_cache_recycle;
-				}
-			}
+			if (in->napi_enabled && in->recycle_enabled)
+				sys->repl_hdlr =
+					ipa_replenish_rx_cache_recycle;
 			sys->ep->wakelock_client =
 			   IPA_WAKELOCK_REF_CLIENT_WAN_RX;
 			in->ipa_ep_cfg.aggr.aggr_sw_eof_active
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 672c620..cd575fe 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,8 +51,6 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 560ffda..0af9387 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -64,6 +64,7 @@
 #define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
 
 #define NAPI_WEIGHT 60
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
 
 static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
 static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
@@ -102,6 +103,7 @@
 	bool ipa_loaduC;
 	bool ipa_advertise_sg_support;
 	bool ipa_napi_enable;
+	u32 wan_rx_desc_size;
 };
 
 static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
@@ -1072,6 +1074,8 @@
 		IPAWANDBG
 		("SW filtering out none QMAP packet received from %s",
 		current->comm);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -1113,6 +1117,8 @@
 	if (ret) {
 		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
 		       dev->name, ret);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return -EFAULT;
 	}
 	/* IPA_RM checking end */
@@ -1128,7 +1134,6 @@
 
 	if (ret) {
 		ret = NETDEV_TX_BUSY;
-		dev->stats.tx_dropped++;
 		goto out;
 	}
 
@@ -1307,10 +1312,8 @@
 	ipa_to_apps_ep_cfg.priv = dev;
 
 	ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
-	if (ipa_to_apps_ep_cfg.napi_enabled)
-		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
-	else
-		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	ipa_to_apps_ep_cfg.desc_fifo_sz =
+		ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
 
 	mutex_lock(&ipa_to_apps_pipe_handle_guard);
 	if (atomic_read(&is_ssr)) {
@@ -1941,6 +1944,9 @@
 static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 		struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
 {
+	int result;
+
+	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
 	ipa_rmnet_drv_res->ipa_rmnet_ssr =
 			of_property_read_bool(pdev->dev.of_node,
 			"qcom,rmnet-ipa-ssr");
@@ -1963,6 +1969,18 @@
 			"qcom,ipa-napi-enable");
 	pr_info("IPA Napi Enable = %s\n",
 		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+	/* Get IPA WAN RX desc fifo size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-desc-size",
+			&ipa_rmnet_drv_res->wan_rx_desc_size);
+	if (result)
+		pr_info("using default for wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+	else
+		IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 20b73d8..ca63518 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2245,7 +2245,8 @@
 			reg_write.pipeline_clear_options =
 				IPAHAL_HPS_CLEAR;
 			reg_write.offset =
-				ipahal_get_reg_ofst(IPA_ENDP_STATUS_n);
+				ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
+					ep_idx);
 			ipahal_get_status_ep_valmask(
 				ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
 				&valmask);
@@ -2845,6 +2846,13 @@
 		}
 	}
 
+	/* allocate the common PROD event ring */
+	if (ipa3_alloc_common_event_ring()) {
+		IPAERR("ipa3_alloc_common_event_ring failed.\n");
+		result = -EPERM;
+		goto fail_ch20_wa;
+	}
+
 	/* CMD OUT (AP->IPA) */
 	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
 	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
@@ -4239,6 +4247,52 @@
 	return 0;
 }
 
+static int ipa3_alloc_pkt_init(void)
+{
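+	/*
+	 * Pre-build one IP_PACKET_INIT immediate command per pipe in
+	 * DMA-coherent memory so the SW TX data path can reference it by
+	 * physical address (pkt_init_imm[pipe]) instead of constructing a
+	 * fresh immediate command for every packet.
+	 */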
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_ip_packet_init cmd = {0};
+	int i;
+
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+		&cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct IMM cmd\n");
+		return -ENOMEM;
+	}
+
+	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+		&mem.phys_base, GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+		ipahal_destroy_imm_cmd(cmd_pyld);
+		return -ENOMEM;
+	}
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+	memset(mem.base, 0, mem.size);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		cmd.destination_pipe_index = i;
+		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+			&cmd, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct IMM cmd\n");
+			dma_free_coherent(ipa3_ctx->pdev,
+				mem.size,
+				mem.base,
+				mem.phys_base);
+			return -ENOMEM;
+		}
+		memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
+			cmd_pyld->len);
+		ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
+		ipahal_destroy_imm_cmd(cmd_pyld);
+	}
+
+	return 0;
+}
+
 /**
 * ipa3_pre_init() - Initialize the IPA Driver.
 * This part contains all initialization which doesn't require IPA HW, such
@@ -4648,6 +4702,13 @@
 		goto fail_create_apps_resource;
 	}
 
+	result = ipa3_alloc_pkt_init();
+	if (result) {
+		IPAERR("Failed to alloc pkt_init payload\n");
+		result = -ENODEV;
+		goto fail_create_apps_resource;
+	}
+
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
 		ipa3_enable_dcd();
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 9bb3e0e..62e68dd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -21,6 +21,7 @@
 #include "ipahal/ipahal.h"
 #include "ipahal/ipahal_fltrt.h"
 
+#define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_LAST_DESC_CNT 0xFFFF
 #define POLLING_INACTIVITY_RX 40
 #define POLLING_MIN_SLEEP_RX 1010
@@ -60,7 +61,6 @@
 #define IPA_ODU_RX_POOL_SZ 64
 #define IPA_SIZE_DL_CSUM_META_TRAILER 8
 
-#define IPA_GSI_EVT_RING_LEN 4096
 #define IPA_GSI_MAX_CH_LOW_WEIGHT 15
 #define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
 
@@ -69,12 +69,9 @@
 #define IPA_GSI_CH_20_WA_VIRT_CHAN 29
 
 #define IPA_DEFAULT_SYS_YELLOW_WM 32
+#define IPA_REPL_XFER_THRESH 10
 
-/*
- * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
- * IPA users still use sps_iovec size as FIFO element size.
- */
-#define IPA_FIFO_ELEMENT_SIZE 8
+#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
 
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
@@ -183,106 +180,64 @@
 {
 	struct ipa3_tx_pkt_wrapper *tx_pkt;
 	struct ipa3_sys_context *sys;
+	struct ipa3_tx_pkt_wrapper *this_pkt;
 
 	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
 	sys = tx_pkt->sys;
-
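+	/*
+	 * Completions are handled in submission order: drain any earlier
+	 * descriptors still on head_desc_list before handling the packet
+	 * this work item was queued for.
+	 */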
+	spin_lock_bh(&sys->spinlock);
+	this_pkt = list_first_entry(&sys->head_desc_list,
+		struct ipa3_tx_pkt_wrapper, link);
+	while (tx_pkt != this_pkt) {
+		spin_unlock_bh(&sys->spinlock);
+		ipa3_wq_write_done_common(sys, this_pkt);
+		spin_lock_bh(&sys->spinlock);
+		this_pkt = list_first_entry(&sys->head_desc_list,
+			struct ipa3_tx_pkt_wrapper, link);
+	}
+	spin_unlock_bh(&sys->spinlock);
 	ipa3_wq_write_done_common(sys, tx_pkt);
 }
 
-/**
- * ipa3_send_one() - Send a single descriptor
- * @sys:	system pipe context
- * @desc:	descriptor to send
- * @in_atomic:  whether caller is in atomic context
- *
- * - Allocate tx_packet wrapper
- * - transfer data to the IPA
- * - after the transfer was done the user will be notified via provided
- *   callback
- *
- * Return codes: 0: success, -EFAULT: failure
- */
-int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
-		bool in_atomic)
+
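+/*
+ * ipa3_send_nop_desc() - send a NOP TRE to generate a TX completion
+ *
+ * Transfers queued on channels that use the common event ring are posted
+ * without EOT, so this delayed NOP (with EOT set) is what rings the
+ * doorbell and produces the completion that releases pending tx_pkt
+ * wrappers.
+ */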
+static void ipa3_send_nop_desc(struct work_struct *work)
 {
+	struct ipa3_sys_context *sys = container_of(work,
+		struct ipa3_sys_context, work);
+	struct gsi_xfer_elem nop_xfer;
 	struct ipa3_tx_pkt_wrapper *tx_pkt;
-	struct gsi_xfer_elem gsi_xfer;
-	int result;
-	dma_addr_t dma_address;
-	u32 mem_flag = GFP_ATOMIC;
 
-	if (unlikely(!in_atomic))
-		mem_flag = GFP_KERNEL;
-
-	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, mem_flag);
+	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
+	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
 	if (!tx_pkt) {
 		IPAERR("failed to alloc tx wrapper\n");
-		goto fail_mem_alloc;
-	}
-
-	if (!desc->dma_address_valid) {
-		dma_address = dma_map_single(ipa3_ctx->pdev, desc->pyld,
-			desc->len, DMA_TO_DEVICE);
-	} else {
-		dma_address = desc->dma_address;
-		tx_pkt->no_unmap_dma = true;
-	}
-	if (!dma_address) {
-		IPAERR("failed to DMA wrap\n");
-		goto fail_dma_map;
+		queue_work(sys->wq, &sys->work);
+		return;
 	}
 
 	INIT_LIST_HEAD(&tx_pkt->link);
-	tx_pkt->type = desc->type;
-	tx_pkt->cnt = 1;    /* only 1 desc in this "set" */
-
-	tx_pkt->mem.phys_base = dma_address;
-	tx_pkt->mem.base = desc->pyld;
-	tx_pkt->mem.size = desc->len;
-	tx_pkt->sys = sys;
-	tx_pkt->callback = desc->callback;
-	tx_pkt->user1 = desc->user1;
-	tx_pkt->user2 = desc->user2;
-
-	memset(&gsi_xfer, 0, sizeof(gsi_xfer));
-	gsi_xfer.addr = dma_address;
-	gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
-	gsi_xfer.xfer_user_data = tx_pkt;
-	if (desc->type == IPA_IMM_CMD_DESC) {
-		gsi_xfer.len = desc->opcode;
-		gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
-	} else {
-		gsi_xfer.len = desc->len;
-		gsi_xfer.type = GSI_XFER_ELEM_DATA;
-	}
-
+	tx_pkt->cnt = 1;
 	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
-
+	tx_pkt->no_unmap_dma = true;
+	tx_pkt->sys = sys;
 	spin_lock_bh(&sys->spinlock);
 	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+	spin_unlock_bh(&sys->spinlock);
 
-	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
-				&gsi_xfer, true);
-	if (result != GSI_STATUS_SUCCESS) {
-		IPAERR("GSI xfer failed.\n");
-		goto fail_transport_send;
+	memset(&nop_xfer, 0, sizeof(nop_xfer));
+	nop_xfer.type = GSI_XFER_ELEM_NOP;
+	nop_xfer.flags = GSI_XFER_FLAG_EOT;
+	nop_xfer.xfer_user_data = tx_pkt;
+	if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
+		IPAERR("gsi_queue_xfer for ch:%lu failed\n",
+			sys->ep->gsi_chan_hdl);
+		queue_work(sys->wq, &sys->work);
+		return;
 	}
+	sys->len_pending_xfer = 0;
 
-	spin_unlock_bh(&sys->spinlock);
-
-	return 0;
-
-fail_transport_send:
-	list_del(&tx_pkt->link);
-	spin_unlock_bh(&sys->spinlock);
-	dma_unmap_single(ipa3_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
-fail_dma_map:
-	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
-fail_mem_alloc:
-	return -EFAULT;
 }
 
+
 /**
  * ipa3_send() - Send multiple descriptors in one HW transaction
  * @sys: system pipe context
@@ -437,19 +392,21 @@
 		}
 
 		if (i == (num_desc - 1)) {
-			gsi_xfer_elem_array[i].flags |=
-				GSI_XFER_FLAG_EOT;
-			if (sys->ep->client == IPA_CLIENT_APPS_WAN_PROD
-				&& sys->policy == IPA_POLICY_INTR_MODE)
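+			/*
+			 * Channels on the common event ring complete via the
+			 * delayed NOP descriptor, so neither EOT nor BEI is
+			 * set on the last element in that case.
+			 */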
+			if (!sys->use_comm_evt_ring) {
+				gsi_xfer_elem_array[i].flags |=
+					GSI_XFER_FLAG_EOT;
 				gsi_xfer_elem_array[i].flags |=
 					GSI_XFER_FLAG_BEI;
+			}
 			gsi_xfer_elem_array[i].xfer_user_data =
 				tx_pkt_first;
-		} else
-			gsi_xfer_elem_array[i].flags |=
-				GSI_XFER_FLAG_CHAIN;
+		} else {
+			gsi_xfer_elem_array[i].flags |=
+				GSI_XFER_FLAG_CHAIN;
+		}
 	}
 
+	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
 	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
 			gsi_xfer_elem_array, true);
 	if (result != GSI_STATUS_SUCCESS) {
@@ -459,6 +416,16 @@
 	kfree(gsi_xfer_elem_array);
 
 	spin_unlock_bh(&sys->spinlock);
+
+	/* set the timer for sending the NOP descriptor */
+	if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) {
+		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
+
+		IPADBG_LOW("scheduling timer for ch %lu\n",
+			sys->ep->gsi_chan_hdl);
+		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
+	}
+
 	return 0;
 
 failure:
@@ -491,6 +458,25 @@
 }
 
 /**
+ * ipa3_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer is done the sending user is notified via the
+ *   provided callback
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+	bool in_atomic)
+{
+	return ipa3_send(sys, 1, desc, in_atomic);
+}
+
+/**
  * ipa3_transport_irq_cmd_ack - callback function which will be called by
  * the transport driver after an immediate command is complete.
  * @user1:	pointer to the descriptor of the transfer
@@ -771,15 +757,14 @@
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	do {
 		cnt = ipa3_handle_rx_core(sys, true, true);
-		if (cnt == 0) {
+		if (cnt == 0)
 			inactive_cycles++;
-			trace_idle_sleep_enter3(sys->ep->client);
-			usleep_range(POLLING_MIN_SLEEP_RX,
-					POLLING_MAX_SLEEP_RX);
-			trace_idle_sleep_exit3(sys->ep->client);
-		} else {
+		else
 			inactive_cycles = 0;
-		}
+
+		trace_idle_sleep_enter3(sys->ep->client);
+		usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
+		trace_idle_sleep_exit3(sys->ep->client);
 	} while (inactive_cycles <= POLLING_INACTIVITY_RX);
 
 	trace_poll_to_intr3(sys->ep->client);
@@ -808,6 +793,15 @@
 		ipa3_handle_rx(sys);
 }
 
+enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
+{
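+	/* doorbell timer expired: queue the NOP-descriptor work item */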
+	struct ipa3_sys_context *sys = container_of(param,
+		struct ipa3_sys_context, db_timer);
+
+	queue_work(sys->wq, &sys->work);
+	return HRTIMER_NORESTART;
+}
+
 /**
  * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
  * IPA EP configuration
@@ -889,6 +883,9 @@
 		INIT_LIST_HEAD(&ep->sys->head_desc_list);
 		INIT_LIST_HEAD(&ep->sys->rcycl_list);
 		spin_lock_init(&ep->sys->spinlock);
+		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL);
+		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
 	} else {
 		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
 	}
@@ -1071,7 +1068,10 @@
 	}
 
 	/* free event ring only when it is present */
-	if (ep->gsi_evt_ring_hdl != ~0) {
+	if (ep->sys->use_comm_evt_ring) {
+		ipa3_ctx->gsi_evt_comm_ring_rem +=
+			ep->gsi_mem_info.chan_ring_len;
+	} else if (ep->gsi_evt_ring_hdl != ~0) {
 		result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
 		if (result != GSI_STATUS_SUCCESS) {
 			IPAERR("Failed to reset evt ring: %d.\n",
@@ -1145,7 +1145,7 @@
 		dev_kfree_skb_any(skb);
 }
 
-static void ipa3_tx_cmd_comp(void *user1, int user2)
+void ipa3_tx_cmd_comp(void *user1, int user2)
 {
 	ipahal_destroy_imm_cmd(user1);
 }
@@ -1180,7 +1180,6 @@
 	struct ipa3_desc *desc;
 	struct ipa3_desc _desc[3];
 	int dst_ep_idx;
-	struct ipahal_imm_cmd_ip_packet_init cmd;
 	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
 	struct ipa3_sys_context *sys;
 	int src_ep_idx;
@@ -1267,54 +1266,58 @@
 
 	if (dst_ep_idx != -1) {
 		/* SW data path */
-		cmd.destination_pipe_index = dst_ep_idx;
-		cmd_pyld = ipahal_construct_imm_cmd(
-			IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
-		if (unlikely(!cmd_pyld)) {
-			IPAERR("failed to construct ip_packet_init imm cmd\n");
-			goto fail_mem;
+		data_idx = 0;
+		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+			/*
+			 * For non-interrupt mode channel (where there is no
+			 * For a non-interrupt mode channel (where there is no
+			 * event ring) a TAG STATUS command is used for
+			 * completion notification. IPA will generate a status
+			 * packet with tag info as a result of the TAG STATUS
+			 * command.
+			desc[data_idx].opcode =
+				ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+			desc[data_idx].type = IPA_IMM_CMD_DESC;
+			desc[data_idx].callback = ipa3_tag_destroy_imm;
+			data_idx++;
 		}
-
-		/* the tag field will be populated in ipa3_send() function */
-		desc[0].opcode = ipahal_imm_cmd_get_opcode(
-			IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
-		desc[0].type = IPA_IMM_CMD_DESC;
-		desc[0].callback = ipa3_tag_destroy_imm;
-		desc[1].opcode =
+		desc[data_idx].opcode =
 			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
-		desc[1].pyld = cmd_pyld->data;
-		desc[1].len = cmd_pyld->len;
-		desc[1].type = IPA_IMM_CMD_DESC;
-		desc[1].callback = ipa3_tx_cmd_comp;
-		desc[1].user1 = cmd_pyld;
-		desc[2].pyld = skb->data;
-		desc[2].len = skb_headlen(skb);
-		desc[2].type = IPA_DATA_DESC_SKB;
-		desc[2].callback = ipa3_tx_comp_usr_notify_release;
-		desc[2].user1 = skb;
-		desc[2].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+		desc[data_idx].dma_address_valid = true;
+		desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
+		desc[data_idx].type = IPA_IMM_CMD_DESC;
+		desc[data_idx].callback = NULL;
+		data_idx++;
+		desc[data_idx].pyld = skb->data;
+		desc[data_idx].len = skb_headlen(skb);
+		desc[data_idx].type = IPA_DATA_DESC_SKB;
+		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
+		desc[data_idx].user1 = skb;
+		desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
 				meta->pkt_init_dst_ep_remote) ?
 				src_ep_idx :
 				dst_ep_idx;
 		if (meta && meta->dma_address_valid) {
-			desc[2].dma_address_valid = true;
-			desc[2].dma_address = meta->dma_address;
+			desc[data_idx].dma_address_valid = true;
+			desc[data_idx].dma_address = meta->dma_address;
 		}
+		data_idx++;
 
 		for (f = 0; f < num_frags; f++) {
-			desc[3+f].frag = &skb_shinfo(skb)->frags[f];
-			desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
-			desc[3+f].len = skb_frag_size(desc[3+f].frag);
+			desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
+			desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
+			desc[data_idx + f].len =
+				skb_frag_size(desc[data_idx + f].frag);
 		}
 		/* don't free skb till frag mappings are released */
 		if (num_frags) {
-			desc[3+f-1].callback = desc[2].callback;
-			desc[3+f-1].user1 = desc[2].user1;
-			desc[3+f-1].user2 = desc[2].user2;
-			desc[2].callback = NULL;
+			desc[data_idx + f - 1].callback =
+				desc[data_idx - 1].callback;
+			desc[data_idx + f - 1].user1 = desc[data_idx - 1].user1;
+			desc[data_idx + f - 1].user2 = desc[data_idx - 1].user2;
+			desc[data_idx - 1].callback = NULL;
 		}
 
-		if (ipa3_send(sys, num_frags + 3, desc, true)) {
+		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
 			IPAERR("fail to send skb %p num_frags %u SWP\n",
 				skb, num_frags);
 			goto fail_send;
@@ -1699,12 +1702,21 @@
 		gsi_xfer_elem_one.xfer_user_data = rx_pkt;
 
 		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
-				1, &gsi_xfer_elem_one, true);
+				1, &gsi_xfer_elem_one, false);
 		if (ret != GSI_STATUS_SUCCESS) {
 			IPAERR("failed to provide buffer: %d\n",
 				ret);
 			goto fail_provide_rx_buffer;
 		}
+
+		/*
+		 * As ringing the doorbell is a costly operation, notify GSI
+		 * of new buffers only once the threshold is exceeded
+		 */
+		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+			sys->len_pending_xfer = 0;
+			gsi_start_xfer(sys->ep->gsi_chan_hdl);
+		}
 	}
 
 	return;
@@ -1719,7 +1731,7 @@
 fail_skb_alloc:
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 fail_kmem_cache_alloc:
-	if (rx_len_cached == 0)
+	if (rx_len_cached - sys->len_pending_xfer == 0)
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
 				msecs_to_jiffies(1));
 }
@@ -1794,12 +1806,21 @@
 		gsi_xfer_elem_one.xfer_user_data = rx_pkt;
 
 		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
-				1, &gsi_xfer_elem_one, true);
+				1, &gsi_xfer_elem_one, false);
 		if (ret != GSI_STATUS_SUCCESS) {
 			IPAERR("failed to provide buffer: %d\n",
 				ret);
 			goto fail_provide_rx_buffer;
 		}
+
+		/*
+		 * As ringing the doorbell is a costly operation, notify GSI
+		 * of new buffers only once the threshold is exceeded
+		 */
+		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+			sys->len_pending_xfer = 0;
+			gsi_start_xfer(sys->ep->gsi_chan_hdl);
+		}
 	}
 
 	return;
@@ -1815,7 +1836,7 @@
 	INIT_LIST_HEAD(&rx_pkt->link);
 	spin_unlock_bh(&sys->spinlock);
 fail_kmem_cache_alloc:
-	if (rx_len_cached == 0)
+	if (rx_len_cached - sys->len_pending_xfer == 0)
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
 		msecs_to_jiffies(1));
 }
@@ -1848,12 +1869,22 @@
 		gsi_xfer_elem_one.xfer_user_data = rx_pkt;
 
 		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
-			&gsi_xfer_elem_one, true);
+			&gsi_xfer_elem_one, false);
 		if (ret != GSI_STATUS_SUCCESS) {
 			IPAERR("failed to provide buffer: %d\n",
 				ret);
 			break;
 		}
+
+		/*
+		 * As ringing the doorbell is a costly operation, notify GSI
+		 * of new buffers only once the threshold is exceeded
+		 */
+		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+			sys->len_pending_xfer = 0;
+			gsi_start_xfer(sys->ep->gsi_chan_hdl);
+		}
+
 		rx_len_cached = ++sys->len;
 		curr = (curr + 1) % sys->repl.capacity;
 		/* ensure write is done before setting head index */
@@ -1863,7 +1894,8 @@
 
 	queue_work(sys->repl_wq, &sys->repl_work);
 
-	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+	if (rx_len_cached - sys->len_pending_xfer
+		<= IPA_DEFAULT_SYS_YELLOW_WM) {
 		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
 		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
@@ -2638,9 +2670,9 @@
 static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		struct ipa3_sys_context *sys)
 {
-	if (in->client == IPA_CLIENT_APPS_CMD_PROD ||
-		in->client == IPA_CLIENT_APPS_WAN_PROD) {
+	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
 		sys->policy = IPA_POLICY_INTR_MODE;
+		sys->use_comm_evt_ring = false;
 		return 0;
 	}
 
@@ -2652,12 +2684,12 @@
 	if (IPA_CLIENT_IS_PROD(in->client)) {
 		if (sys->ep->skip_ep_cfg) {
 			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->use_comm_evt_ring = true;
 			atomic_set(&sys->curr_polling_state, 0);
 		} else {
-			sys->policy = IPA_POLICY_NOINTR_MODE;
-			sys->ep->status.status_en = true;
-			sys->ep->status.status_ep = ipa3_get_ep_mapping(
-					IPA_CLIENT_APPS_LAN_CONS);
+			sys->policy = IPA_POLICY_INTR_MODE;
+			sys->use_comm_evt_ring = true;
+			INIT_WORK(&sys->work, ipa3_send_nop_desc);
 		}
 	} else {
 		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
@@ -2703,9 +2735,6 @@
 					sys->repl_hdlr =
 					   ipa3_replenish_rx_cache;
 				}
-				if (in->napi_enabled)
-					sys->rx_pool_sz =
-					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
 				if (in->napi_enabled && in->recycle_enabled)
 					sys->repl_hdlr =
 					 ipa3_replenish_rx_cache_recycle;
@@ -3325,6 +3354,46 @@
 	}
 }
 
+int ipa3_alloc_common_event_ring(void)
+{
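+	/*
+	 * Allocate the event ring shared by the APPS producer channels.
+	 * Channels with use_comm_evt_ring set attach to this ring instead
+	 * of allocating a private one; gsi_evt_comm_ring_rem tracks how
+	 * much of it is still available for new channels.
+	 */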
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	dma_addr_t evt_dma_addr;
+	int result;
+
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+
+	gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;
+
+	gsi_evt_ring_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev,
+		gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
+	if (!gsi_evt_ring_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_evt_ring_props.ring_len);
+		return -ENOMEM;
+	}
+	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
+	gsi_evt_ring_props.int_modt = 0;
+	gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel */
+	gsi_evt_ring_props.rp_update_addr = 0;
+	gsi_evt_ring_props.exclusive = false;
+	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+		ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
+	if (result) {
+		IPAERR("gsi_alloc_evt_ring failed %d\n", result);
+		return result;
+	}
+	ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;
+
+	return 0;
+}
+
 static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 	struct ipa3_ep_context *ep)
 {
@@ -3344,18 +3413,31 @@
 	evt_dma_addr = 0;
 	ep->gsi_evt_ring_hdl = ~0;
 	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
-	/*
-	 * allocate event ring for all interrupt-policy
-	 * pipes and IPA consumers pipes
-	 */
-	if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
+	if (ep->sys->use_comm_evt_ring) {
+		if (ipa3_ctx->gsi_evt_comm_ring_rem < 2 * in->desc_fifo_sz) {
+			IPAERR("not enough space in common event ring\n");
+			IPAERR("available: %d needed: %d\n",
+				ipa3_ctx->gsi_evt_comm_ring_rem,
+				2 * in->desc_fifo_sz);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		ipa3_ctx->gsi_evt_comm_ring_rem -= (2 * in->desc_fifo_sz);
+		ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
+	} else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
 	     IPA_CLIENT_IS_CONS(ep->client)) {
 		gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
 		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
 		gsi_evt_ring_props.re_size =
 			GSI_EVT_RING_RE_SIZE_16B;
 
+		/*
+		 * GSI ring length is calculated based on the desc_fifo_sz
+		 * which was meant to define the BAM desc fifo. GSI descriptors
+		 * are 16B as opposed to 8B for BAM.
+		 */
 		gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz;
+
 		gsi_evt_ring_props.ring_base_vaddr =
 			dma_alloc_coherent(ipa3_ctx->pdev,
 			gsi_evt_ring_props.ring_len,
@@ -3375,10 +3457,7 @@
 			gsi_evt_ring_props.ring_base_vaddr;
 
 		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
-		if (ep->client == IPA_CLIENT_APPS_WAN_PROD)
-			gsi_evt_ring_props.int_modc = 248;
-		else
-			gsi_evt_ring_props.int_modc = 1;
+		gsi_evt_ring_props.int_modc = 1;
 
 		IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
 			ep->client,
@@ -3603,16 +3682,18 @@
 			break;
 
 		ipa3_wq_rx_common(ep->sys, mem_info.size);
-		cnt += 5;
+		cnt += IPA_WAN_AGGR_PKT_CNT;
 	};
 
-	if (cnt == 0) {
+	if (cnt == 0 || cnt < weight) {
 		ep->inactive_cycles++;
 		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
 
 		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
 			ep->switch_to_intr = true;
 			delay = 0;
+		} else if (cnt < weight) {
+			delay = 0;
 		}
 		queue_delayed_work(ep->sys->wq,
 			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 244c80c..90577c0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -45,6 +45,7 @@
 #define IPA3_MAX_NUM_PIPES 31
 #define IPA_SYS_DESC_FIFO_SZ 0x800
 #define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_COMMON_EVENT_RING_SIZE 0x7C00
 #define IPA_LAN_RX_HEADER_LENGTH (2)
 #define IPA_QMAP_HEADER_LENGTH (4)
 #define IPA_DL_CHECKSUM_LENGTH (8)
@@ -53,8 +54,11 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
+/*
+ * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
+ * IPA users still use sps_iovec size as FIFO element size.
+ */
+#define IPA_FIFO_ELEMENT_SIZE 8
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
@@ -591,9 +595,11 @@
  */
 struct ipa3_sys_context {
 	u32 len;
+	u32 len_pending_xfer;
 	atomic_t curr_polling_state;
 	struct delayed_work switch_to_intr_work;
 	enum ipa3_sys_pipe_policy policy;
+	bool use_comm_evt_ring;
 	int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
 	struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
 	void (*free_skb)(struct sk_buff *skb);
@@ -616,6 +622,7 @@
 	struct list_head head_desc_list;
 	struct list_head rcycl_list;
 	spinlock_t spinlock;
+	struct hrtimer db_timer;
 	struct workqueue_struct *wq;
 	struct workqueue_struct *repl_wq;
 	struct ipa3_status_stats *status_stat;
@@ -702,6 +709,7 @@
  * @user1: cookie1 for above callback
  * @user2: cookie2 for above callback
  * @xfer_done: completion object for sync completion
+ * @skip_db_ring: specifies whether the GSI doorbell should not be rung
  */
 struct ipa3_desc {
 	enum ipa3_desc_type type;
@@ -715,6 +723,7 @@
 	void *user1;
 	int user2;
 	struct completion xfer_done;
+	bool skip_db_ring;
 };
 
 /**
@@ -937,6 +946,10 @@
 	struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
 	void *priv;
 	ipa_uc_ready_cb uc_ready_cb;
+	/* for AP+STA stats update */
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_meter_notifier_cb stats_notify;
+#endif
 };
 
 /**
@@ -1129,6 +1142,8 @@
 	struct workqueue_struct *transport_power_mgmt_wq;
 	bool tag_process_before_gating;
 	struct ipa3_transport_pm transport_pm;
+	unsigned long gsi_evt_comm_hdl;
+	u32 gsi_evt_comm_ring_rem;
 	u32 clnt_hdl_cmd;
 	u32 clnt_hdl_data_in;
 	u32 clnt_hdl_data_out;
@@ -1161,6 +1176,7 @@
 	u32 curr_ipa_clk_rate;
 	bool q6_proxy_clk_vote_valid;
 	u32 ipa_num_pipes;
+	dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
 
 	struct ipa3_wlan_comm_memb wc_memb;
 
@@ -1611,6 +1627,7 @@
 int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
 int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
 u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
 int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
 		ipa_notify_cb notify, void *priv, u8 hdr_len,
 		struct ipa_ntn_conn_out_params *outp);
@@ -1651,6 +1668,9 @@
 
 bool ipa3_get_client_uplink(int pipe_idx);
 
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
 /*
  * IPADMA
  */
@@ -1963,4 +1983,5 @@
 struct device *ipa3_get_pdev(void);
 void ipa3_enable_dcd(void);
 void ipa3_disable_prefetch(enum ipa_client_type client);
+int ipa3_alloc_common_event_ring(void);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 343901f..19c3de4a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -884,7 +884,8 @@
 		IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
 			  qmi_ind.apn.mux_id,
 			  (unsigned long int) qmi_ind.apn.num_Mbytes);
-		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
+			IPA_UPSTEAM_MODEM);
 	}
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 3659a22..4fde261 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -181,7 +181,8 @@
 
 int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
 
-void ipa3_broadcast_quota_reach_ind(uint32_t mux_id);
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+	enum ipa_upstream_type upstream_type);
 
 int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
 	*data);
@@ -189,6 +190,8 @@
 int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
 	bool reset);
 
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+
 int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
 	struct ipa_get_data_stats_resp_msg_v01 *resp);
 
@@ -283,7 +286,8 @@
 	return -EPERM;
 }
 
-static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id) { }
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+	enum ipa_upstream_type upstream_type) { }
 
 static inline int ipa3_qmi_get_data_stats(
 	struct ipa_get_data_stats_req_msg_v01 *req,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 8f87baf..799246b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -13,6 +13,7 @@
 #include <linux/dmapool.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include "ipa_qmi_service.h"
 
 #define IPA_HOLB_TMR_DIS 0x0
 
@@ -1190,6 +1191,12 @@
 	ep->client_notify = in->sys.notify;
 	ep->priv = in->sys.priv;
 
+	/* for AP+STA stats update */
+	if (in->wdi_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify;
+	else
+		IPADBG("in->wdi_notify is null\n");
+
 	if (!ep->skip_ep_cfg) {
 		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
 			IPAERR("fail to configure EP.\n");
@@ -1200,6 +1207,8 @@
 		IPADBG("Skipping endpoint configuration.\n");
 	}
 
+	ipa3_enable_data_path(ipa_ep_idx);
+
 	out->clnt_hdl = ipa_ep_idx;
 
 	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
@@ -1281,6 +1290,12 @@
 
 	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
 
+	/* for AP+STA stats update */
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = NULL;
+	else
+		IPADBG("uc_wdi_ctx.stats_notify already null\n");
+
 uc_timeout:
 	return result;
 }
@@ -1626,6 +1641,23 @@
 	return result;
 }
 
+/**
+ * ipa3_broadcast_wdi_quota_reach_ind() - broadcast a quota reached indication
+ * @fid: [in] netdev ID
+ * @num_bytes: [in] number of bytes used
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid,
+	uint64_t num_bytes)
+{
+	IPAERR("Quota reached indication on fid(%d) Mbytes(%lu)\n",
+			  fid,
+			  (unsigned long int) num_bytes);
+	ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN);
+	return 0;
+}
+
 int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
 {
 	int result = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c48867a..bc9f693 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -335,12 +335,13 @@
 	[IPA_3_0][IPA_CLIENT_APPS_LAN_PROD] = {
 			14, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_DDR},
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 11, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_APPS_WAN_PROD] = {
 			3, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 14, 11, 8, 16, IPA_EE_AP } },
+			{ 3, 5, 16, 32, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			22, IPA_v3_0_GROUP_IMM_CMD, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
@@ -578,8 +579,8 @@
 	[IPA_3_5][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_APPS_LAN_PROD]   = {
-			8, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			8, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 9, 8, 16, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_APPS_WAN_PROD] = {
@@ -1762,11 +1763,39 @@
 	}
 }
 
+/**
+ * ipa3_get_wlan_stats() - query WLAN SAP stats via the registered WDI
+ * stats callback
+ * @wdi_sap_stats: [inout] stats request/response buffer
+ *
+ * Return value: 0 on success, negative on failure
+ */
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
+			wdi_sap_stats);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
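+/**
+ * ipa3_set_wlan_quota() - set a data quota on the WLAN backhaul via the
+ * registered WDI stats callback
+ * @wdi_quota: [inout] quota request; set_valid reports whether the WLAN
+ * driver accepted it
+ *
+ * Return value: 0 on success, negative on failure
+ */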
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
+			wdi_quota);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
 /**
  * ipa3_get_client() - provide client mapping
  * @client: client type
  *
- * Return value: none
+ * Return value: client mapping enum
  */
 enum ipacm_client_enum ipa3_get_client(int pipe_idx)
 {
@@ -3767,6 +3796,8 @@
 	api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
 	api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
 	api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
+			ipa3_broadcast_wdi_quota_reach_ind;
 	api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
 	api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
 	api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
@@ -4267,11 +4298,6 @@
 
 	memset(&mem, 0, sizeof(mem));
 
-	if (IPA_CLIENT_IS_PROD(ep->client)) {
-		res = gsi_stop_channel(ep->gsi_chan_hdl);
-		goto end_sequence;
-	}
-
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
 		IPADBG("Calling gsi_stop_channel\n");
 		res = gsi_stop_channel(ep->gsi_chan_hdl);
@@ -4279,12 +4305,14 @@
 		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
 			goto end_sequence;
 
-		IPADBG("Inject a DMA_TASK with 1B packet to IPA and retry\n");
-		/* Send a 1B packet DMA_TASK to IPA and try again */
-		res = ipa3_inject_dma_task_for_gsi();
-		if (res) {
-			IPAERR("Failed to inject DMA TASk for GSI\n");
-			goto end_sequence;
+		if (IPA_CLIENT_IS_CONS(ep->client)) {
+			IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+			/* Send a 1B packet DMA_TASK to IPA and try again */
+			res = ipa3_inject_dma_task_for_gsi();
+			if (res) {
+				IPAERR("Failed to inject DMA TASk for GSI\n");
+				goto end_sequence;
+			}
 		}
 
 		/* sleep for short period to flush IPA */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 9a400d9..9e04518 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -52,6 +52,7 @@
 #define DEFAULT_OUTSTANDING_LOW 64
 
 #define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
 
 #define IPA_WWAN_RX_SOFTIRQ_THRESH 16
 
@@ -65,6 +66,7 @@
 	((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
 	  rmnet_ipa3_ctx->wwan_priv->net : NULL)
 
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
 
 static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
 static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
@@ -89,6 +91,7 @@
 	bool ipa_loaduC;
 	bool ipa_advertise_sg_support;
 	bool ipa_napi_enable;
+	u32 wan_rx_desc_size;
 };
 
 /**
@@ -781,6 +784,22 @@
 	return MAX_NUM_OF_MUX_CHANNEL;
 }
 
+static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
+{
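+	/*
+	 * Map the upstream interface name to its backhaul type: names that
+	 * match a registered rmnet mux channel are modem backhaul, while
+	 * IPA_UPSTEAM_WLAN_IFACE_NAME selects the WLAN backhaul.
+	 */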
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+					upstreamIface) == 0)
+			return IPA_UPSTEAM_MODEM;
+	}
+
+	if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
+		return IPA_UPSTEAM_WLAN;
+	else
+		return IPA_UPSTEAM_MAX;
+}
+
 static int ipa3_wwan_register_to_ipa(int index)
 {
 	struct ipa_tx_intf tx_properties = {0};
@@ -1067,6 +1086,8 @@
 		IPAWANDBG_LOW
 		("SW filtering out none QMAP packet received from %s",
 		current->comm);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -1078,7 +1099,8 @@
 			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
 			goto send;
 		} else {
-			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+			pr_err("[%s]fatal: ipa3_wwan_xmit stopped\n",
+				  dev->name);
 			return NETDEV_TX_BUSY;
 		}
 	}
@@ -1108,6 +1130,8 @@
 	if (ret) {
 		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
 		       dev->name, ret);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return -EFAULT;
 	}
 	/* IPA_RM checking end */
@@ -1124,7 +1148,6 @@
 
 	if (ret) {
 		ret = NETDEV_TX_BUSY;
-		dev->stats.tx_dropped++;
 		goto out;
 	}
 
@@ -1276,7 +1299,7 @@
 			ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
 			   in->u.ingress_format.agg_count;
 
-			if (ipa_wan_ep_cfg->napi_enabled) {
+			if (ipa3_rmnet_res.ipa_napi_enable) {
 				ipa_wan_ep_cfg->recycle_enabled = true;
 				ep_cfg = (struct rmnet_phys_ep_conf_s *)
 				   rcu_dereference(dev->rx_handler_data);
@@ -1304,10 +1327,8 @@
 	ipa_wan_ep_cfg->priv = dev;
 
 	ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
-	if (ipa_wan_ep_cfg->napi_enabled)
-		ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
-	else
-		ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	ipa_wan_ep_cfg->desc_fifo_sz =
+		ipa3_rmnet_res.wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE;
 
 	mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard);
 
@@ -1991,6 +2012,9 @@
 static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 		struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
 {
+	int result;
+
+	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
 	ipa_rmnet_drv_res->ipa_rmnet_ssr =
 			of_property_read_bool(pdev->dev.of_node,
 			"qcom,rmnet-ipa-ssr");
@@ -2013,6 +2037,18 @@
 			"qcom,ipa-napi-enable");
 	pr_info("IPA Napi Enable = %s\n",
 		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+	/* Get IPA WAN RX desc fifo size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-desc-size",
+			&ipa_rmnet_drv_res->wan_rx_desc_size);
+	if (result)
+		pr_info("using default for wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+	else
+		IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+
 	return 0;
 }
 
@@ -2623,10 +2659,10 @@
 }
 
 /**
- * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * rmnet_ipa3_set_data_quota_modem() - Data quota setting IOCTL handler
  * @data - IOCTL data
  *
- * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * This function handles WAN_IOC_SET_DATA_QUOTA on the modem interface.
  * It translates the given interface name to the Modem MUX ID and
  * sends the request of the quota to the IPA Modem driver via QMI.
  *
@@ -2635,12 +2671,17 @@
  * -EFAULT: Invalid interface name provided
  * other: See ipa_qmi_set_data_quota
  */
-int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+static int rmnet_ipa3_set_data_quota_modem(
+	struct wan_ioctl_set_data_quota *data)
 {
 	u32 mux_id;
 	int index;
 	struct ipa_set_data_usage_quota_req_msg_v01 req;
 
+	/* stop quota */
+	if (!data->set_quota)
+		ipa3_qmi_stop_data_qouta();
+
 	index = find_vchannel_name_index(data->interface_name);
 	IPAWANERR("iface name %s, quota %lu\n",
 		  data->interface_name,
@@ -2664,6 +2705,64 @@
 	return ipa3_qmi_set_data_quota(&req);
 }
 
+static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
+{
+	struct ipa_set_wifi_quota wifi_quota;
+	int rc = 0;
+
+	memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
+	wifi_quota.set_quota = data->set_quota;
+	wifi_quota.quota_bytes = data->quota_mbytes;
+	IPAWANERR("iface name %s, quota %lu\n",
+		  data->interface_name,
+		  (unsigned long int) data->quota_mbytes);
+
+	rc = ipa3_set_wlan_quota(&wifi_quota);
+	/* check if wlan-fw takes this quota-set */
+	if (!wifi_quota.set_valid)
+		rc = -EFAULT;
+	return rc;
+}
+
+/**
+ * rmnet_ipa3_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It determines the upstream backhaul (modem or WLAN) from the given
+ * interface name and forwards the quota request to the matching handler.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->interface_name);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR("Wrong interface name %s\n",
+			data->interface_name);
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		rc = rmnet_ipa3_set_data_quota_wifi(data);
+		if (rc) {
+			IPAWANERR("set quota on wifi failed\n");
+			return rc;
+		}
+	} else {
+		rc = rmnet_ipa3_set_data_quota_modem(data);
+		if (rc) {
+			IPAWANERR("set quota on modem failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
  /* rmnet_ipa_set_tether_client_pipe() -
  * @data - IOCTL data
  *
@@ -2728,8 +2827,61 @@
 	return 0;
 }
 
-int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
-	bool reset)
+static int rmnet_ipa3_query_tethering_stats_wifi(
+	struct wan_ioctl_query_tether_stats *data, bool reset)
+{
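+	/*
+	 * Query (or, when reset is set, clear) the WLAN SAP counters through
+	 * the registered WDI stats callback; data may be NULL on the reset
+	 * path and is only dereferenced when stats are returned.
+	 */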
+	struct ipa_get_wdi_sap_stats *sap_stats;
+	int rc;
+
+	sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
+			GFP_KERNEL);
+	if (!sap_stats) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return -ENOMEM;
+	}
+	memset(sap_stats, 0, sizeof(struct ipa_get_wdi_sap_stats));
+
+	sap_stats->reset_stats = reset;
+	IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
+
+	rc = ipa3_get_wlan_stats(sap_stats);
+	if (rc) {
+		IPAWANERR("can't get ipa3_get_wlan_stats\n");
+		kfree(sap_stats);
+		return rc;
+	} else if (reset) {
+		kfree(sap_stats);
+		return 0;
+	}
+
+	if (sap_stats->stats_valid) {
+		data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
+		data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
+		data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
+		data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
+		data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
+		data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
+		data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
+		data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
+	}
+
+	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+		(unsigned long int) data->ipv4_rx_packets,
+		(unsigned long int) data->ipv6_rx_packets,
+		(unsigned long int) data->ipv4_rx_bytes,
+		(unsigned long int) data->ipv6_rx_bytes);
+	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+		(unsigned long int) data->ipv4_tx_packets,
+		(unsigned long  int) data->ipv6_tx_packets,
+		(unsigned long int) data->ipv4_tx_bytes,
+		(unsigned long int) data->ipv6_tx_bytes);
+
+	kfree(sap_stats);
+	return rc;
+}
+
+static int rmnet_ipa3_query_tethering_stats_modem(
+	struct wan_ioctl_query_tether_stats *data, bool reset)
 {
 	struct ipa_get_data_stats_req_msg_v01 *req;
 	struct ipa_get_data_stats_resp_msg_v01 *resp;
@@ -2816,7 +2968,7 @@
 			}
 		}
 	}
-	IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
 		(unsigned long int) data->ipv4_rx_packets,
 		(unsigned long int) data->ipv6_rx_packets,
 		(unsigned long int) data->ipv4_rx_bytes,
@@ -2866,7 +3018,7 @@
 			}
 		}
 	}
-	IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
 		(unsigned long int) data->ipv4_tx_packets,
 		(unsigned long  int) data->ipv6_tx_packets,
 		(unsigned long int) data->ipv4_tx_bytes,
@@ -2876,6 +3028,69 @@
 	return 0;
 }
 
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->upstreamIface);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface name %s\n",
+			data->upstreamIface);
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		IPAWANDBG_LOW(" query wifi-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_wifi(
+			data, false);
+		if (rc) {
+			IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+			return rc;
+		}
+	} else {
+		IPAWANDBG_LOW(" query modem-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_modem(
+			data, false);
+		if (rc) {
+			IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->upstreamIface);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface name %s\n",
+			data->upstreamIface);
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		IPAWANERR(" reset wifi-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_wifi(
+			NULL, true);
+		if (rc) {
+			IPAWANERR("reset WLAN stats failed\n");
+			return rc;
+		}
+	} else {
+		IPAWANERR(" reset modem-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_modem(
+			NULL, true);
+		if (rc) {
+			IPAWANERR("reset MODEM stats failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
 /**
  * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
  * @mux_id - The MUX ID on which the quota has been reached
@@ -2885,23 +3100,28 @@
  * on the specific interface which matches the mux_id has been reached.
  *
  */
-void ipa3_broadcast_quota_reach_ind(u32 mux_id)
+void ipa3_broadcast_quota_reach_ind(u32 mux_id,
+	enum ipa_upstream_type upstream_type)
 {
 	char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
 	char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
 	char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
 	char *envp[IPA_UEVENT_NUM_EVNP] = {
-		alert_msg, iface_name_l, iface_name_m, NULL };
+		alert_msg, iface_name_l, iface_name_m, NULL};
 	int res;
 	int index;
 
-	index = ipa3_find_mux_channel_index(mux_id);
-
-	if (index == MAX_NUM_OF_MUX_CHANNEL) {
-		IPAWANERR("%u is an mux ID\n", mux_id);
+	/* check upstream_type*/
+	/* check upstream_type */
+		IPAWANERR(" Wrong upstreamIface type %d\n", upstream_type);
 		return;
+	} else if (upstream_type == IPA_UPSTEAM_MODEM) {
+		index = ipa3_find_mux_channel_index(mux_id);
+		if (index == MAX_NUM_OF_MUX_CHANNEL) {
+			IPAWANERR("%u is an invalid mux ID\n", mux_id);
+			return;
+		}
 	}
-
 	res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
 			"ALERT_NAME=%s", "quotaReachedAlert");
 	if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
@@ -2909,15 +3129,25 @@
 		return;
 	}
 	/* posting msg for L-release for CNE */
+	if (upstream_type == IPA_UPSTEAM_MODEM) {
 	res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
 	    "UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	} else {
+		res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+			"UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+	}
 	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
 		IPAWANERR("message too long (%d)", res);
 		return;
 	}
 	/* posting msg for M-release for CNE */
+	if (upstream_type == IPA_UPSTEAM_MODEM) {
 	res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
 	    "INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	} else {
+		res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+			"INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+	}
 	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
 		IPAWANERR("message too long (%d)", res);
 		return;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 2abfe17..3ef17f6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -279,8 +279,9 @@
 			break;
 		}
 
-		if (rmnet_ipa3_query_tethering_stats(NULL, true)) {
-			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+		if (rmnet_ipa3_reset_tethering_stats(
+				(struct wan_ioctl_reset_tether_stats *)param)) {
+			IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n");
 			retval = -EFAULT;
 			break;
 		}
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 3330595..47da1b3 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -55,6 +55,8 @@
 #define VDDIO_MAX_UV	2040000
 #define VDDIO_MAX_UA	70300
 
+#define PCIE20_CAP_LINKCTRLSTATUS 0x80
+
 #define WIGIG_MIN_CPU_BOOST_KBPS	150000
 
 struct device;
@@ -87,6 +89,7 @@
 	u32 rc_index; /* PCIE root complex index */
 	struct pci_dev *pcidev;
 	struct pci_saved_state *pristine_state;
+	bool l1_enabled_in_enum;
 
 	/* SMMU */
 	bool use_smmu; /* have SMMU enabled? */
@@ -476,6 +479,47 @@
 	msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
 }
 
+int msm_11ad_ctrl_aspm_l1(struct msm11ad_ctx *ctx, bool enable)
+{
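+	/*
+	 * Toggle ASPM L1 on the 11ad PCIe link by rewriting the Link
+	 * Control/Status register; the write is skipped when the link is
+	 * already in the requested state.
+	 */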
+	int rc;
+	u32 val;
+	struct pci_dev *pdev = ctx->pcidev;
+	bool l1_enabled;
+
+	/* Read current state */
+	rc = pci_read_config_dword(pdev,
+				   PCIE20_CAP_LINKCTRLSTATUS, &val);
+	if (rc) {
+		dev_err(ctx->dev,
+			"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n", rc);
+		return rc;
+	}
+	dev_dbg(ctx->dev, "PCIE20_CAP_LINKCTRLSTATUS read returns 0x%x\n", val);
+
+	l1_enabled = val & PCI_EXP_LNKCTL_ASPM_L1;
+	if (l1_enabled == enable) {
+		dev_dbg(ctx->dev, "ASPM_L1 is already %s\n",
+			l1_enabled ? "enabled" : "disabled");
+		return 0;
+	}
+
+	if (enable)
+		val |= PCI_EXP_LNKCTL_ASPM_L1; /* enable bit 1 */
+	else
+		val &= ~PCI_EXP_LNKCTL_ASPM_L1; /* disable bit 1 */
+
+	dev_dbg(ctx->dev, "writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x)\n",
+		val);
+	rc = pci_write_config_dword(pdev,
+				    PCIE20_CAP_LINKCTRLSTATUS, val);
+	if (rc)
+		dev_err(ctx->dev,
+			"writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x) failed:%d\n",
+			val, rc);
+
+	return rc;
+}
+
 static int ops_suspend(void *handle)
 {
 	int rc;
@@ -561,6 +605,16 @@
 		goto err_suspend_rc;
 	}
 
+	/* Disable L1, in case it is enabled */
+	if (ctx->l1_enabled_in_enum) {
+		rc = msm_11ad_ctrl_aspm_l1(ctx, false);
+		if (rc) {
+			dev_err(ctx->dev,
+				"failed to disable L1, rc %d\n", rc);
+			goto err_suspend_rc;
+		}
+	}
+
 	return 0;
 
 err_suspend_rc:
@@ -847,6 +901,7 @@
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
 	int rc;
+	u32 val;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -965,6 +1020,31 @@
 		goto out_rc;
 	}
 	ctx->pcidev = pcidev;
+
+	/* Read current state */
+	rc = pci_read_config_dword(pcidev,
+				   PCIE20_CAP_LINKCTRLSTATUS, &val);
+	if (rc) {
+		dev_err(ctx->dev,
+			"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n",
+			rc);
+		goto out_rc;
+	}
+
+	ctx->l1_enabled_in_enum = val & PCI_EXP_LNKCTL_ASPM_L1;
+	dev_dbg(ctx->dev, "L1 is %s in enumeration\n",
+		ctx->l1_enabled_in_enum ? "enabled" : "disabled");
+
+	/* Disable L1, in case it is enabled */
+	if (ctx->l1_enabled_in_enum) {
+		rc = msm_11ad_ctrl_aspm_l1(ctx, false);
+		if (rc) {
+			dev_err(ctx->dev,
+				"failed to disable L1, rc %d\n", rc);
+			goto out_rc;
+		}
+	}
+
 	rc = pci_save_state(pcidev);
 	if (rc) {
 		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
@@ -1212,6 +1292,13 @@
 		 * TODO: Enable rf_clk3 clock before resetting the device to
 		 * ensure stable ref clock during the device reset
 		 */
+		/* Re-enable L1 in case it was enabled in enumeration */
+		if (ctx->l1_enabled_in_enum) {
+			rc = msm_11ad_ctrl_aspm_l1(ctx, true);
+			if (rc)
+				dev_err(ctx->dev,
+					"failed to enable L1, rc %d\n", rc);
+		}
 		break;
 	case WIL_PLATFORM_EVT_FW_RDY:
 		/*
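
Note: msm_11ad_ctrl_aspm_l1() above reads and writes the Link Control/Status register through the hardcoded config-space offset PCIE20_CAP_LINKCTRLSTATUS (0x80). For reference, a hedged sketch of the same toggle using the generic PCIe capability accessors, which locate the capability offset themselves; the helper name is hypothetical and this is an alternative, not what the driver does:

    #include <linux/pci.h>

    /* hypothetical: set or clear the ASPM L1 enable bit in Link Control */
    static int msm_11ad_set_aspm_l1(struct pci_dev *pdev, bool enable)
    {
            return pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
                            PCI_EXP_LNKCTL_ASPM_L1,
                            enable ? PCI_EXP_LNKCTL_ASPM_L1 : 0);
    }
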
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b0cafa9..b8bdffd 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -2,3 +2,5 @@
 obj-$(CONFIG_POWER_RESET)	+= reset/
 obj-$(CONFIG_POWER_SUPPLY)	+= supply/
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
+obj-$(CONFIG_ARCH_QCOM)		+= qcom/
+obj-$(CONFIG_POWER_SUPPLY)	+= supply/
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 76806a0..2d5d9bf 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -511,4 +511,6 @@
 	  This driver provides support for the power supply features of
 	  AXP20x PMIC.
 
+source "drivers/power/supply/qcom/Kconfig"
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 36c599d..cfbc992 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -71,4 +71,5 @@
 obj-$(CONFIG_CHARGER_TPS65090)	+= tps65090-charger.o
 obj-$(CONFIG_CHARGER_TPS65217)	+= tps65217_charger.o
 obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
+obj-$(CONFIG_ARCH_QCOM)         += qcom/
 obj-$(CONFIG_AXP288_CHARGER)	+= axp288_charger.o
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a74d8ca..d16e3e8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -665,7 +665,7 @@
 	.set_cur_state = ps_set_cur_charge_cntl_limit,
 };
 
-static int psy_register_cooler(struct power_supply *psy)
+static int psy_register_cooler(struct device *dev, struct power_supply *psy)
 {
 	int i;
 
@@ -673,7 +673,13 @@
 	for (i = 0; i < psy->desc->num_properties; i++) {
 		if (psy->desc->properties[i] ==
 				POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT) {
-			psy->tcd = thermal_cooling_device_register(
+			if (dev)
+				psy->tcd = thermal_of_cooling_device_register(
+							dev_of_node(dev),
+							(char *)psy->desc->name,
+							psy, &psy_tcd_ops);
+			else
+				psy->tcd = thermal_cooling_device_register(
 							(char *)psy->desc->name,
 							psy, &psy_tcd_ops);
 			return PTR_ERR_OR_ZERO(psy->tcd);
@@ -698,7 +704,7 @@
 {
 }
 
-static int psy_register_cooler(struct power_supply *psy)
+static int psy_register_cooler(struct device *dev, struct power_supply *psy)
 {
 	return 0;
 }
@@ -770,7 +776,7 @@
 	if (rc)
 		goto register_thermal_failed;
 
-	rc = psy_register_cooler(psy);
+	rc = psy_register_cooler(parent, psy);
 	if (rc)
 		goto register_cooler_failed;
 
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 1480d9a..f6fa78f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -44,20 +44,23 @@
 					  struct device_attribute *attr,
 					  char *buf) {
 	static char *type_text[] = {
-		"Unknown", "Battery", "UPS", "Mains", "USB",
-		"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
-		"USB_PD", "USB_PD_DRP"
+		"Unknown", "Battery", "UPS", "Mains", "USB", "USB_DCP",
+		"USB_CDP", "USB_ACA", "USB_HVDCP", "USB_HVDCP_3", "USB_PD",
+		"Wireless", "BMS", "Parallel", "Main", "Wipower",
+		"TYPEC", "TYPEC_UFP", "TYPEC_DFP"
 	};
 	static char *status_text[] = {
 		"Unknown", "Charging", "Discharging", "Not charging", "Full"
 	};
 	static char *charge_type[] = {
-		"Unknown", "N/A", "Trickle", "Fast"
+		"Unknown", "N/A", "Trickle", "Fast",
+		"Taper"
 	};
 	static char *health_text[] = {
 		"Unknown", "Good", "Overheat", "Dead", "Over voltage",
 		"Unspecified failure", "Cold", "Watchdog timer expire",
-		"Safety timer expire"
+		"Safety timer expire",
+		"Warm", "Cool", "Hot"
 	};
 	static char *technology_text[] = {
 		"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
@@ -102,30 +105,48 @@
 	}
 
 	if (off == POWER_SUPPLY_PROP_STATUS)
-		return sprintf(buf, "%s\n", status_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				status_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE)
-		return sprintf(buf, "%s\n", charge_type[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				charge_type[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_HEALTH)
-		return sprintf(buf, "%s\n", health_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				health_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TECHNOLOGY)
-		return sprintf(buf, "%s\n", technology_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				technology_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
-		return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				capacity_level_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TYPE)
-		return sprintf(buf, "%s\n", type_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				type_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_SCOPE)
-		return sprintf(buf, "%s\n", scope_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				scope_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TYPEC_MODE)
-		return sprintf(buf, "%s\n", typec_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				typec_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TYPEC_POWER_ROLE)
-		return sprintf(buf, "%s\n", typec_pr_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				typec_pr_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_DIE_HEALTH)
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				health_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_CONNECTOR_HEALTH)
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				health_text[value.intval]);
 	else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
-		return sprintf(buf, "%s\n", value.strval);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				value.strval);
 
 	if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT)
-		return sprintf(buf, "%lld\n", value.int64val);
+		return scnprintf(buf, PAGE_SIZE, "%lld\n",
+				value.int64val);
 	else
-		return sprintf(buf, "%d\n", value.intval);
+		return scnprintf(buf, PAGE_SIZE, "%d\n",
+				value.intval);
 }
 
 static ssize_t power_supply_store_property(struct device *dev,
@@ -181,6 +202,8 @@
 	POWER_SUPPLY_ATTR(charge_full),
 	POWER_SUPPLY_ATTR(charge_empty),
 	POWER_SUPPLY_ATTR(charge_now),
+	POWER_SUPPLY_ATTR(charge_now_raw),
+	POWER_SUPPLY_ATTR(charge_now_error),
 	POWER_SUPPLY_ATTR(charge_avg),
 	POWER_SUPPLY_ATTR(charge_counter),
 	POWER_SUPPLY_ATTR(constant_charge_current),
@@ -200,6 +223,7 @@
 	POWER_SUPPLY_ATTR(capacity_alert_min),
 	POWER_SUPPLY_ATTR(capacity_alert_max),
 	POWER_SUPPLY_ATTR(capacity_level),
+	POWER_SUPPLY_ATTR(capacity_raw),
 	POWER_SUPPLY_ATTR(temp),
 	POWER_SUPPLY_ATTR(temp_max),
 	POWER_SUPPLY_ATTR(temp_min),
@@ -216,11 +240,51 @@
 	POWER_SUPPLY_ATTR(scope),
 	POWER_SUPPLY_ATTR(charge_term_current),
 	POWER_SUPPLY_ATTR(calibrate),
-	POWER_SUPPLY_ATTR(resistance),
 	/* Local extensions */
 	POWER_SUPPLY_ATTR(usb_hc),
 	POWER_SUPPLY_ATTR(usb_otg),
-	POWER_SUPPLY_ATTR(charge_enabled),
+	POWER_SUPPLY_ATTR(battery_charging_enabled),
+	POWER_SUPPLY_ATTR(charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_step),
+	POWER_SUPPLY_ATTR(pin_enabled),
+	POWER_SUPPLY_ATTR(input_suspend),
+	POWER_SUPPLY_ATTR(input_voltage_regulation),
+	POWER_SUPPLY_ATTR(input_current_max),
+	POWER_SUPPLY_ATTR(input_current_trim),
+	POWER_SUPPLY_ATTR(input_current_settled),
+	POWER_SUPPLY_ATTR(input_voltage_settled),
+	POWER_SUPPLY_ATTR(bypass_vchg_loop_debouncer),
+	POWER_SUPPLY_ATTR(charge_counter_shadow),
+	POWER_SUPPLY_ATTR(hi_power),
+	POWER_SUPPLY_ATTR(low_power),
+	POWER_SUPPLY_ATTR(temp_cool),
+	POWER_SUPPLY_ATTR(temp_warm),
+	POWER_SUPPLY_ATTR(system_temp_level),
+	POWER_SUPPLY_ATTR(resistance),
+	POWER_SUPPLY_ATTR(resistance_capacitive),
+	POWER_SUPPLY_ATTR(resistance_id),
+	POWER_SUPPLY_ATTR(resistance_now),
+	POWER_SUPPLY_ATTR(flash_current_max),
+	POWER_SUPPLY_ATTR(update_now),
+	POWER_SUPPLY_ATTR(esr_count),
+	POWER_SUPPLY_ATTR(buck_freq),
+	POWER_SUPPLY_ATTR(boost_current),
+	POWER_SUPPLY_ATTR(safety_timer_enabled),
+	POWER_SUPPLY_ATTR(charge_done),
+	POWER_SUPPLY_ATTR(flash_active),
+	POWER_SUPPLY_ATTR(flash_trigger),
+	POWER_SUPPLY_ATTR(force_tlim),
+	POWER_SUPPLY_ATTR(dp_dm),
+	POWER_SUPPLY_ATTR(input_current_limited),
+	POWER_SUPPLY_ATTR(input_current_now),
+	POWER_SUPPLY_ATTR(current_qnovo),
+	POWER_SUPPLY_ATTR(voltage_qnovo),
+	POWER_SUPPLY_ATTR(rerun_aicl),
+	POWER_SUPPLY_ATTR(cycle_count_id),
+	POWER_SUPPLY_ATTR(safety_timer_expired),
+	POWER_SUPPLY_ATTR(restricted_charging),
+	POWER_SUPPLY_ATTR(current_capability),
 	POWER_SUPPLY_ATTR(typec_mode),
 	POWER_SUPPLY_ATTR(typec_cc_orientation),
 	POWER_SUPPLY_ATTR(typec_power_role),
@@ -229,16 +293,26 @@
 	POWER_SUPPLY_ATTR(pd_in_hard_reset),
 	POWER_SUPPLY_ATTR(pd_current_max),
 	POWER_SUPPLY_ATTR(pd_usb_suspend_supported),
+	POWER_SUPPLY_ATTR(charger_temp),
+	POWER_SUPPLY_ATTR(charger_temp_max),
+	POWER_SUPPLY_ATTR(parallel_disable),
 	POWER_SUPPLY_ATTR(pe_start),
 	POWER_SUPPLY_ATTR(set_ship_mode),
-	POWER_SUPPLY_ATTR(boost_current),
-	POWER_SUPPLY_ATTR(force_tlim),
+	POWER_SUPPLY_ATTR(soc_reporting_ready),
+	POWER_SUPPLY_ATTR(debug_battery),
+	POWER_SUPPLY_ATTR(fcc_delta),
+	POWER_SUPPLY_ATTR(icl_reduction),
+	POWER_SUPPLY_ATTR(parallel_mode),
+	POWER_SUPPLY_ATTR(die_health),
+	POWER_SUPPLY_ATTR(connector_health),
+	POWER_SUPPLY_ATTR(ctm_current_max),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_ATTR(model_name),
 	POWER_SUPPLY_ATTR(manufacturer),
 	POWER_SUPPLY_ATTR(serial_number),
+	POWER_SUPPLY_ATTR(battery_type),
 };
 
 static struct attribute *
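
Note: the sprintf()-to-scnprintf() conversion above bounds every show() handler to the single page the sysfs core provides. A minimal standalone sketch of the pattern; the attribute name and value are illustrative only:

    static ssize_t capacity_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            int capacity = 57;      /* illustrative value */

            /* scnprintf() never writes past PAGE_SIZE and returns the number
             * of characters actually stored, which is exactly what a sysfs
             * show() callback is expected to return */
            return scnprintf(buf, PAGE_SIZE, "%d\n", capacity);
    }
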
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
new file mode 100644
index 0000000..79ea712
--- /dev/null
+++ b/drivers/power/supply/qcom/Kconfig
@@ -0,0 +1,66 @@
+menu "Qualcomm Technologies Inc Charger and Fuel Gauge support"
+
+config QPNP_FG_GEN3
+	tristate "QPNP GEN3 fuel gauge driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the GEN3 Fuel Gauge driver. This adds support
+	  for battery fuel gauging and state of charge of the battery
+	  connected to the fuel gauge. The state of charge is reported through
+	  a BMS power supply property, and uevents are sent when the capacity
+	  is updated.
+
+config SMB135X_CHARGER
+	tristate "SMB135X Battery Charger"
+	depends on I2C
+	help
+	  Say Y to include support for SMB135X Battery Charger.
+	  SMB135X is a dual path switching mode charger capable of charging
+	  the battery with 3 A of current.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
+config SMB1351_USB_CHARGER
+	tristate "SMB1351 USB charger (with VBUS detection)"
+	depends on I2C
+	help
+	  Say Y to enable support for the SMB1351 switching mode based charger.
+	  The driver supports charging control (enable/disable) and
+	  charge-current limiting. It also provides USB VBUS detection and
+	  notification support. The driver controls SMB1351 via I2C and
+	  supports a device-tree interface.
+
+config QPNP_SMB2
+	tristate "SMB2 Battery Charger"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y to enable support for the SMB2 charging peripheral.
+	  The QPNP SMB2 charger driver supports the charger peripheral
+	  present in the PMICOBALT chip.
+	  The power supply framework is used to communicate battery and
+	  usb properties to userspace and other driver consumers such
+	  as fuel gauge, USB, and USB-PD.
+	  VBUS and VCONN regulators are registered for supporting OTG,
+	  and powered Type-C cables respectively.
+
+config SMB138X_CHARGER
+	tristate "SMB138X Battery Charger"
+	depends on MFD_I2C_PMIC
+	help
+	  Say Y to include support for SMB138X Battery Charger.
+	  SMB1380 is a dual phase 6A battery charger, and SMB1381 is a single
+	  phase 5A battery charger.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
+config QPNP_QNOVO
+	bool "QPNP QNOVO driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the Qnovo pulse charging engine. Qnovo driver
+	  accepts pulse parameters via sysfs entries and programs the hardware
+	  module. It also allows userspace code to read diagnostics of voltage
+	  and current measured during certain phases of the pulses.
+
+endmenu
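
Note: the QPNP_FG_GEN3 help text above says the state of charge is reported through a BMS power-supply property. A hedged sketch of how another kernel consumer could read it through the power-supply framework; the supply name "bms" follows the help text and is an assumption, as is the use of POWER_SUPPLY_PROP_CAPACITY:

    #include <linux/power_supply.h>

    static int read_bms_capacity(int *soc)
    {
            union power_supply_propval val = { 0, };
            struct power_supply *psy = power_supply_get_by_name("bms");
            int rc;

            if (!psy)
                    return -ENODEV;

            rc = power_supply_get_property(psy, POWER_SUPPLY_PROP_CAPACITY,
                            &val);
            if (!rc)
                    *soc = val.intval;

            power_supply_put(psy);  /* drop the reference taken above */
            return rc;
    }
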
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
new file mode 100644
index 0000000..171444f
--- /dev/null
+++ b/drivers/power/supply/qcom/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_QPNP_FG_GEN3)     += qpnp-fg-gen3.o fg-memif.o fg-util.o
+obj-$(CONFIG_SMB135X_CHARGER)   += smb135x-charger.o pmic-voter.o
+obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o
+obj-$(CONFIG_QPNP_SMB2)		+= qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_SMB138X_CHARGER)	+= smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_QPNP_QNOVO)	+= qpnp-qnovo.o battery.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
new file mode 100644
index 0000000..3659b92
--- /dev/null
+++ b/drivers/power/supply/qcom/battery.c
@@ -0,0 +1,936 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/printk.h>
+#include <linux/pm_wakeup.h>
+#include <linux/slab.h>
+#include "pmic-voter.h"
+
+#define DRV_MAJOR_VERSION	1
+#define DRV_MINOR_VERSION	0
+
+#define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TAPER_END_VOTER			"TAPER_END_VOTER"
+#define PL_TAPER_EARLY_BAD_VOTER	"PL_TAPER_EARLY_BAD_VOTER"
+#define PARALLEL_PSY_VOTER		"PARALLEL_PSY_VOTER"
+#define PL_HW_ABSENT_VOTER		"PL_HW_ABSENT_VOTER"
+#define PL_VOTER			"PL_VOTER"
+#define RESTRICT_CHG_VOTER		"RESTRICT_CHG_VOTER"
+
+struct pl_data {
+	int			pl_mode;
+	int			slave_pct;
+	int			taper_pct;
+	int			slave_fcc_ua;
+	int			restricted_current;
+	bool			restricted_charging_enabled;
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
+	struct votable		*pl_disable_votable;
+	struct votable		*pl_awake_votable;
+	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct work_struct	status_change_work;
+	struct work_struct	pl_disable_forever_work;
+	struct delayed_work	pl_taper_work;
+	struct power_supply	*main_psy;
+	struct power_supply	*pl_psy;
+	struct power_supply	*batt_psy;
+	int			charge_type;
+	int			main_settled_ua;
+	int			pl_settled_ua;
+	struct class		qcom_batt_class;
+	struct wakeup_source	*pl_ws;
+	struct notifier_block	nb;
+};
+
+struct pl_data *the_chip;
+
+enum print_reason {
+	PR_PARALLEL	= BIT(0),
+};
+
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, 0600);
+
+#define pl_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (debug_mask & (reason))				\
+			pr_info(fmt, ##__VA_ARGS__);	\
+		else							\
+			pr_debug(fmt, ##__VA_ARGS__);		\
+	} while (0)
+
+enum {
+	VER = 0,
+	SLAVE_PCT,
+	RESTRICT_CHG_ENABLE,
+	RESTRICT_CHG_CURRENT,
+};
+
+/*******
+ * ICL *
+ ********/
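+/*
+ * split_settled() divides the settled input current between the main and
+ * parallel chargers when both draw from USBIN.  For example, with the
+ * default slave_pct of 50 the slave gets 50 - 10 = 40 percent of the total
+ * settled current: if main settled at 1500000 uA and the slave previously
+ * had 0 uA, slave_ua = (1500000 + 0) * 40 / 100 = 600000 uA, applied as an
+ * ICL reduction on main and as CURRENT_MAX on the slave.
+ */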
+static void split_settled(struct pl_data *chip)
+{
+	int slave_icl_pct;
+	int slave_ua = 0, main_settled_ua = 0;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/*
+	 * TODO: some parallel chargers do not have a fine ICL resolution.
+	 * For them, implement a psy interface which returns the closest
+	 * lower ICL for the desired split.
+	 */
+
+	if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
+		&& (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+		return;
+
+	if (!chip->main_psy)
+		return;
+
+	if (!get_effective_result_locked(chip->pl_disable_votable)) {
+		/* read the aicl settled value */
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		main_settled_ua = pval.intval;
+		/* slave gets 10 percentage points less of the ICL */
+		slave_icl_pct = max(0, chip->slave_pct - 10);
+		slave_ua = ((main_settled_ua + chip->pl_settled_ua)
+						* slave_icl_pct) / 100;
+	}
+
+	/* ICL_REDUCTION on main could be 0mA when pl is disabled */
+	pval.intval = slave_ua;
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_ICL_REDUCTION, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't change slave suspend state rc=%d\n", rc);
+		return;
+	}
+
+	/* set parallel's ICL; could be 0 mA when pl is disabled */
+	pval.intval = slave_ua;
+	rc = power_supply_set_property(chip->pl_psy,
+			POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+		return;
+	}
+
+	/* main_settled_ua represents the total capability of adapter */
+	if (!chip->main_settled_ua)
+		chip->main_settled_ua = main_settled_ua;
+	chip->pl_settled_ua = slave_ua;
+}
+
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+			DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+
+/*************
+ * SLAVE PCT *
+ **************/
+static ssize_t slave_pct_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->slave_pct);
+}
+
+static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	chip->slave_pct = val;
+	rerun_election(chip->fcc_votable);
+	rerun_election(chip->fv_votable);
+	split_settled(chip);
+
+	return count;
+}
+
+/************************
+ * RESTRICTED CHARGING  *
+ ************************/
+static ssize_t restrict_chg_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n",
+			chip->restricted_charging_enabled);
+}
+
+static ssize_t restrict_chg_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	if (chip->restricted_charging_enabled == !!val)
+		goto no_change;
+
+	chip->restricted_charging_enabled = !!val;
+
+	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled,
+				chip->restricted_current);
+
+no_change:
+	return count;
+}
+
+static ssize_t restrict_cur_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->restricted_current);
+}
+
+static ssize_t restrict_cur_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	chip->restricted_current = val;
+
+	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled,
+				chip->restricted_current);
+
+	return count;
+}
+
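+/*
+ * These attributes are registered under the "qcom-battery" class in
+ * pl_init() below, so they appear in sysfs as, for example,
+ * /sys/class/qcom-battery/parallel_pct and
+ * /sys/class/qcom-battery/restricted_charging.
+ */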
+static struct class_attribute pl_attributes[] = {
+	[VER]			= __ATTR_RO(version),
+	[SLAVE_PCT]		= __ATTR(parallel_pct, 0644,
+					slave_pct_show, slave_pct_store),
+	[RESTRICT_CHG_ENABLE]	= __ATTR(restricted_charging, 0644,
+					restrict_chg_show, restrict_chg_store),
+	[RESTRICT_CHG_CURRENT]	= __ATTR(restricted_current, 0644,
+					restrict_cur_show, restrict_cur_store),
+	__ATTR_NULL,
+};
+
+/***********
+ *  TAPER  *
+ ************/
+#define MINIMUM_PARALLEL_FCC_UA		500000
+#define PL_TAPER_WORK_DELAY_MS		100
+#define TAPER_RESIDUAL_PCT		75
+static void pl_taper_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work, struct pl_data,
+						pl_taper_work.work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/* exit immediately if parallel is disabled */
+	if (get_effective_result(chip->pl_disable_votable)) {
+		pl_dbg(chip, PR_PARALLEL, "terminating: parallel charging not in progress\n");
+		goto done;
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
+			chip->slave_fcc_ua);
+	if (chip->slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+		pl_dbg(chip, PR_PARALLEL, "terminating: parallel's share lower than 500 mA\n");
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
+		goto done;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto done;
+	}
+
+	chip->charge_type = pval.intval;
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
+
+		vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+		/* Reduce the taper percent by 25 percent */
+		chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
+		rerun_election(chip->fcc_votable);
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
+				PL_TAPER_WORK_DELAY_MS);
+		schedule_delayed_work(&chip->pl_taper_work,
+				msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
+		return;
+	}
+
+	/*
+	 * Master back to Fast Charge, get out of this round of taper reduction
+	 */
+	pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
+done:
+	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
+}
+
+/*********
+ *  FCC  *
+ **********/
+#define EFFICIENCY_PCT	80
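+/*
+ * For USBMID-USBMID parallel mode, split_fcc() first derives a battery-side
+ * current ceiling (bcl_ua) from the input side: input power is roughly
+ * ICL * V_adapter, so at ~80 percent conversion efficiency the charge
+ * current available at the float voltage is
+ * ICL * V_adapter * EFFICIENCY_PCT / (V_float * 100).  The requested total
+ * FCC is then split against that ceiling.
+ */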
+static void split_fcc(struct pl_data *chip, int total_ua,
+			int *master_ua, int *slave_ua)
+{
+	int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
+		icl_ua, adapter_uv, bcl_ua;
+	union power_supply_propval pval = {0, };
+
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_FCC_DELTA, &pval);
+	if (rc < 0)
+		hw_cc_delta_ua = 0;
+	else
+		hw_cc_delta_ua = pval.intval;
+
+	bcl_ua = INT_MAX;
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBMID_USBMID) {
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		icl_ua = pval.intval;
+
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get adaptive voltage rc=%d\n", rc);
+			return;
+		}
+		adapter_uv = pval.intval;
+
+		bcl_ua = div64_s64((s64)icl_ua * adapter_uv * EFFICIENCY_PCT,
+			(s64)get_effective_result(chip->fv_votable) * 100);
+	}
+
+	effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
+	slave_limited_ua = min(effective_total_ua, bcl_ua);
+	*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
+	*slave_ua = (*slave_ua * chip->taper_pct) / 100;
+	/*
+	 * In USBIN_USBIN configuration with internal rsense parallel
+	 * charger's current goes through main charger's BATFET, keep
+	 * the main charger's FCC to the votable result.
+	 */
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		*master_ua = max(0, total_ua);
+	else
+		*master_ua = max(0, total_ua - *slave_ua);
+}
+
+static int pl_fcc_vote_callback(struct votable *votable, void *data,
+			int total_fcc_ua, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+
+	if (total_fcc_ua < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (chip->batt_psy) {
+		rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CURRENT_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't get qnovo fcc, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (pval.intval != -EINVAL)
+			total_fcc_ua = pval.intval;
+	}
+
+	if (chip->pl_mode == POWER_SUPPLY_PL_NONE
+	    || get_effective_result_locked(chip->pl_disable_votable)) {
+		pval.intval = total_fcc_ua;
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+		if (rc < 0)
+			pr_err("Couldn't set main fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+	pval.intval = slave_fcc_ua;
+	rc = power_supply_set_property(chip->pl_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->slave_fcc_ua = slave_fcc_ua;
+
+	pval.intval = master_fcc_ua;
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Could not set main fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+		   master_fcc_ua, slave_fcc_ua,
+		   (master_fcc_ua * 100) / total_fcc_ua,
+		   (slave_fcc_ua * 100) / total_fcc_ua);
+
+	return 0;
+}
+
+#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
+static int pl_fv_vote_callback(struct votable *votable, void *data,
+			int fv_uv, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+	int effective_fv_uv = fv_uv;
+
+	if (fv_uv < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (chip->batt_psy) {
+		rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't get qnovo fv, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (pval.intval != -EINVAL)
+			effective_fv_uv = pval.intval;
+	}
+
+	pval.intval = effective_fv_uv;
+
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set main fv, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+		pval.intval += PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set float on parallel rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static void pl_disable_forever_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work,
+			struct pl_data, pl_disable_forever_work);
+
+	/* Disable Parallel charger forever */
+	vote(chip->pl_disable_votable, PL_HW_ABSENT_VOTER, true, 0);
+
+	/* Re-enable autonomous mode */
+	if (chip->hvdcp_hw_inov_dis_votable)
+		vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
+}
+
+static int pl_disable_vote_callback(struct votable *votable,
+		void *data, int pl_disable, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	chip->taper_pct = 100;
+	chip->main_settled_ua = 0;
+	chip->pl_settled_ua = 0;
+
+	if (!pl_disable) { /* enable */
+		rc = power_supply_get_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc == -ENODEV) {
+			/*
+			 * -ENODEV is returned only if parallel chip
+			 * is not present in the system.
+			 * Disable parallel charger forever.
+			 */
+			schedule_work(&chip->pl_disable_forever_work);
+			return rc;
+		}
+
+		rerun_election(chip->fv_votable);
+		rerun_election(chip->fcc_votable);
+		/*
+		 * Enable will be called with a valid pl_psy always. The
+		 * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
+		 * is seen.
+		 */
+		pval.intval = 0;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+		if (rc < 0)
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+				rc);
+
+		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			split_settled(chip);
+		/*
+		 * we could have been enabled while in taper mode,
+		 *  start the taper work if so
+		 */
+		rc = power_supply_get_property(chip->batt_psy,
+				       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		} else {
+			if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+				pl_dbg(chip, PR_PARALLEL,
+					"pl enabled in taper, scheduling work\n");
+				schedule_delayed_work(&chip->pl_taper_work, 0);
+			}
+		}
+	} else {
+		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			split_settled(chip);
+
+		/* pl_psy may be NULL while in the disable branch */
+		if (chip->pl_psy) {
+			pval.intval = 1;
+			rc = power_supply_set_property(chip->pl_psy,
+					POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+			if (rc < 0)
+				pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+		}
+		rerun_election(chip->fcc_votable);
+		rerun_election(chip->fv_votable);
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
+		   pl_disable ? "disabled" : "enabled");
+
+	return 0;
+}
+
+static int pl_awake_vote_callback(struct votable *votable,
+			void *data, int awake, const char *client)
+{
+	struct pl_data *chip = data;
+
+	if (awake)
+		__pm_stay_awake(chip->pl_ws);
+	else
+		__pm_relax(chip->pl_ws);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
+static bool is_main_available(struct pl_data *chip)
+{
+	if (!chip->main_psy)
+		chip->main_psy = power_supply_get_by_name("main");
+
+	if (!chip->main_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_batt_available(struct pl_data *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_parallel_available(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (chip->pl_psy)
+		return true;
+
+	chip->pl_psy = power_supply_get_by_name("parallel");
+	if (!chip->pl_psy)
+		return false;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_PARALLEL_MODE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get parallel mode from parallel rc=%d\n",
+				rc);
+		return false;
+	}
+	/*
+	 * Note that pl_mode will be updated to anything other than _NONE
+	 * only after pl_psy is found. In other words, pl_mode != _NONE
+	 * implies that pl_psy is present and valid.
+	 */
+	chip->pl_mode = pval.intval;
+
+	/* Disable autonomous voltage increments for USBIN-USBIN */
+	if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+		if (!chip->hvdcp_hw_inov_dis_votable)
+			chip->hvdcp_hw_inov_dis_votable =
+					find_votable("HVDCP_HW_INOV_DIS");
+		if (chip->hvdcp_hw_inov_dis_votable)
+			/* vote to keep HW INOV voltage increments disabled */
+			vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER,
+					true, 0);
+		else
+			return false;
+	}
+
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
+
+	return true;
+}
+
+static void handle_main_charge_type(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		return;
+	}
+
+	/* not in fast or taper charge state; disable parallel */
+	if ((pval.intval != POWER_SUPPLY_CHARGE_TYPE_FAST)
+		&& (pval.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+		chip->taper_pct = 100;
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* handle taper charge entry */
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_FAST
+		&& (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		chip->charge_type = pval.intval;
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work\n");
+		schedule_delayed_work(&chip->pl_taper_work, 0);
+		return;
+	}
+
+	/* handle fast/taper charge entry */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
+			|| pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST) {
+		pl_dbg(chip, PR_PARALLEL, "chg_state enabling parallel\n");
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* remember the new state only if it isn't any of the above */
+	chip->charge_type = pval.intval;
+}
+
+#define MIN_ICL_CHANGE_DELTA_UA		300000
+static void handle_settled_icl_change(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (get_effective_result(chip->pl_disable_votable))
+		return;
+
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
+			|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
+		/*
+		 * call aicl split only when USBIN_USBIN and enabled
+		 * and if aicl changed
+		 */
+		rc = power_supply_get_property(chip->main_psy,
+				       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+				       &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+
+		/* If ICL change is small skip splitting */
+		if (abs((chip->main_settled_ua - chip->pl_settled_ua)
+				- pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
+			split_settled(chip);
+	} else {
+		rerun_election(chip->fcc_votable);
+	}
+}
+
+static void handle_parallel_in_taper(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (get_effective_result_locked(chip->pl_disable_votable))
+		return;
+
+	if (!chip->pl_psy)
+		return;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get pl charge type rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * if the parallel charger is ever seen in taper mode, that is an
+	 * anomaly and we disable it
+	 */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				true, 0);
+		return;
+	}
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work,
+			struct pl_data, status_change_work);
+
+	if (!is_main_available(chip))
+		return;
+
+	if (!is_batt_available(chip))
+		return;
+
+	is_parallel_available(chip);
+
+	handle_main_charge_type(chip);
+	handle_settled_icl_change(chip);
+	handle_parallel_in_taper(chip);
+}
+
+static int pl_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct pl_data *chip = container_of(nb, struct pl_data, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "parallel") == 0)
+	    || (strcmp(psy->desc->name, "battery") == 0)
+	    || (strcmp(psy->desc->name, "main") == 0))
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static int pl_register_notifier(struct pl_data *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = pl_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int pl_determine_initial_status(struct pl_data *chip)
+{
+	status_change_work(&chip->status_change_work);
+	return 0;
+}
+
+#define DEFAULT_RESTRICTED_CURRENT_UA	1000000
+static int pl_init(void)
+{
+	struct pl_data *chip;
+	int rc = 0;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+	chip->slave_pct = 50;
+	chip->restricted_current = DEFAULT_RESTRICTED_CURRENT_UA;
+
+	chip->pl_ws = wakeup_source_register("qcom-battery");
+	if (!chip->pl_ws)
+		goto cleanup;
+
+	chip->fcc_votable = create_votable("FCC", VOTE_MIN,
+					pl_fcc_vote_callback,
+					chip);
+	if (IS_ERR(chip->fcc_votable)) {
+		rc = PTR_ERR(chip->fcc_votable);
+		goto release_wakeup_source;
+	}
+
+	chip->fv_votable = create_votable("FV", VOTE_MAX,
+					pl_fv_vote_callback,
+					chip);
+	if (IS_ERR(chip->fv_votable)) {
+		rc = PTR_ERR(chip->fv_votable);
+		goto destroy_votable;
+	}
+
+	chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
+					pl_disable_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_disable_votable)) {
+		rc = PTR_ERR(chip->pl_disable_votable);
+		goto destroy_votable;
+	}
+	vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+	vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, true, 0);
+
+	chip->pl_awake_votable = create_votable("PL_AWAKE", VOTE_SET_ANY,
+					pl_awake_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_awake_votable)) {
+		rc = PTR_ERR(chip->pl_awake_votable);
+		goto destroy_votable;
+	}
+
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
+	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+
+	rc = pl_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = pl_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	chip->qcom_batt_class.name = "qcom-battery";
+	chip->qcom_batt_class.owner = THIS_MODULE;
+	chip->qcom_batt_class.class_attrs = pl_attributes;
+
+	rc = class_register(&chip->qcom_batt_class);
+	if (rc < 0) {
+		pr_err("couldn't register pl_data sysfs class rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	return rc;
+
+unreg_notifier:
+	power_supply_unreg_notifier(&chip->nb);
+destroy_votable:
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+release_wakeup_source:
+	wakeup_source_unregister(chip->pl_ws);
+cleanup:
+	kfree(chip);
+	return rc;
+}
+
+static void pl_deinit(void)
+{
+	struct pl_data *chip = the_chip;
+
+	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+	wakeup_source_unregister(chip->pl_ws);
+	kfree(chip);
+}
+
+module_init(pl_init);
+module_exit(pl_deinit);
+
+MODULE_DESCRIPTION("");
+MODULE_LICENSE("GPL v2");
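
Note: battery.c leans entirely on the pmic-voter abstraction: each limit (FCC, FV, PL_DISABLE, PL_AWAKE) is a named votable, clients cast votes, and the callback only ever sees the aggregated winner. A minimal sketch of that pattern using the same APIs exercised above; the votable name, client names, and values are illustrative:

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include "pmic-voter.h"

    /* called with the aggregated (minimum) value whenever it changes */
    static int demo_fcc_cb(struct votable *votable, void *data,
                           int fcc_ua, const char *client)
    {
            pr_debug("effective FCC now %d uA (set by %s)\n",
                     fcc_ua, client ? client : "none");
            return 0;
    }

    static int demo_votable(void)
    {
            struct votable *fcc = create_votable("FCC_DEMO", VOTE_MIN,
                                                 demo_fcc_cb, NULL);

            if (IS_ERR(fcc))
                    return PTR_ERR(fcc);

            vote(fcc, "THERMAL_VOTER", true, 2000000);  /* cap at 2 A */
            vote(fcc, "USER_VOTER", true, 1500000);     /* cap at 1.5 A */

            /* the minimum of the enabled votes wins: 1500000 uA */
            pr_info("fcc = %d uA\n", get_effective_result(fcc));

            destroy_votable(fcc);
            return 0;
    }
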
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
new file mode 100644
index 0000000..c0ba5a9
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -0,0 +1,442 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FG_CORE_H__
+#define __FG_CORE_H__
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "pmic-voter.h"
+
+#define fg_dbg(chip, reason, fmt, ...)			\
+	do {							\
+		if (*chip->debug_mask & (reason))		\
+			pr_info(fmt, ##__VA_ARGS__);	\
+		else						\
+			pr_debug(fmt, ##__VA_ARGS__);	\
+	} while (0)
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
+
+/* Awake votable reasons */
+#define SRAM_READ	"fg_sram_read"
+#define SRAM_WRITE	"fg_sram_write"
+#define PROFILE_LOAD	"fg_profile_load"
+#define DELTA_SOC	"fg_delta_soc"
+
+#define DEBUG_PRINT_BUFFER_SIZE		64
+/* 3 byte address + 1 space character */
+#define ADDR_LEN			4
+/* Format is 'XX ' */
+#define CHARS_PER_ITEM			3
+/* 4 data items per line */
+#define ITEMS_PER_LINE			4
+#define MAX_LINE_LENGTH			(ADDR_LEN + (ITEMS_PER_LINE *	\
+					CHARS_PER_ITEM) + 1)
+
+#define FG_SRAM_ADDRESS_MAX		255
+#define FG_SRAM_LEN			504
+#define PROFILE_LEN			224
+#define PROFILE_COMP_LEN		148
+#define BUCKET_COUNT			8
+#define BUCKET_SOC_PCT			(256 / BUCKET_COUNT)
+
+#define KI_COEFF_MAX			62200
+#define KI_COEFF_SOC_LEVELS		3
+
+#define SLOPE_LIMIT_COEFF_MAX		31
+
+#define BATT_THERM_NUM_COEFFS		3
+
+/* Debug flag definitions */
+enum fg_debug_flag {
+	FG_IRQ			= BIT(0), /* Show interrupts */
+	FG_STATUS		= BIT(1), /* Show FG status changes */
+	FG_POWER_SUPPLY		= BIT(2), /* Show POWER_SUPPLY */
+	FG_SRAM_WRITE		= BIT(3), /* Show SRAM writes */
+	FG_SRAM_READ		= BIT(4), /* Show SRAM reads */
+	FG_BUS_WRITE		= BIT(5), /* Show REGMAP writes */
+	FG_BUS_READ		= BIT(6), /* Show REGMAP reads */
+	FG_CAP_LEARN		= BIT(7), /* Show capacity learning */
+	FG_TTF			= BIT(8), /* Show time to full */
+};
+
+/* SRAM access */
+enum sram_access_flags {
+	FG_IMA_DEFAULT	= 0,
+	FG_IMA_ATOMIC	= BIT(0),
+	FG_IMA_NO_WLOCK	= BIT(1),
+};
+
+/* JEITA */
+enum {
+	JEITA_COLD = 0,
+	JEITA_COOL,
+	JEITA_WARM,
+	JEITA_HOT,
+	NUM_JEITA_LEVELS,
+};
+
+/* FG irqs */
+enum fg_irq_index {
+	MSOC_FULL_IRQ = 0,
+	MSOC_HIGH_IRQ,
+	MSOC_EMPTY_IRQ,
+	MSOC_LOW_IRQ,
+	MSOC_DELTA_IRQ,
+	BSOC_DELTA_IRQ,
+	SOC_READY_IRQ,
+	SOC_UPDATE_IRQ,
+	BATT_TEMP_DELTA_IRQ,
+	BATT_MISSING_IRQ,
+	ESR_DELTA_IRQ,
+	VBATT_LOW_IRQ,
+	VBATT_PRED_DELTA_IRQ,
+	DMA_GRANT_IRQ,
+	MEM_XCP_IRQ,
+	IMA_RDY_IRQ,
+	FG_IRQ_MAX,
+};
+
+/* WA flags */
+enum {
+	DELTA_SOC_IRQ_WA = BIT(0),
+};
+
+/*
+ * List of FG_SRAM parameters. Please add a parameter only if it is an entry
+ * that will be used either to configure an entity (e.g. termination current),
+ * which might need some encoding, or an entry that will be read from SRAM
+ * and decoded (e.g. CC_SOC_SW) for SW to use in various places. For generic
+ * reads/writes to SRAM registers, please use the fg_sram_read/write APIs
+ * directly without adding an entry here.
+ */
+enum fg_sram_param_id {
+	FG_SRAM_BATT_SOC = 0,
+	FG_SRAM_FULL_SOC,
+	FG_SRAM_VOLTAGE_PRED,
+	FG_SRAM_OCV,
+	FG_SRAM_ESR,
+	FG_SRAM_RSLOW,
+	FG_SRAM_ALG_FLAGS,
+	FG_SRAM_CC_SOC,
+	FG_SRAM_CC_SOC_SW,
+	FG_SRAM_ACT_BATT_CAP,
+	/* Entries below here are configurable during initialization */
+	FG_SRAM_CUTOFF_VOLT,
+	FG_SRAM_EMPTY_VOLT,
+	FG_SRAM_VBATT_LOW,
+	FG_SRAM_FLOAT_VOLT,
+	FG_SRAM_VBATT_FULL,
+	FG_SRAM_ESR_TIMER_DISCHG_MAX,
+	FG_SRAM_ESR_TIMER_DISCHG_INIT,
+	FG_SRAM_ESR_TIMER_CHG_MAX,
+	FG_SRAM_ESR_TIMER_CHG_INIT,
+	FG_SRAM_SYS_TERM_CURR,
+	FG_SRAM_CHG_TERM_CURR,
+	FG_SRAM_DELTA_MSOC_THR,
+	FG_SRAM_DELTA_BSOC_THR,
+	FG_SRAM_RECHARGE_SOC_THR,
+	FG_SRAM_RECHARGE_VBATT_THR,
+	FG_SRAM_KI_COEFF_MED_DISCHG,
+	FG_SRAM_KI_COEFF_HI_DISCHG,
+	FG_SRAM_ESR_TIGHT_FILTER,
+	FG_SRAM_ESR_BROAD_FILTER,
+	FG_SRAM_SLOPE_LIMIT,
+	FG_SRAM_MAX,
+};
+
+struct fg_sram_param {
+	u16 addr_word;
+	int addr_byte;
+	u8  len;
+	int value;
+	int numrtr;
+	int denmtr;
+	int offset;
+	void (*encode)(struct fg_sram_param *sp, enum fg_sram_param_id id,
+		int val, u8 *buf);
+	int (*decode)(struct fg_sram_param *sp, enum fg_sram_param_id id,
+		int val);
+};
+
+enum fg_alg_flag_id {
+	ALG_FLAG_SOC_LT_OTG_MIN = 0,
+	ALG_FLAG_SOC_LT_RECHARGE,
+	ALG_FLAG_IBATT_LT_ITERM,
+	ALG_FLAG_IBATT_GT_HPM,
+	ALG_FLAG_IBATT_GT_UPM,
+	ALG_FLAG_VBATT_LT_RECHARGE,
+	ALG_FLAG_VBATT_GT_VFLOAT,
+	ALG_FLAG_MAX,
+};
+
+struct fg_alg_flag {
+	char	*name;
+	u8	bit;
+	bool	invalid;
+};
+
+enum wa_flags {
+	PMI8998_V1_REV_WA = BIT(0),
+};
+
+enum slope_limit_status {
+	LOW_TEMP_DISCHARGE = 0,
+	LOW_TEMP_CHARGE,
+	HIGH_TEMP_DISCHARGE,
+	HIGH_TEMP_CHARGE,
+	SLOPE_LIMIT_NUM_COEFFS,
+};
+
+/* DT parameters for FG device */
+struct fg_dt_props {
+	bool	force_load_profile;
+	bool	hold_soc_while_full;
+	bool	auto_recharge_soc;
+	int	cutoff_volt_mv;
+	int	empty_volt_mv;
+	int	vbatt_low_thr_mv;
+	int	chg_term_curr_ma;
+	int	sys_term_curr_ma;
+	int	delta_soc_thr;
+	int	recharge_soc_thr;
+	int	recharge_volt_thr_mv;
+	int	rsense_sel;
+	int	esr_timer_charging;
+	int	esr_timer_awake;
+	int	esr_timer_asleep;
+	int	rconn_mohms;
+	int	esr_clamp_mohms;
+	int	cl_start_soc;
+	int	cl_max_temp;
+	int	cl_min_temp;
+	int	cl_max_cap_inc;
+	int	cl_max_cap_dec;
+	int	cl_max_cap_limit;
+	int	cl_min_cap_limit;
+	int	jeita_hyst_temp;
+	int	batt_temp_delta;
+	int	esr_flt_switch_temp;
+	int	esr_tight_flt_upct;
+	int	esr_broad_flt_upct;
+	int	esr_tight_lt_flt_upct;
+	int	esr_broad_lt_flt_upct;
+	int	slope_limit_temp;
+	int	jeita_thresholds[NUM_JEITA_LEVELS];
+	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
+	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
+	int	ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
+	int	slope_limit_coeffs[SLOPE_LIMIT_NUM_COEFFS];
+	u8	batt_therm_coeffs[BATT_THERM_NUM_COEFFS];
+};
+
+/* parameters from battery profile */
+struct fg_batt_props {
+	const char	*batt_type_str;
+	char		*batt_profile;
+	int		float_volt_uv;
+	int		vbatt_full_mv;
+	int		fastchg_curr_ma;
+};
+
+struct fg_cyc_ctr_data {
+	bool		en;
+	bool		started[BUCKET_COUNT];
+	u16		count[BUCKET_COUNT];
+	u8		last_soc[BUCKET_COUNT];
+	int		id;
+	struct mutex	lock;
+};
+
+struct fg_cap_learning {
+	bool		active;
+	int		init_cc_soc_sw;
+	int64_t		nom_cap_uah;
+	int64_t		init_cc_uah;
+	int64_t		final_cc_uah;
+	int64_t		learned_cc_uah;
+	struct mutex	lock;
+};
+
+struct fg_irq_info {
+	const char		*name;
+	const irq_handler_t	handler;
+	bool			wakeable;
+	int			irq;
+};
+
+struct fg_circ_buf {
+	int	arr[20];
+	int	size;
+	int	head;
+};
+
+struct fg_pt {
+	s32 x;
+	s32 y;
+};
+
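+/*
+ * Natural-log lookup table: each entry is { x, 1000 * ln(x / 1000) },
+ * e.g. ln(2) ~= 0.693 gives { 2000, 693 }.  Intermediate points are
+ * obtained with fg_lerp().
+ */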
+static const struct fg_pt fg_ln_table[] = {
+	{ 1000,		0 },
+	{ 2000,		693 },
+	{ 4000,		1386 },
+	{ 6000,		1792 },
+	{ 8000,		2079 },
+	{ 16000,	2773 },
+	{ 32000,	3466 },
+	{ 64000,	4159 },
+	{ 128000,	4852 },
+};
+
+struct fg_chip {
+	struct device		*dev;
+	struct pmic_revid_data	*pmic_rev_id;
+	struct regmap		*regmap;
+	struct dentry		*dfs_root;
+	struct power_supply	*fg_psy;
+	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
+	struct power_supply	*dc_psy;
+	struct power_supply	*parallel_psy;
+	struct iio_channel	*batt_id_chan;
+	struct fg_memif		*sram;
+	struct fg_irq_info	*irqs;
+	struct votable		*awake_votable;
+	struct fg_sram_param	*sp;
+	struct fg_alg_flag	*alg_flags;
+	int			*debug_mask;
+	char			batt_profile[PROFILE_LEN];
+	struct fg_dt_props	dt;
+	struct fg_batt_props	bp;
+	struct fg_cyc_ctr_data	cyc_ctr;
+	struct notifier_block	nb;
+	struct fg_cap_learning  cl;
+	struct mutex		bus_lock;
+	struct mutex		sram_rw_lock;
+	struct mutex		batt_avg_lock;
+	struct mutex		charge_full_lock;
+	u32			batt_soc_base;
+	u32			batt_info_base;
+	u32			mem_if_base;
+	u32			rradc_base;
+	u32			wa_flags;
+	int			batt_id_ohms;
+	int			charge_status;
+	int			prev_charge_status;
+	int			charge_done;
+	int			charge_type;
+	int			last_soc;
+	int			last_batt_temp;
+	int			health;
+	int			maint_soc;
+	int			delta_soc;
+	int			last_msoc;
+	enum slope_limit_status	slope_limit_sts;
+	bool			profile_available;
+	bool			profile_loaded;
+	bool			battery_missing;
+	bool			fg_restarting;
+	bool			charge_full;
+	bool			recharge_soc_adjusted;
+	bool			ki_coeff_dischg_en;
+	bool			esr_fcc_ctrl_en;
+	bool			soc_reporting_ready;
+	bool			esr_flt_cold_temp_en;
+	bool			bsoc_delta_irq_en;
+	bool			slope_limit_en;
+	struct completion	soc_update;
+	struct completion	soc_ready;
+	struct delayed_work	profile_load_work;
+	struct work_struct	status_change_work;
+	struct work_struct	cycle_count_work;
+	struct delayed_work	batt_avg_work;
+	struct delayed_work	sram_dump_work;
+	struct fg_circ_buf	ibatt_circ_buf;
+	struct fg_circ_buf	vbatt_circ_buf;
+};
+
+/* Debugfs data structures are below */
+
+/* Log buffer */
+struct fg_log_buffer {
+	size_t		rpos;
+	size_t		wpos;
+	size_t		len;
+	char		data[0];
+};
+
+/* transaction parameters */
+struct fg_trans {
+	struct fg_chip		*chip;
+	struct mutex		fg_dfs_lock; /* Prevent thread concurrency */
+	struct fg_log_buffer	*log;
+	u32			cnt;
+	u16			addr;
+	u32			offset;
+	u8			*data;
+};
+
+struct fg_dbgfs {
+	struct debugfs_blob_wrapper	help_msg;
+	struct fg_chip			*chip;
+	struct dentry			*root;
+	u32				cnt;
+	u32				addr;
+};
+
+extern int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags);
+extern int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags);
+extern int fg_sram_masked_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 mask, u8 val, int flags);
+extern int fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len);
+extern int fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len, bool atomic_access);
+extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
+extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
+extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
+extern int fg_ima_init(struct fg_chip *chip);
+extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
+extern int fg_clear_dma_errors_if_any(struct fg_chip *chip);
+extern int fg_debugfs_create(struct fg_chip *chip);
+extern void fill_string(char *str, size_t str_len, u8 *buf, int buf_len);
+extern void dump_sram(u8 *buf, int addr, int len);
+extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
+extern s64 fg_float_decode(u16 val);
+extern bool is_input_present(struct fg_chip *chip);
+extern void fg_circ_buf_add(struct fg_circ_buf *buf, int val);
+extern void fg_circ_buf_clr(struct fg_circ_buf *buf);
+extern int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg);
+extern int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input,
+			s32 *output);
+#endif
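
Note: the comment above the fg_sram_param_id list asks that one-off SRAM accesses go straight through fg_sram_read()/fg_sram_write() instead of growing the parameter table. A hedged sketch against the prototypes declared at the end of this header; the word address and length are made up for illustration and chip is assumed to be a valid struct fg_chip pointer:

    u8 buf[4];
    int rc;

    /* read one 4-byte SRAM word at an illustrative word address, byte
     * offset 0, with the default (non-atomic) access flags */
    rc = fg_sram_read(chip, 100, 0, buf, sizeof(buf), FG_IMA_DEFAULT);
    if (rc < 0)
            pr_err("sram read failed, rc=%d\n", rc);
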
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
new file mode 100644
index 0000000..2dc7618
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -0,0 +1,740 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"FG: %s: " fmt, __func__
+
+#include "fg-core.h"
+#include "fg-reg.h"
+
+/* Generic definitions */
+#define RETRY_COUNT		3
+#define BYTES_PER_SRAM_WORD	4
+
+enum {
+	FG_READ = 0,
+	FG_WRITE,
+};
+
+static int fg_set_address(struct fg_chip *chip, u16 address)
+{
+	u8 buffer[2];
+	int rc;
+
+	buffer[0] = address & 0xFF;
+	/* MSB has to be written zero */
+	buffer[1] = 0;
+
+	rc = fg_write(chip, MEM_IF_ADDR_LSB(chip), buffer, 2);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04X, rc=%d\n",
+			MEM_IF_ADDR_LSB(chip), rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int fg_config_access_mode(struct fg_chip *chip, bool access, bool burst)
+{
+	int rc;
+	u8 intf_ctl = 0;
+
+	intf_ctl = ((access == FG_WRITE) ? IMA_WR_EN_BIT : 0) |
+			(burst ? MEM_ACS_BURST_BIT : 0);
+
+	rc = fg_masked_write(chip, MEM_IF_IMA_CTL(chip), IMA_CTL_MASK,
+			intf_ctl);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_IMA_CTL(chip), rc);
+		return -EIO;
+	}
+
+	return rc;
+}
+
+static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
+{
+	u8 val, hw_sts, exp_sts;
+	int rc, tries = 250;
+
+	/*
+	 * The values written while running the IACS clear sequence come
+	 * from the hardware documentation.
+	 */
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip),
+			IACS_CLR_BIT | STATIC_CLK_EN_BIT,
+			IACS_CLR_BIT | STATIC_CLK_EN_BIT);
+	if (rc < 0) {
+		pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
+			rc);
+		return rc;
+	}
+
+	rc = fg_config_access_mode(chip, FG_READ, false);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_IMA_CTL(chip), rc);
+		return rc;
+	}
+
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT,
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to set ima_req_access bit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Delay for the clock to reach FG */
+	usleep_range(35, 40);
+
+	while (1) {
+		val = 0;
+		rc = fg_write(chip, MEM_IF_ADDR_MSB(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to write 0x%04x, rc=%d\n",
+				MEM_IF_ADDR_MSB(chip), rc);
+			return rc;
+		}
+
+		val = 0;
+		rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to write 0x%04x, rc=%d\n",
+				MEM_IF_WR_DATA3(chip), rc);
+			return rc;
+		}
+
+		rc = fg_read(chip, MEM_IF_RD_DATA3(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_RD_DATA3(chip), rc);
+			return rc;
+		}
+
+		/* Delay for IMA hardware to clear */
+		usleep_range(35, 40);
+
+		rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+			return rc;
+		}
+
+		if (hw_sts != 0)
+			continue;
+
+		rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+			return rc;
+		}
+
+		if (exp_sts == 0 || !(--tries))
+			break;
+	}
+
+	if (!tries)
+		pr_err("Failed to clear the error? hw_sts: %x exp_sts: %d\n",
+			hw_sts, exp_sts);
+
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_CLR_BIT, 0);
+	if (rc < 0) {
+		pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
+			rc);
+		return rc;
+	}
+
+	udelay(5);
+
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_MEM_INTF_CFG(chip), rc);
+		return rc;
+	}
+
+	/* Delay before next transaction is attempted */
+	usleep_range(35, 40);
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "IACS clear sequence complete\n");
+	return rc;
+}
+
+int fg_clear_dma_errors_if_any(struct fg_chip *chip)
+{
+	int rc;
+	u8 dma_sts;
+
+	rc = fg_read(chip, MEM_IF_DMA_STS(chip), &dma_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			MEM_IF_DMA_STS(chip), rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "dma_sts: %x\n", dma_sts);
+
+	if (dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT)) {
+		rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip),
+				DMA_CLEAR_LOG_BIT, DMA_CLEAR_LOG_BIT);
+		if (rc < 0) {
+			pr_err("failed to write addr=0x%04x, rc=%d\n",
+				MEM_IF_DMA_CTL(chip), rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts)
+{
+	int rc = 0;
+	u8 err_sts, exp_sts = 0, hw_sts = 0;
+	bool run_err_clr_seq = false;
+
+	rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, MEM_IF_IMA_ERR_STS(chip), &err_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_err_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+		err_sts, exp_sts, hw_sts);
+
+	if (check_hw_sts) {
+		/*
+		 * The lower nibble should be equal to the upper nibble before
+		 * SRAM transactions begin from the SW side. If they are
+		 * unequal, the error clear sequence should be run regardless
+		 * of IMA exception errors.
+		 */
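+		/*
+		 * Illustrative example (made-up value): hw_sts = 0x32 has
+		 * lower nibble 0x2 and upper nibble 0x3, so the clear
+		 * sequence is run even if exp_sts reads back as 0.
+		 */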
+		if ((hw_sts & 0x0F) != hw_sts >> 4) {
+			pr_err("IMA HW not in correct state, hw_sts=%x\n",
+				hw_sts);
+			run_err_clr_seq = true;
+		}
+	}
+
+	if (exp_sts & (IACS_ERR_BIT | XCT_TYPE_ERR_BIT | DATA_RD_ERR_BIT |
+		DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_STABLE_ERR_BIT)) {
+		pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+		run_err_clr_seq = true;
+	}
+
+	if (run_err_clr_seq) {
+		/* clear the error */
+		rc = fg_run_iacs_clear_sequence(chip);
+		if (rc < 0) {
+			pr_err("failed to run iacs clear sequence rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Retry again as there was an error in the transaction */
+		return -EAGAIN;
+	}
+
+	return rc;
+}
+
+static int fg_check_iacs_ready(struct fg_chip *chip)
+{
+	int rc = 0, tries = 250;
+	u8 ima_opr_sts = 0;
+
+	/*
+	 * Additional delay to make sure IACS ready bit is set after
+	 * Read/Write operation.
+	 */
+
+	usleep_range(30, 35);
+	while (1) {
+		rc = fg_read(chip, MEM_IF_IMA_OPR_STS(chip), &ima_opr_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_IMA_OPR_STS(chip), rc);
+			return rc;
+		}
+
+		if (ima_opr_sts & IACS_RDY_BIT)
+			break;
+
+		if (!(--tries))
+			break;
+
+		/* delay for iacs_ready to be asserted */
+		usleep_range(5000, 7000);
+	}
+
+	if (!tries) {
+		pr_err("IACS_RDY not set, opr_sts: %d\n", ima_opr_sts);
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			return rc;
+		}
+
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
+				int offset, u8 *val, int len)
+{
+	int rc = 0, i;
+	u8 *ptr = val, byte_enable = 0, num_bytes = 0;
+
+	fg_dbg(chip, FG_SRAM_WRITE, "length %d addr=%02X offset=%d\n", len,
+		address, offset);
+
+	while (len > 0) {
+		num_bytes = (offset + len) > BYTES_PER_SRAM_WORD ?
+				(BYTES_PER_SRAM_WORD - offset) : len;
+
+		/* write to byte_enable */
+		for (i = offset; i < (offset + num_bytes); i++)
+			byte_enable |= BIT(i);
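+		/*
+		 * Illustrative example (made-up values): offset = 1 and
+		 * num_bytes = 2 yield byte_enable = BIT(1) | BIT(2) = 0x06,
+		 * so only the middle two bytes of the SRAM word are updated.
+		 */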
+
+		rc = fg_write(chip, MEM_IF_IMA_BYTE_EN(chip), &byte_enable, 1);
+		if (rc < 0) {
+			pr_err("Unable to write to byte_en_reg rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* write data */
+		rc = fg_write(chip, MEM_IF_WR_DATA0(chip) + offset, ptr,
+				num_bytes);
+		if (rc < 0) {
+			pr_err("failed to write to 0x%04x, rc=%d\n",
+				MEM_IF_WR_DATA0(chip) + offset, rc);
+			return rc;
+		}
+
+		/*
+		 * The last-byte WR_DATA3 starts the write transaction.
+		 * Write a dummy value to WR_DATA3 if it does not have
+		 * valid data. This dummy data is not written to the
+		 * SRAM as byte_en for WR_DATA3 is not set.
+		 */
+		if (!(byte_enable & BIT(3))) {
+			u8 dummy_byte = 0x0;
+
+			rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &dummy_byte,
+					1);
+			if (rc < 0) {
+				pr_err("failed to write dummy-data to WR_DATA3 rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			return rc;
+		}
+
+		ptr += num_bytes;
+		len -= num_bytes;
+		offset = byte_enable = 0;
+
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int __fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
+				int offset, u8 *val, int len)
+{
+	int rc = 0, total_len;
+	u8 *rd_data = val, num_bytes;
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+
+	fg_dbg(chip, FG_SRAM_READ, "length %d addr=%02X\n", len, address);
+
+	total_len = len;
+	while (len > 0) {
+		num_bytes = (offset + len) > BYTES_PER_SRAM_WORD ?
+				(BYTES_PER_SRAM_WORD - offset) : len;
+		rc = fg_read(chip, MEM_IF_RD_DATA0(chip) + offset, rd_data,
+				num_bytes);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_RD_DATA0(chip) + offset, rc);
+			return rc;
+		}
+
+		rd_data += num_bytes;
+		len -= num_bytes;
+		offset = 0;
+
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			return rc;
+		}
+
+		if (len && len < BYTES_PER_SRAM_WORD) {
+			/*
+			 * Move to single mode. Changing the address is not
+			 * required here since the interface is already in
+			 * burst mode; the FG HW increments the address
+			 * internally once the MSB of RD_DATA is read.
+			 */
+			rc = fg_config_access_mode(chip, FG_READ, 0);
+			if (rc < 0) {
+				pr_err("failed to move to single mode rc=%d\n",
+					rc);
+				return -EIO;
+			}
+		}
+
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (*chip->debug_mask & FG_SRAM_READ) {
+		fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+		pr_info("data read: %s\n", str);
+	}
+
+	return rc;
+}
+
+static int fg_get_mem_access_status(struct fg_chip *chip, bool *status)
+{
+	int rc;
+	u8 mem_if_sts;
+
+	rc = fg_read(chip, MEM_IF_MEM_INTF_CFG(chip), &mem_if_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read rif_mem status rc=%d\n", rc);
+		return rc;
+	}
+
+	*status = mem_if_sts & MEM_ACCESS_REQ_BIT;
+	return 0;
+}
+
+static bool is_mem_access_available(struct fg_chip *chip, int access)
+{
+	bool rif_mem_sts = true;
+	int rc, time_count = 0;
+
+	while (1) {
+		rc = fg_get_mem_access_status(chip, &rif_mem_sts);
+		if (rc < 0)
+			return false;
+
+		/* Inverting logic: the bit being clear means access is available */
+		if (!rif_mem_sts)
+			break;
+
+		fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
+			access ? "write" : "read");
+
+		/*
+		 * Try this no more than 4 times. If MEM_ACCESS_REQ is not
+		 * clear, then return an error instead of waiting for it again.
+		 */
+		if  (time_count > 4) {
+			pr_err("Tried 4 times(~16ms) polling MEM_ACCESS_REQ\n");
+			return false;
+		}
+
+		/* Wait for 4ms before reading MEM_ACCESS_REQ again */
+		usleep_range(4000, 4100);
+		time_count++;
+	}
+	return true;
+}
+
+static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
+		u16 address, int offset, int len, bool access)
+{
+	int rc = 0;
+
+	if (!is_mem_access_available(chip, access))
+		return -EBUSY;
+
+	/* configure for IMA access */
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT,
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to set ima_req_access bit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure for the read/write, single/burst mode */
+	rc = fg_config_access_mode(chip, access, (offset + len) > 4);
+	if (rc < 0) {
+		pr_err("failed to set memory access rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = fg_check_iacs_ready(chip);
+	if (rc < 0) {
+		pr_err_ratelimited("IACS_RDY failed rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_address(chip, address);
+	if (rc < 0) {
+		pr_err("failed to set address rc = %d\n", rc);
+		return rc;
+	}
+
+	if (access == FG_READ) {
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int fg_get_beat_count(struct fg_chip *chip, u8 *count)
+{
+	int rc;
+
+	rc = fg_read(chip, MEM_IF_FG_BEAT_COUNT(chip), count, 1);
+	*count &= BEAT_COUNT_MASK;
+	return rc;
+}
+
+int fg_interleaved_mem_read(struct fg_chip *chip, u16 address, u8 offset,
+				u8 *val, int len)
+{
+	int rc = 0, ret;
+	u8 start_beat_count, end_beat_count, count = 0;
+	bool retry = false;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
+
+retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Tried %d times\n", RETRY_COUNT);
+		retry = false;
+		goto out;
+	}
+
+	rc = fg_interleaved_mem_config(chip, val, address, offset, len,
+					FG_READ);
+	if (rc < 0) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read the start beat count */
+	rc = fg_get_beat_count(chip, &start_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read data */
+	rc = __fg_interleaved_mem_read(chip, address, offset, val, len);
+	if (rc < 0) {
+		count++;
+		if (rc == -EAGAIN) {
+			pr_err("IMA access failed retry_count = %d\n", count);
+			goto retry;
+		}
+		pr_err("failed to read SRAM address rc = %d\n", rc);
+		retry = true;
+		goto out;
+	}
+
+	/* read the end beat count */
+	rc = fg_get_beat_count(chip, &end_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	fg_dbg(chip, FG_SRAM_READ, "Start beat_count = %x End beat_count = %x\n",
+		start_beat_count, end_beat_count);
+
+	if (start_beat_count != end_beat_count) {
+		fg_dbg(chip, FG_SRAM_READ, "Beat count(%d/%d) do not match - retry transaction\n",
+			start_beat_count, end_beat_count);
+		count++;
+		retry = true;
+	}
+out:
+	/* Release IMA access */
+	ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0 && ret < 0) {
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+		return ret;
+	}
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
+
+	return rc;
+}
+
+int fg_interleaved_mem_write(struct fg_chip *chip, u16 address, u8 offset,
+				u8 *val, int len, bool atomic_access)
+{
+	int rc = 0, ret;
+	u8 start_beat_count, end_beat_count, count = 0;
+	bool retry = false;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
+
+retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Tried %d times\n", RETRY_COUNT);
+		retry = false;
+		goto out;
+	}
+
+	rc = fg_interleaved_mem_config(chip, val, address, offset, len,
+					FG_WRITE);
+	if (rc < 0) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read the start beat count */
+	rc = fg_get_beat_count(chip, &start_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* write data */
+	rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
+	if (rc < 0) {
+		count++;
+		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+			pr_err("IMA access failed retry_count = %d\n", count);
+			goto retry;
+		}
+		pr_err("failed to write SRAM address rc = %d\n", rc);
+		retry = true;
+		goto out;
+	}
+
+	/* read the end beat count */
+	rc = fg_get_beat_count(chip, &end_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	if (atomic_access && start_beat_count != end_beat_count)
+		pr_err("Start beat_count = %x End beat_count = %x\n",
+			start_beat_count, end_beat_count);
+out:
+	/* Release IMA access */
+	ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0 && ret < 0) {
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+		return ret;
+	}
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
+
+	/* Return the error we got before releasing memory access */
+	return rc;
+}
+
+int fg_ima_init(struct fg_chip *chip)
+{
+	int rc;
+
+	/*
+	 * Change the FG_MEM_INT interrupt to track IACS_READY
+	 * condition instead of end-of-transaction. This makes sure
+	 * that the next transaction starts only after the hw is ready.
+	 */
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_INTR_SRC_SLCT_BIT,
+				IACS_INTR_SRC_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to configure interrupt source %d\n", rc);
+		return rc;
+	}
+
+	/* Clear DMA errors if any before clearing IMA errors */
+	rc = fg_clear_dma_errors_if_any(chip);
+	if (rc < 0) {
+		pr_err("Error in checking DMA errors rc:%d\n", rc);
+		return rc;
+	}
+
+	/* Clear IMA errors if any before SRAM transactions can begin */
+	rc = fg_clear_ima_errors_if_any(chip, true);
+	if (rc < 0 && rc != -EAGAIN) {
+		pr_err("Error in checking IMA errors rc:%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
new file mode 100644
index 0000000..bf2827f
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -0,0 +1,328 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FG_REG_H__
+#define __FG_REG_H__
+
+/* FG_ADC_RR register definitions used only for READ */
+#define ADC_RR_FAKE_BATT_LOW_LSB(chip)		(chip->rradc_base + 0x58)
+#define ADC_RR_FAKE_BATT_HIGH_LSB(chip)		(chip->rradc_base + 0x5A)
+
+/* FG_BATT_SOC register definitions */
+#define BATT_SOC_FG_ALG_STS(chip)		(chip->batt_soc_base + 0x06)
+#define BATT_SOC_FG_ALG_AUX_STS0(chip)		(chip->batt_soc_base + 0x07)
+#define BATT_SOC_SLEEP_SHUTDOWN_STS(chip)	(chip->batt_soc_base + 0x08)
+#define BATT_SOC_FG_MONOTONIC_SOC(chip)		(chip->batt_soc_base + 0x09)
+#define BATT_SOC_FG_MONOTONIC_SOC_CP(chip)	(chip->batt_soc_base + 0x0A)
+#define BATT_SOC_INT_RT_STS(chip)		(chip->batt_soc_base + 0x10)
+#define BATT_SOC_EN_CTL(chip)			(chip->batt_soc_base + 0x46)
+#define BATT_SOC_RESTART(chip)			(chip->batt_soc_base + 0x48)
+#define BATT_SOC_STS_CLR(chip)			(chip->batt_soc_base + 0x4A)
+#define BATT_SOC_LOW_PWR_CFG(chip)		(chip->batt_soc_base + 0x52)
+#define BATT_SOC_LOW_PWR_STS(chip)		(chip->batt_soc_base + 0x56)
+
+/* BATT_SOC_INT_RT_STS */
+#define MSOC_EMPTY_BIT				BIT(5)
+
+/* BATT_SOC_EN_CTL */
+#define FG_ALGORITHM_EN_BIT			BIT(7)
+
+/* BATT_SOC_RESTART */
+#define RESTART_GO_BIT				BIT(0)
+
+/* FG_BATT_INFO register definitions */
+#define BATT_INFO_BATT_TEMP_STS(chip)		(chip->batt_info_base + 0x06)
+#define BATT_INFO_SYS_BATT(chip)		(chip->batt_info_base + 0x07)
+#define BATT_INFO_FG_STS(chip)			(chip->batt_info_base + 0x09)
+#define BATT_INFO_INT_RT_STS(chip)		(chip->batt_info_base + 0x10)
+#define BATT_INFO_BATT_REM_LATCH(chip)		(chip->batt_info_base + 0x4F)
+#define BATT_INFO_BATT_TEMP_LSB(chip)		(chip->batt_info_base + 0x50)
+#define BATT_INFO_BATT_TEMP_MSB(chip)		(chip->batt_info_base + 0x51)
+#define BATT_INFO_BATT_TEMP_CFG(chip)		(chip->batt_info_base + 0x56)
+#define BATT_INFO_BATT_TMPR_INTR(chip)		(chip->batt_info_base + 0x59)
+#define BATT_INFO_THERM_C1(chip)		(chip->batt_info_base + 0x5C)
+#define BATT_INFO_THERM_C2(chip)		(chip->batt_info_base + 0x5D)
+#define BATT_INFO_THERM_C3(chip)		(chip->batt_info_base + 0x5E)
+#define BATT_INFO_THERM_HALF_RANGE(chip)	(chip->batt_info_base + 0x5F)
+#define BATT_INFO_JEITA_CTLS(chip)		(chip->batt_info_base + 0x61)
+#define BATT_INFO_JEITA_TOO_COLD(chip)		(chip->batt_info_base + 0x62)
+#define BATT_INFO_JEITA_COLD(chip)		(chip->batt_info_base + 0x63)
+#define BATT_INFO_JEITA_HOT(chip)		(chip->batt_info_base + 0x64)
+#define BATT_INFO_JEITA_TOO_HOT(chip)		(chip->batt_info_base + 0x65)
+
+/* only for v1.1 */
+#define BATT_INFO_ESR_CFG(chip)			(chip->batt_info_base + 0x69)
+/* starting from v2.0 */
+#define BATT_INFO_ESR_GENERAL_CFG(chip)		(chip->batt_info_base + 0x68)
+#define BATT_INFO_ESR_PULL_DN_CFG(chip)		(chip->batt_info_base + 0x69)
+#define BATT_INFO_ESR_FAST_CRG_CFG(chip)	(chip->batt_info_base + 0x6A)
+
+#define BATT_INFO_BATT_MISS_CFG(chip)		(chip->batt_info_base + 0x6B)
+#define BATT_INFO_WATCHDOG_COUNT(chip)		(chip->batt_info_base + 0x70)
+#define BATT_INFO_WATCHDOG_CFG(chip)		(chip->batt_info_base + 0x71)
+#define BATT_INFO_IBATT_SENSING_CFG(chip)	(chip->batt_info_base + 0x73)
+#define BATT_INFO_QNOVO_CFG(chip)		(chip->batt_info_base + 0x74)
+#define BATT_INFO_QNOVO_SCALER(chip)		(chip->batt_info_base + 0x75)
+
+/* starting from v2.0 */
+#define BATT_INFO_CRG_SERVICES(chip)		(chip->batt_info_base + 0x90)
+
+/* Following LSB/MSB address are for v2.0 and above; v1.1 have them swapped */
+#define BATT_INFO_VBATT_LSB(chip)		(chip->batt_info_base + 0xA0)
+#define BATT_INFO_VBATT_MSB(chip)		(chip->batt_info_base + 0xA1)
+#define BATT_INFO_IBATT_LSB(chip)		(chip->batt_info_base + 0xA2)
+#define BATT_INFO_IBATT_MSB(chip)		(chip->batt_info_base + 0xA3)
+#define BATT_INFO_ESR_LSB(chip)			(chip->batt_info_base + 0xA4)
+#define BATT_INFO_ESR_MSB(chip)			(chip->batt_info_base + 0xA5)
+#define BATT_INFO_VBATT_LSB_CP(chip)		(chip->batt_info_base + 0xA6)
+#define BATT_INFO_VBATT_MSB_CP(chip)		(chip->batt_info_base + 0xA7)
+#define BATT_INFO_IBATT_LSB_CP(chip)		(chip->batt_info_base + 0xA8)
+#define BATT_INFO_IBATT_MSB_CP(chip)		(chip->batt_info_base + 0xA9)
+#define BATT_INFO_ESR_LSB_CP(chip)		(chip->batt_info_base + 0xAA)
+#define BATT_INFO_ESR_MSB_CP(chip)		(chip->batt_info_base + 0xAB)
+#define BATT_INFO_VADC_LSB(chip)		(chip->batt_info_base + 0xAC)
+#define BATT_INFO_VADC_MSB(chip)		(chip->batt_info_base + 0xAD)
+#define BATT_INFO_IADC_LSB(chip)		(chip->batt_info_base + 0xAE)
+#define BATT_INFO_IADC_MSB(chip)		(chip->batt_info_base + 0xAF)
+#define BATT_INFO_TM_MISC(chip)			(chip->batt_info_base + 0xE5)
+#define BATT_INFO_TM_MISC1(chip)		(chip->batt_info_base + 0xE6)
+
+/* BATT_INFO_BATT_TEMP_STS */
+#define JEITA_TOO_HOT_STS_BIT			BIT(7)
+#define JEITA_HOT_STS_BIT			BIT(6)
+#define JEITA_COLD_STS_BIT			BIT(5)
+#define JEITA_TOO_COLD_STS_BIT			BIT(4)
+#define BATT_TEMP_DELTA_BIT			BIT(1)
+#define BATT_TEMP_AVAIL_BIT			BIT(0)
+
+/* BATT_INFO_SYS_BATT */
+#define BATT_REM_LATCH_STS_BIT			BIT(4)
+#define BATT_MISSING_HW_BIT			BIT(2)
+#define BATT_MISSING_ALG_BIT			BIT(1)
+#define BATT_MISSING_CMP_BIT			BIT(0)
+
+/* BATT_INFO_FG_STS */
+#define FG_WD_RESET_BIT				BIT(7)
+/* This bit is not present in v1.1 */
+#define FG_CRG_TRM_BIT				BIT(0)
+
+/* BATT_INFO_INT_RT_STS */
+#define BT_TMPR_DELTA_BIT			BIT(6)
+#define WDOG_EXP_BIT				BIT(5)
+#define BT_ATTN_BIT				BIT(4)
+#define BT_MISS_BIT				BIT(3)
+#define ESR_DELTA_BIT				BIT(2)
+#define VBT_LOW_BIT				BIT(1)
+#define VBT_PRD_DELTA_BIT			BIT(0)
+
+/* BATT_INFO_INT_RT_STS */
+#define BATT_REM_LATCH_CLR_BIT			BIT(7)
+
+/* BATT_INFO_BATT_TEMP_LSB/MSB */
+#define BATT_TEMP_LSB_MASK			GENMASK(7, 0)
+#define BATT_TEMP_MSB_MASK			GENMASK(2, 0)
+
+/* BATT_INFO_BATT_TEMP_CFG */
+#define JEITA_TEMP_HYST_MASK			GENMASK(5, 4)
+#define JEITA_TEMP_HYST_SHIFT			4
+#define JEITA_TEMP_NO_HYST			0x0
+#define JEITA_TEMP_HYST_1C			0x1
+#define JEITA_TEMP_HYST_2C			0x2
+#define JEITA_TEMP_HYST_3C			0x3
+
+/* BATT_INFO_BATT_TMPR_INTR */
+#define CHANGE_THOLD_MASK			GENMASK(1, 0)
+#define BTEMP_DELTA_2K				0x0
+#define BTEMP_DELTA_4K				0x1
+#define BTEMP_DELTA_6K				0x2
+#define BTEMP_DELTA_10K				0x3
+
+/* BATT_INFO_THERM_C1/C2/C3 */
+#define BATT_INFO_THERM_COEFF_MASK		GENMASK(7, 0)
+
+/* BATT_INFO_THERM_HALF_RANGE */
+#define BATT_INFO_THERM_TEMP_MASK		GENMASK(7, 0)
+
+/* BATT_INFO_JEITA_CTLS */
+#define JEITA_STS_CLEAR_BIT			BIT(0)
+
+/* BATT_INFO_JEITA_TOO_COLD/COLD/HOT/TOO_HOT */
+#define JEITA_THOLD_MASK			GENMASK(7, 0)
+
+/* BATT_INFO_ESR_CFG */
+#define CFG_ACTIVE_PD_MASK			GENMASK(2, 1)
+#define CFG_FCC_DEC_MASK			GENMASK(4, 3)
+
+/* BATT_INFO_ESR_GENERAL_CFG */
+#define ESR_DEEP_TAPER_EN_BIT			BIT(0)
+
+/* BATT_INFO_ESR_PULL_DN_CFG */
+#define ESR_PULL_DOWN_IVAL_MASK			GENMASK(3, 2)
+#define ESR_MEAS_CUR_60MA			0x0
+#define ESR_MEAS_CUR_120MA			0x1
+#define ESR_MEAS_CUR_180MA			0x2
+#define ESR_MEAS_CUR_240MA			0x3
+#define ESR_PULL_DOWN_MODE_MASK			GENMASK(1, 0)
+#define ESR_NO_PULL_DOWN			0x0
+#define ESR_STATIC_PULL_DOWN			0x1
+#define ESR_CRG_DSC_PULL_DOWN			0x2
+#define ESR_DSC_PULL_DOWN			0x3
+
+/* BATT_INFO_ESR_FAST_CRG_CFG */
+#define ESR_FAST_CRG_IVAL_MASK			GENMASK(3, 1)
+#define ESR_FCC_300MA				0x0
+#define ESR_FCC_600MA				0x1
+#define ESR_FCC_1A				0x2
+#define ESR_FCC_2A				0x3
+#define ESR_FCC_3A				0x4
+#define ESR_FCC_4A				0x5
+#define ESR_FCC_5A				0x6
+#define ESR_FCC_6A				0x7
+#define ESR_FAST_CRG_CTL_EN_BIT			BIT(0)
+
+/* BATT_INFO_BATT_MISS_CFG */
+#define BM_THERM_TH_MASK			GENMASK(5, 4)
+#define RES_TH_0P75_MOHM			0x0
+#define RES_TH_1P00_MOHM			0x1
+#define RES_TH_1P50_MOHM			0x2
+#define RES_TH_3P00_MOHM			0x3
+#define BM_BATT_ID_TH_MASK			GENMASK(3, 2)
+#define BM_FROM_THERM_BIT			BIT(1)
+#define BM_FROM_BATT_ID_BIT			BIT(0)
+
+/* BATT_INFO_WATCHDOG_COUNT */
+#define WATCHDOG_COUNTER			GENMASK(7, 0)
+
+/* BATT_INFO_WATCHDOG_CFG */
+#define RESET_CAPABLE_BIT			BIT(2)
+#define PET_CTRL_BIT				BIT(1)
+#define ENABLE_CTRL_BIT				BIT(0)
+
+/* BATT_INFO_IBATT_SENSING_CFG */
+#define ADC_BITSTREAM_INV_BIT			BIT(4)
+#define SOURCE_SELECT_MASK			GENMASK(1, 0)
+#define SRC_SEL_BATFET				0x0
+#define SRC_SEL_BATFET_SMB			0x2
+#define SRC_SEL_RESERVED			0x3
+
+/* BATT_INFO_QNOVO_CFG */
+#define LD_REG_FORCE_CTL_BIT			BIT(2)
+#define LD_REG_CTRL_FORCE_HIGH			LD_REG_FORCE_CTL_BIT
+#define LD_REG_CTRL_FORCE_LOW			0
+#define LD_REG_CTRL_BIT				BIT(1)
+#define LD_REG_CTRL_REGISTER			LD_REG_CTRL_BIT
+#define LD_REG_CTRL_LOGIC			0
+#define BIT_STREAM_CFG_BIT			BIT(0)
+
+/* BATT_INFO_QNOVO_SCALER */
+#define QNOVO_SCALER_MASK			GENMASK(7, 0)
+
+/* BATT_INFO_CRG_SERVICES */
+#define FG_CRC_TRM_EN_BIT			BIT(0)
+
+/* BATT_INFO_VBATT_LSB/MSB */
+#define VBATT_MASK				GENMASK(7, 0)
+
+/* BATT_INFO_IBATT_LSB/MSB */
+#define IBATT_MASK				GENMASK(7, 0)
+
+/* BATT_INFO_ESR_LSB/MSB */
+#define ESR_LSB_MASK				GENMASK(7, 0)
+#define ESR_MSB_MASK				GENMASK(5, 0)
+
+/* BATT_INFO_VADC_LSB/MSB */
+#define VADC_LSB_MASK				GENMASK(7, 0)
+#define VADC_MSB_MASK				GENMASK(6, 0)
+
+/* BATT_INFO_IADC_LSB/MSB */
+#define IADC_LSB_MASK				GENMASK(7, 0)
+#define IADC_MSB_MASK				GENMASK(6, 0)
+
+/* BATT_INFO_TM_MISC */
+#define FORCE_SEQ_RESP_TOGGLE_BIT		BIT(6)
+#define ALG_DIRECT_VALID_DATA_BIT		BIT(5)
+#define ALG_DIRECT_MODE_EN_BIT			BIT(4)
+#define BATT_VADC_CONV_BIT			BIT(3)
+#define BATT_IADC_CONV_BIT			BIT(2)
+#define ADC_ENABLE_REG_CTRL_BIT			BIT(1)
+#define WDOG_FORCE_EXP_BIT			BIT(0)
+/* only for v1.1 */
+#define ESR_PULSE_FORCE_CTRL_BIT		BIT(7)
+
+/* BATT_INFO_TM_MISC1 */
+/* for v2.0 and above */
+#define ESR_REQ_CTL_BIT				BIT(1)
+#define ESR_REQ_CTL_EN_BIT			BIT(0)
+
+/* FG_MEM_IF register and bit definitions */
+#define MEM_IF_INT_RT_STS(chip)			((chip->mem_if_base) + 0x10)
+#define MEM_IF_MEM_INTF_CFG(chip)		((chip->mem_if_base) + 0x50)
+#define MEM_IF_IMA_CTL(chip)			((chip->mem_if_base) + 0x51)
+#define MEM_IF_IMA_CFG(chip)			((chip->mem_if_base) + 0x52)
+#define MEM_IF_IMA_OPR_STS(chip)		((chip->mem_if_base) + 0x54)
+#define MEM_IF_IMA_EXP_STS(chip)		((chip->mem_if_base) + 0x55)
+#define MEM_IF_IMA_HW_STS(chip)			((chip->mem_if_base) + 0x56)
+#define MEM_IF_FG_BEAT_COUNT(chip)		((chip->mem_if_base) + 0x57)
+#define MEM_IF_IMA_ERR_STS(chip)		((chip->mem_if_base) + 0x5F)
+#define MEM_IF_IMA_BYTE_EN(chip)		((chip->mem_if_base) + 0x60)
+#define MEM_IF_ADDR_LSB(chip)			((chip->mem_if_base) + 0x61)
+#define MEM_IF_ADDR_MSB(chip)			((chip->mem_if_base) + 0x62)
+#define MEM_IF_WR_DATA0(chip)			((chip->mem_if_base) + 0x63)
+#define MEM_IF_WR_DATA3(chip)			((chip->mem_if_base) + 0x66)
+#define MEM_IF_RD_DATA0(chip)			((chip->mem_if_base) + 0x67)
+#define MEM_IF_RD_DATA3(chip)			((chip->mem_if_base) + 0x6A)
+#define MEM_IF_DMA_STS(chip)			((chip->mem_if_base) + 0x70)
+#define MEM_IF_DMA_CTL(chip)			((chip->mem_if_base) + 0x71)
+
+/* MEM_IF_INT_RT_STS */
+#define MEM_XCP_BIT				BIT(1)
+
+/* MEM_IF_MEM_INTF_CFG */
+#define MEM_ACCESS_REQ_BIT			BIT(7)
+#define IACS_SLCT_BIT				BIT(5)
+
+/* MEM_IF_IMA_CTL */
+#define MEM_ACS_BURST_BIT			BIT(7)
+#define IMA_WR_EN_BIT				BIT(6)
+#define IMA_CTL_MASK				GENMASK(7, 6)
+
+/* MEM_IF_IMA_CFG */
+#define IACS_CLR_BIT				BIT(2)
+#define IACS_INTR_SRC_SLCT_BIT			BIT(3)
+#define STATIC_CLK_EN_BIT			BIT(4)
+
+/* MEM_IF_IMA_OPR_STS */
+#define IACS_RDY_BIT				BIT(1)
+
+/* MEM_IF_IMA_EXP_STS */
+#define IACS_ERR_BIT				BIT(0)
+#define XCT_TYPE_ERR_BIT			BIT(1)
+#define DATA_RD_ERR_BIT				BIT(3)
+#define DATA_WR_ERR_BIT				BIT(4)
+#define ADDR_BURST_WRAP_BIT			BIT(5)
+#define ADDR_STABLE_ERR_BIT			BIT(7)
+
+/* MEM_IF_IMA_ERR_STS */
+#define ADDR_STBL_ERR_BIT			BIT(7)
+#define WR_ACS_ERR_BIT				BIT(6)
+#define RD_ACS_ERR_BIT				BIT(5)
+
+/* MEM_IF_FG_BEAT_COUNT */
+#define BEAT_COUNT_MASK				GENMASK(3, 0)
+
+/* MEM_IF_DMA_STS */
+#define DMA_WRITE_ERROR_BIT			BIT(1)
+#define DMA_READ_ERROR_BIT			BIT(2)
+
+/* MEM_IF_DMA_CTL */
+#define DMA_CLEAR_LOG_BIT			BIT(0)
+#endif
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
new file mode 100644
index 0000000..839a771
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -0,0 +1,901 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "fg-core.h"
+
+void fg_circ_buf_add(struct fg_circ_buf *buf, int val)
+{
+	buf->arr[buf->head] = val;
+	buf->head = (buf->head + 1) % ARRAY_SIZE(buf->arr);
+	buf->size = min(++buf->size, (int)ARRAY_SIZE(buf->arr));
+}
+
+void fg_circ_buf_clr(struct fg_circ_buf *buf)
+{
+	memset(buf, 0, sizeof(*buf));
+}
+
+int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg)
+{
+	s64 result = 0;
+	int i;
+
+	if (buf->size == 0)
+		return -ENODATA;
+
+	for (i = 0; i < buf->size; i++)
+		result += buf->arr[i];
+
+	*avg = div_s64(result, buf->size);
+	return 0;
+}
+
+int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output)
+{
+	int i;
+	s64 temp;
+
+	if (pts == NULL) {
+		pr_err("Table is NULL\n");
+		return -EINVAL;
+	}
+
+	if (tablesize < 1) {
+		pr_err("Table has no entries\n");
+		return -ENOENT;
+	}
+
+	if (tablesize == 1) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (pts[0].x > pts[1].x) {
+		pr_err("Table is not in ascending order\n");
+		return -EINVAL;
+	}
+
+	if (input <= pts[0].x) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (input >= pts[tablesize - 1].x) {
+		*output = pts[tablesize - 1].y;
+		return 0;
+	}
+
+	for (i = 1; i < tablesize; i++) {
+		if (input >= pts[i].x)
+			continue;
+
+		temp = (s64)(pts[i].y - pts[i - 1].y) *
+						(s64)(input - pts[i - 1].x);
+		temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+		*output = temp + pts[i - 1].y;
+		return 0;
+	}
+
+	return -EINVAL;
+}
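+
+/*
+ * Illustrative example (made-up table): with pts = {{0, 0}, {10, 100}} and
+ * input = 5, fg_lerp() linearly interpolates and sets *output = 50.
+ */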
+
+static struct fg_dbgfs dbgfs_data = {
+	.help_msg = {
+	.data =
+	"FG Debug-FS support\n"
+	"\n"
+	"Hierarchy schema:\n"
+	"/sys/kernel/debug/fg_sram\n"
+	"       /help            -- Static help text\n"
+	"       /address  -- Starting register address for reads or writes\n"
+	"       /count    -- Number of registers to read (only used for reads)\n"
+	"       /data     -- Initiates the SRAM read (formatted output)\n"
+	"\n",
+	},
+};
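+
+/*
+ * Illustrative debugfs usage from a shell (a sketch; assumes debugfs is
+ * mounted at /sys/kernel/debug and follows the directory layout created by
+ * fg_debugfs_create()/fg_sram_debugfs_create() below; the address value is
+ * hypothetical):
+ *
+ *	echo 0x20 > /sys/kernel/debug/fg/sram/address
+ *	echo 4 > /sys/kernel/debug/fg/sram/count
+ *	cat /sys/kernel/debug/fg/sram/data
+ */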
+
+static bool is_usb_present(struct fg_chip *chip)
+{
+	union power_supply_propval pval = {0, };
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (chip->usb_psy)
+		power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT, &pval);
+	else
+		return false;
+
+	return pval.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+	union power_supply_propval pval = {0, };
+
+	if (!chip->dc_psy)
+		chip->dc_psy = power_supply_get_by_name("dc");
+
+	if (chip->dc_psy)
+		power_supply_get_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_PRESENT, &pval);
+	else
+		return false;
+
+	return pval.intval != 0;
+}
+
+bool is_input_present(struct fg_chip *chip)
+{
+	return is_usb_present(chip) || is_dc_present(chip);
+}
+
+#define EXPONENT_SHIFT		11
+#define EXPONENT_OFFSET		-9
+#define MANTISSA_SIGN_BIT	10
+#define MICRO_UNIT		1000000
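+/*
+ * Illustrative example (made-up input): fg_float_decode(0x4801) yields
+ * exponent = (0x4801 >> 11) + EXPONENT_OFFSET = 0 and mantissa =
+ * sign_extend32(0x4801, 10) * MICRO_UNIT = 1000000, so the decoded value
+ * is 1000000 (i.e. 1.0 in micro units).
+ */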
+s64 fg_float_decode(u16 val)
+{
+	s8 exponent;
+	s32 mantissa;
+
+	/* mantissa bits are shifted out during sign extension */
+	exponent = ((s16)val >> EXPONENT_SHIFT) + EXPONENT_OFFSET;
+	/* exponent bits are shifted out during sign extension */
+	mantissa = sign_extend32(val, MANTISSA_SIGN_BIT) * MICRO_UNIT;
+
+	if (exponent < 0)
+		return (s64)mantissa >> -exponent;
+
+	return (s64)mantissa << exponent;
+}
+
+void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+	int pos = 0;
+	int i;
+
+	for (i = 0; i < buf_len; i++) {
+		pos += scnprintf(str + pos, str_len - pos, "%02x", buf[i]);
+		if (i < buf_len - 1)
+			pos += scnprintf(str + pos, str_len - pos, " ");
+	}
+}
+
+void dump_sram(u8 *buf, int addr, int len)
+{
+	int i;
+	char str[16];
+
+	/*
+	 * The length passed should be a multiple of 4 as each FG SRAM word
+	 * holds 4 bytes. To keep this simple, even if a length that is not
+	 * a multiple of 4 bytes (or is less than 4 bytes) is passed, the
+	 * SRAM registers are always dumped in multiples of 4 bytes.
+	 */
+	for (i = 0; i < len; i += 4) {
+		str[0] = '\0';
+		fill_string(str, sizeof(str), buf + i, 4);
+		pr_info("%03d %s\n", addr + (i / 4), str);
+	}
+}
+
+static inline bool fg_sram_address_valid(u16 address, int len)
+{
+	if (address > FG_SRAM_ADDRESS_MAX)
+		return false;
+
+	if ((address + DIV_ROUND_UP(len, 4)) > FG_SRAM_ADDRESS_MAX + 1)
+		return false;
+
+	return true;
+}
+
+#define SOC_UPDATE_WAIT_MS	1500
+int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags)
+{
+	int rc = 0;
+	bool tried_again = false;
+	bool atomic_access = false;
+
+	if (!chip)
+		return -ENXIO;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	if (!fg_sram_address_valid(address, len))
+		return -EFAULT;
+
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_WRITE, true, 0);
+	mutex_lock(&chip->sram_rw_lock);
+
+	if ((flags & FG_IMA_ATOMIC) && chip->irqs[SOC_UPDATE_IRQ].irq) {
+		/*
+		 * This interrupt needs to be enabled only when it is
+		 * required; it is kept disabled at all other times.
+		 */
+		reinit_completion(&chip->soc_update);
+		enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
+		atomic_access = true;
+	} else {
+		flags = FG_IMA_DEFAULT;
+	}
+wait:
+	/*
+	 * Atomic access means waiting for the SOC_UPDATE interrupt from
+	 * FG_ALG and doing the transaction after that. This makes sure
+	 * that no SOC update happens while an IMA write is in progress.
+	 * The SOC_UPDATE interrupt fires every FG cycle (~1.47 seconds).
+	 */
+	if (atomic_access) {
+		/* Wait for SOC_UPDATE completion */
+		rc = wait_for_completion_interruptible_timeout(
+			&chip->soc_update,
+			msecs_to_jiffies(SOC_UPDATE_WAIT_MS));
+
+		/* If we were interrupted, wait one more time. */
+		if (rc == -ERESTARTSYS && !tried_again) {
+			tried_again = true;
+			goto wait;
+		} else if (rc <= 0) {
+			pr_err("wait for soc_update timed out rc=%d\n", rc);
+			goto out;
+		}
+	}
+
+	rc = fg_interleaved_mem_write(chip, address, offset, val, len,
+			atomic_access);
+	if (rc < 0)
+		pr_err("Error in writing SRAM address 0x%x[%d], rc=%d\n",
+			address, offset, rc);
+out:
+	if (atomic_access)
+		disable_irq_nosync(chip->irqs[SOC_UPDATE_IRQ].irq);
+
+	mutex_unlock(&chip->sram_rw_lock);
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_WRITE, false, 0);
+	return rc;
+}
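+
+/*
+ * Illustrative usage from a hypothetical caller, writing two bytes at SRAM
+ * word 24, byte offset 1, synchronized with the SOC_UPDATE interrupt (the
+ * address and data below are made up):
+ *
+ *	u8 buf[2] = { 0x12, 0x34 };
+ *	rc = fg_sram_write(chip, 24, 1, buf, 2, FG_IMA_ATOMIC);
+ */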
+
+int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags)
+{
+	int rc = 0;
+
+	if (!chip)
+		return -ENXIO;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	if (!fg_sram_address_valid(address, len))
+		return -EFAULT;
+
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_READ, true, 0);
+	mutex_lock(&chip->sram_rw_lock);
+
+	rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+	if (rc < 0)
+		pr_err("Error in reading SRAM address 0x%x[%d], rc=%d\n",
+			address, offset, rc);
+
+	mutex_unlock(&chip->sram_rw_lock);
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_READ, false, 0);
+	return rc;
+}
+
+int fg_sram_masked_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 mask, u8 val, int flags)
+{
+	int rc = 0;
+	u8 buf[4];
+
+	rc = fg_sram_read(chip, address, 0, buf, 4, flags);
+	if (rc < 0) {
+		pr_err("sram read failed: address=%03X, rc=%d\n", address, rc);
+		return rc;
+	}
+
+	buf[offset] &= ~mask;
+	buf[offset] |= val & mask;
+
+	rc = fg_sram_write(chip, address, 0, buf, 4, flags);
+	if (rc < 0) {
+		pr_err("sram write failed: address=%03X, rc=%d\n", address, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int fg_read(struct fg_chip *chip, int addr, u8 *val, int len)
+{
+	int rc, i;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	rc = regmap_bulk_read(chip->regmap, addr, val, len);
+
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_read failed for address %04x rc=%d\n",
+			addr, rc);
+		return rc;
+	}
+
+	if (*chip->debug_mask & FG_BUS_READ) {
+		pr_info("length %d addr=%04x\n", len, addr);
+		for (i = 0; i < len; i++)
+			pr_info("val[%d]: %02x\n", i, val[i]);
+	}
+
+	return 0;
+}
+
+int fg_write(struct fg_chip *chip, int addr, u8 *val, int len)
+{
+	int rc, i;
+	bool sec_access = false;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	mutex_lock(&chip->bus_lock);
+	sec_access = (addr & 0x00FF) > 0xD0;
+	if (sec_access) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			dev_err(chip->dev, "regmap_write failed for address %x rc=%d\n",
+				addr, rc);
+			goto out;
+		}
+	}
+
+	if (len > 1)
+		rc = regmap_bulk_write(chip->regmap, addr, val, len);
+	else
+		rc = regmap_write(chip->regmap, addr, *val);
+
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_write failed for address %04x rc=%d\n",
+			addr, rc);
+		goto out;
+	}
+
+	if (*chip->debug_mask & FG_BUS_WRITE) {
+		pr_info("length %d addr=%04x\n", len, addr);
+		for (i = 0; i < len; i++)
+			pr_info("val[%d]: %02x\n", i, val[i]);
+	}
+out:
+	mutex_unlock(&chip->bus_lock);
+	return rc;
+}
+
+int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val)
+{
+	int rc;
+	bool sec_access = false;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	mutex_lock(&chip->bus_lock);
+	sec_access = (addr & 0x00FF) > 0xD0;
+	if (sec_access) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			dev_err(chip->dev, "regmap_write failed for address %x rc=%d\n",
+				addr, rc);
+			goto out;
+		}
+	}
+
+	rc = regmap_update_bits(chip->regmap, addr, mask, val);
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_update_bits failed for address %04x rc=%d\n",
+			addr, rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_BUS_WRITE, "addr=%04x mask: %02x val: %02x\n", addr,
+		mask, val);
+out:
+	mutex_unlock(&chip->bus_lock);
+	return rc;
+}
+
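+/*
+ * Illustrative example (made-up input): twos_compliment_extend(0x8001, 15)
+ * sign-extends a 16-bit two's complement value and returns
+ * 0xFFFFFFFFFFFF8001, i.e. -32767.
+ */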
+int64_t twos_compliment_extend(int64_t val, int sign_bit_pos)
+{
+	int i, nbytes = DIV_ROUND_UP(sign_bit_pos, 8);
+	int64_t mask, val_out;
+
+	val_out = val;
+	mask = 1 << sign_bit_pos;
+	if (val & mask) {
+		for (i = 8; i > nbytes; i--) {
+			mask = 0xFFLL << ((i - 1) * 8);
+			val_out |= mask;
+		}
+
+		if ((nbytes * 8) - 1 > sign_bit_pos) {
+			mask = 1 << sign_bit_pos;
+			for (i = 1; i <= (nbytes * 8) - sign_bit_pos; i++)
+				val_out |= mask << i;
+		}
+	}
+
+	pr_debug("nbytes: %d val: %llx val_out: %llx\n", nbytes, val, val_out);
+	return val_out;
+}
+
+/* All the debugfs related functions are defined below */
+static int fg_sram_dfs_open(struct inode *inode, struct file *file)
+{
+	struct fg_log_buffer *log;
+	struct fg_trans *trans;
+	u8 *data_buf;
+
+	size_t logbufsize = SZ_4K;
+	size_t databufsize = SZ_4K;
+
+	if (!dbgfs_data.chip) {
+		pr_err("dbgfs_data is not initialized\n");
+		return -EINVAL;
+	}
+
+	/* Per file "transaction" data */
+	trans = devm_kzalloc(dbgfs_data.chip->dev, sizeof(*trans), GFP_KERNEL);
+	if (!trans)
+		return -ENOMEM;
+
+	/* Allocate log buffer */
+	log = devm_kzalloc(dbgfs_data.chip->dev, logbufsize, GFP_KERNEL);
+	if (!log)
+		return -ENOMEM;
+
+	log->rpos = 0;
+	log->wpos = 0;
+	log->len = logbufsize - sizeof(*log);
+
+	/* Allocate data buffer */
+	data_buf = devm_kzalloc(dbgfs_data.chip->dev, databufsize, GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	trans->log = log;
+	trans->data = data_buf;
+	trans->cnt = dbgfs_data.cnt;
+	trans->addr = dbgfs_data.addr;
+	trans->chip = dbgfs_data.chip;
+	trans->offset = trans->addr;
+	mutex_init(&trans->fg_dfs_lock);
+
+	file->private_data = trans;
+	return 0;
+}
+
+static int fg_sram_dfs_close(struct inode *inode, struct file *file)
+{
+	struct fg_trans *trans = file->private_data;
+
+	if (trans && trans->log && trans->data) {
+		file->private_data = NULL;
+		mutex_destroy(&trans->fg_dfs_lock);
+		devm_kfree(trans->chip->dev, trans->log);
+		devm_kfree(trans->chip->dev, trans->data);
+		devm_kfree(trans->chip->dev, trans);
+	}
+
+	return 0;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to the @log buffer,
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
+{
+	va_list args;
+	int cnt;
+	char *buf = &log->data[log->wpos];
+	size_t size = log->len - log->wpos;
+
+	va_start(args, fmt);
+	cnt = vscnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	log->wpos += cnt;
+	return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ * @offset: SRAM address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable.  Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 12-bit SRAM address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read from the SRAM.  When the cnt reaches 0, all requested bytes have
+ * been read.
+ */
+static int write_next_line_to_log(struct fg_trans *trans, int offset,
+				size_t *pcnt)
+{
+	int i;
+	u8 data[ITEMS_PER_LINE];
+	u16 address;
+	struct fg_log_buffer *log = trans->log;
+	int cnt = 0;
+	int items_to_read = min(ARRAY_SIZE(data), *pcnt);
+	int items_to_log = min(ITEMS_PER_LINE, items_to_read);
+
+	/* Buffer needs enough space for an entire line */
+	if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+		goto done;
+
+	memcpy(data, trans->data + (offset - trans->addr), items_to_read);
+	*pcnt -= items_to_read;
+
+	/* The address is now in words and increments by 1 per ITEMS_PER_LINE bytes. */
+	address = trans->addr + ((offset - trans->addr) / ITEMS_PER_LINE);
+	cnt = print_to_log(log, "%3.3d ", address & 0xfff);
+	if (cnt == 0)
+		goto done;
+
+	/* Log the data items */
+	for (i = 0; i < items_to_log; ++i) {
+		cnt = print_to_log(log, "%2.2X ", data[i]);
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* If the last character was a space, then replace it with a newline */
+	if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+		log->data[log->wpos - 1] = '\n';
+
+done:
+	return cnt;
+}
+
+/**
+ * get_log_data - reads data from SRAM and saves to the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct fg_trans *trans)
+{
+	int cnt, rc;
+	int last_cnt;
+	int items_read;
+	int total_items_read = 0;
+	u32 offset = trans->offset;
+	size_t item_cnt = trans->cnt;
+	struct fg_log_buffer *log = trans->log;
+
+	if (item_cnt == 0)
+		return 0;
+
+	if (item_cnt > SZ_4K) {
+		pr_err("Reading too many bytes\n");
+		return -EINVAL;
+	}
+
+	pr_debug("addr: %d offset: %d count: %d\n", trans->addr, trans->offset,
+		trans->cnt);
+	rc = fg_sram_read(trans->chip, trans->addr, 0,
+			trans->data, trans->cnt, 0);
+	if (rc < 0) {
+		pr_err("SRAM read failed: rc = %d\n", rc);
+		return rc;
+	}
+	/* Reset the log buffer 'pointers' */
+	log->wpos = log->rpos = 0;
+
+	/* Keep reading data until the log is full */
+	do {
+		last_cnt = item_cnt;
+		cnt = write_next_line_to_log(trans, offset, &item_cnt);
+		items_read = last_cnt - item_cnt;
+		offset += items_read;
+		total_items_read += items_read;
+	} while (cnt && item_cnt > 0);
+
+	/* Adjust the transaction offset and count */
+	trans->cnt = item_cnt;
+	trans->offset += total_items_read;
+
+	return total_items_read;
+}
+
+/**
+ * fg_sram_dfs_reg_read: reads value(s) from SRAM and fills the user's buffer
+ *  with a byte array (coded as a string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t fg_sram_dfs_reg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct fg_trans *trans = file->private_data;
+	struct fg_log_buffer *log = trans->log;
+	size_t ret;
+	size_t len;
+
+	mutex_lock(&trans->fg_dfs_lock);
+	/* Is the log buffer empty? */
+	if (log->rpos >= log->wpos) {
+		if (get_log_data(trans) <= 0) {
+			len = 0;
+			goto unlock_mutex;
+		}
+	}
+
+	len = min(count, log->wpos - log->rpos);
+
+	ret = copy_to_user(buf, &log->data[log->rpos], len);
+	if (ret == len) {
+		pr_err("error copying SRAM register values to user\n");
+		len = -EFAULT;
+		goto unlock_mutex;
+	}
+
+	/* 'ret' is the number of bytes not copied */
+	len -= ret;
+
+	*ppos += len;
+	log->rpos += len;
+
+unlock_mutex:
+	mutex_unlock(&trans->fg_dfs_lock);
+	return len;
+}
+
+/**
+ * fg_sram_dfs_reg_write: write user's byte array (coded as string) to SRAM.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t fg_sram_dfs_reg_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int bytes_read;
+	int data;
+	int pos = 0;
+	int cnt = 0;
+	u8  *values;
+	char *kbuf;
+	size_t ret = 0;
+	struct fg_trans *trans = file->private_data;
+	u32 address = trans->addr;
+
+	mutex_lock(&trans->fg_dfs_lock);
+	/* Make a copy of the user data */
+	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!kbuf) {
+		ret = -ENOMEM;
+		goto unlock_mutex;
+	}
+
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret == count) {
+		pr_err("failed to copy data from user\n");
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	count -= ret;
+	*ppos += count;
+	kbuf[count] = '\0';
+
+	/* Reuse the text buffer to hold the parsed raw data */
+	values = kbuf;
+
+	/* Parse the data in the buffer.  It should be a string of numbers */
+	while ((pos < count) &&
+		sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+		/*
+		 * To keep this functionally correct, we shouldn't receive a
+		 * number string longer than 5 characters. Also, make sure
+		 * that pos never overflows beyond the limit.
+		 */
+		if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+			cnt = 0;
+			ret = -EINVAL;
+			break;
+		}
+		pos += bytes_read;
+		values[cnt++] = data & 0xff;
+	}
+
+	if (!cnt)
+		goto free_buf;
+
+	pr_debug("address %d, count %d\n", address, cnt);
+	/* Perform the write(s) */
+
+	ret = fg_sram_write(trans->chip, address, 0, values, cnt, 0);
+	if (ret) {
+		pr_err("SRAM write failed, err = %zu\n", ret);
+	} else {
+		ret = count;
+		trans->offset += cnt > 4 ? 4 : cnt;
+	}
+
+free_buf:
+	kfree(kbuf);
+unlock_mutex:
+	mutex_unlock(&trans->fg_dfs_lock);
+	return ret;
+}
+
+static const struct file_operations fg_sram_dfs_reg_fops = {
+	.open		= fg_sram_dfs_open,
+	.release	= fg_sram_dfs_close,
+	.read		= fg_sram_dfs_reg_read,
+	.write		= fg_sram_dfs_reg_write,
+};
+
+/*
+ * fg_sram_debugfs_create: adds the fg_sram debugfs entries
+ * @return zero on success
+ */
+static int fg_sram_debugfs_create(struct fg_chip *chip)
+{
+	struct dentry *dfs_sram;
+	struct dentry *file;
+	mode_t dfs_mode = 0600;
+
+	pr_debug("Creating FG_SRAM debugfs file-system\n");
+	dfs_sram = debugfs_create_dir("sram", chip->dfs_root);
+	if (!dfs_sram) {
+		pr_err("error creating fg sram dfs rc=%ld\n",
+		       (long)dfs_sram);
+		return -ENOMEM;
+	}
+
+	dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+	file = debugfs_create_blob("help", 0444, dfs_sram,
+					&dbgfs_data.help_msg);
+	if (!file) {
+		pr_err("error creating help entry\n");
+		goto err_remove_fs;
+	}
+
+	dbgfs_data.chip = chip;
+
+	file = debugfs_create_u32("count", dfs_mode, dfs_sram,
+					&(dbgfs_data.cnt));
+	if (!file) {
+		pr_err("error creating 'count' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_x32("address", dfs_mode, dfs_sram,
+					&(dbgfs_data.addr));
+	if (!file) {
+		pr_err("error creating 'address' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_file("data", dfs_mode, dfs_sram, &dbgfs_data,
+					&fg_sram_dfs_reg_fops);
+	if (!file) {
+		pr_err("error creating 'data' entry\n");
+		goto err_remove_fs;
+	}
+
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(dfs_sram);
+	return -ENOMEM;
+}
+
+static int fg_alg_flags_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t fg_alg_flags_read(struct file *file, char __user *userbuf,
+				 size_t count, loff_t *ppos)
+{
+	struct fg_chip *chip = file->private_data;
+	char buf[512];
+	u8 alg_flags = 0;
+	int rc, i, len;
+
+	rc = fg_sram_read(chip, chip->sp[FG_SRAM_ALG_FLAGS].addr_word,
+			  chip->sp[FG_SRAM_ALG_FLAGS].addr_byte, &alg_flags, 1,
+			  FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to read algorithm flags rc=%d\n", rc);
+		return -EFAULT;
+	}
+
+	len = 0;
+	for (i = 0; i < ALG_FLAG_MAX; ++i) {
+		if (len > ARRAY_SIZE(buf) - 1)
+			return -EFAULT;
+		if (chip->alg_flags[i].invalid)
+			continue;
+
+		len += snprintf(buf + len, sizeof(buf) - sizeof(*buf) * len,
+				"%s = %d\n", chip->alg_flags[i].name,
+				(bool)(alg_flags & chip->alg_flags[i].bit));
+	}
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fg_alg_flags_fops = {
+	.open = fg_alg_flags_open,
+	.read = fg_alg_flags_read,
+};
+
+int fg_debugfs_create(struct fg_chip *chip)
+{
+	int rc;
+
+	pr_debug("Creating debugfs file-system\n");
+	chip->dfs_root = debugfs_create_dir("fg", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		if (PTR_ERR(chip->dfs_root) == -ENODEV)
+			pr_err("debugfs is not enabled in the kernel\n");
+		else
+			pr_err("error creating fg dfs root rc=%ld\n",
+			       (long)chip->dfs_root);
+		return -ENODEV;
+	}
+
+	rc = fg_sram_debugfs_create(chip);
+	if (rc < 0) {
+		pr_err("failed to create sram dfs rc=%d\n", rc);
+		goto err_remove_fs;
+	}
+
+	if (!debugfs_create_file("alg_flags", 0400, chip->dfs_root, chip,
+				 &fg_alg_flags_fops)) {
+		pr_err("failed to create alg_flags file\n");
+		goto err_remove_fs;
+	}
+
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(chip->dfs_root);
+	return -ENOMEM;
+}
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
new file mode 100644
index 0000000..39a0dcb6
--- /dev/null
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -0,0 +1,662 @@
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "pmic-voter.h"
+
+#define NUM_MAX_CLIENTS	8
+#define DEBUG_FORCE_CLIENT	"DEBUG_FORCE_CLIENT"
+
+static DEFINE_SPINLOCK(votable_list_slock);
+static LIST_HEAD(votable_list);
+
+static struct dentry *debug_root;
+
+struct client_vote {
+	bool	enabled;
+	int	value;
+};
+
+struct votable {
+	const char		*name;
+	struct list_head	list;
+	struct client_vote	votes[NUM_MAX_CLIENTS];
+	int			num_clients;
+	int			type;
+	int			effective_client_id;
+	int			effective_result;
+	struct mutex		vote_lock;
+	void			*data;
+	int			(*callback)(struct votable *votable,
+						void *data,
+						int effective_result,
+						const char *effective_client);
+	char			*client_strs[NUM_MAX_CLIENTS];
+	bool			voted_on;
+	struct dentry		*root;
+	struct dentry		*status_ent;
+	u32			force_val;
+	struct dentry		*force_val_ent;
+	bool			force_active;
+	struct dentry		*force_active_ent;
+};
+
+/**
+ * vote_set_any()
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	sets 0 or 1 based on the voting
+ * @eff_id:	Always returns the client_id argument
+ *
+ * Note that for a SET_ANY voter, the value is always the same as enabled.
+ * There is no concept of a voter abstaining from the election, hence the
+ * effective_id is never invalid during an election.
+ *
+ * Context:
+ *	Must be called with the votable->lock held
+ */
+static void vote_set_any(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = 0;
+
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+		*eff_res |= votable->votes[i].enabled;
+
+	*eff_id = client_id;
+}
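+
+/*
+ * Illustrative example (made-up votes): with client votes {off, on, off},
+ * vote_set_any() sets *eff_res = 1; with all clients off it sets
+ * *eff_res = 0.
+ */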
+
+/**
+ * vote_min() -
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	sets this to the min. of all the values amongst enabled voters.
+ *		If there is no enabled client, this is set to -EINVAL
+ * @eff_id:	sets this to the client id that has the min value amongst all
+ *		the enabled clients. If there is no enabled client, sets this
+ *		to -EINVAL
+ *
+ * Context:
+ *	Must be called with the votable->lock held
+ */
+static void vote_min(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = INT_MAX;
+	*eff_id = -EINVAL;
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+		if (votable->votes[i].enabled
+			&& *eff_res > votable->votes[i].value) {
+			*eff_res = votable->votes[i].value;
+			*eff_id = i;
+		}
+	}
+	if (*eff_id == -EINVAL)
+		*eff_res = -EINVAL;
+}
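+
+/*
+ * Illustrative example (made-up votes): with enabled votes of 5 and 3 and a
+ * disabled vote of 1, vote_min() sets *eff_res = 3 and *eff_id to the client
+ * that voted 3.
+ */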
+
+/**
+ * vote_max() -
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	sets this to the max. of all the values amongst enabled voters.
+ *		If there is no enabled client, this is set to -EINVAL
+ * @eff_id:	sets this to the client id that has the max value amongst all
+ *		the enabled clients. If there is no enabled client, sets this to
+ *		-EINVAL
+ *
+ * Context:
+ *	Must be called with the votable->lock held
+ */
+static void vote_max(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = INT_MIN;
+	*eff_id = -EINVAL;
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+		if (votable->votes[i].enabled &&
+				*eff_res < votable->votes[i].value) {
+			*eff_res = votable->votes[i].value;
+			*eff_id = i;
+		}
+	}
+	if (*eff_id == -EINVAL)
+		*eff_res = -EINVAL;
+}
+
+static int get_client_id(struct votable *votable, const char *client_str)
+{
+	int i;
+
+	for (i = 0; i < votable->num_clients; i++) {
+		if (votable->client_strs[i]
+		 && (strcmp(votable->client_strs[i], client_str) == 0))
+			return i;
+	}
+
+	/* new client */
+	for (i = 0; i < votable->num_clients; i++) {
+		if (!votable->client_strs[i]) {
+			votable->client_strs[i]
+				= kstrdup(client_str, GFP_KERNEL);
+			if (!votable->client_strs[i])
+				return -ENOMEM;
+			return i;
+		}
+	}
+	return -EINVAL;
+}
+
+static char *get_client_str(struct votable *votable, int client_id)
+{
+	if (client_id == -EINVAL)
+		return NULL;
+
+	return votable->client_strs[client_id];
+}
+
+void lock_votable(struct votable *votable)
+{
+	mutex_lock(&votable->vote_lock);
+}
+
+void unlock_votable(struct votable *votable)
+{
+	mutex_unlock(&votable->vote_lock);
+}
+
+/**
+ * get_client_vote() -
+ * get_client_vote_locked() -
+ *		The unlocked and locked variants of getting a client's voted
+ *		value.
+ * @votable:	the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ *	The value the client voted for. -EINVAL is returned if the client
+ *	is not enabled or the client is not found.
+ */
+int get_client_vote_locked(struct votable *votable, const char *client_str)
+{
+	int client_id = get_client_id(votable, client_str);
+
+	if (client_id < 0)
+		return -EINVAL;
+
+	if ((votable->type != VOTE_SET_ANY)
+		&& !votable->votes[client_id].enabled)
+		return -EINVAL;
+
+	return votable->votes[client_id].value;
+}
+
+int get_client_vote(struct votable *votable, const char *client_str)
+{
+	int value;
+
+	lock_votable(votable);
+	value = get_client_vote_locked(votable, client_str);
+	unlock_votable(votable);
+	return value;
+}
+
+/**
+ * get_effective_result() -
+ * get_effective_result_locked() -
+ *		The unlocked and locked variants of getting the effective value
+ *		amongst all the enabled voters.
+ *
+ * @votable:	the votable object
+ *
+ * Returns:
+ *	The effective result.
+ *	For a MIN or MAX votable, returns -EINVAL when the votable
+ *	object has been created but no clients have cast their votes, or
+ *	when the last enabled client disables its vote.
+ *	For a SET_ANY votable it returns 0 when no clients have cast their
+ *	votes, because for SET_ANY there is no concept of abstaining from the
+ *	election; the votes of all clients of a SET_ANY votable default to false.
+ */
+int get_effective_result_locked(struct votable *votable)
+{
+	if (votable->force_active)
+		return votable->force_val;
+
+	return votable->effective_result;
+}
+
+int get_effective_result(struct votable *votable)
+{
+	int value;
+
+	lock_votable(votable);
+	value = get_effective_result_locked(votable);
+	unlock_votable(votable);
+	return value;
+}
+
+/**
+ * get_effective_client() -
+ * get_effective_client_locked() -
+ *		The unlocked and locked variants of getting the effective client
+ *		amongst all the enabled voters.
+ *
+ * @votable:	the votable object
+ *
+ * Returns:
+ *	The effective client.
+ *	For a MIN or MAX votable, returns NULL when the votable
+ *	object has been created but no clients have cast their votes, or
+ *	when the last enabled client disables its vote.
+ *	For a SET_ANY votable it also returns NULL when no clients have cast
+ *	their votes. Since SET_ANY has no concept of abstaining from the
+ *	election, the only client that cast a vote, or the client that
+ *	caused the result to change, is returned.
+ */
+const char *get_effective_client_locked(struct votable *votable)
+{
+	if (votable->force_active)
+		return DEBUG_FORCE_CLIENT;
+
+	return get_client_str(votable, votable->effective_client_id);
+}
+
+const char *get_effective_client(struct votable *votable)
+{
+	const char *client_str;
+
+	lock_votable(votable);
+	client_str = get_effective_client_locked(votable);
+	unlock_votable(votable);
+	return client_str;
+}
+
+/**
+ * vote() - cast or update a client's vote on a votable
+ *
+ * @votable:	the votable object
+ * @client_str: the voting client
+ * @enabled:	This provides a means for the client to exclude itself from
+ *		the election. The client's val (the next argument) is
+ *		considered only when the client has enabled its participation.
+ *		Note that this takes a different meaning for the SET_ANY type,
+ *		as there is no concept of abstaining from participation:
+ *		enabled is treated as the boolean value the client is voting.
+ * @val:	The vote value. This is ignored for SET_ANY votables.
+ *		For MIN and MAX votables this value is used as the
+ *		client's vote value when enabled is true and is ignored
+ *		when enabled is false.
+ *
+ * The callback is called only when there is a change in the election results or
+ * if it is the first time someone is voting.
+ *
+ * Returns:
+ *	The return from the callback when present and needs to be called
+ *	or zero.
+ */
+int vote(struct votable *votable, const char *client_str, bool enabled, int val)
+{
+	int effective_id = -EINVAL;
+	int effective_result;
+	int client_id;
+	int rc = 0;
+	bool similar_vote = false;
+
+	lock_votable(votable);
+
+	client_id = get_client_id(votable, client_str);
+	if (client_id < 0) {
+		rc = client_id;
+		goto out;
+	}
+
+	/*
+	 * For SET_ANY the val is ignored; set it to enabled so that the
+	 * election still works based on value regardless of the type.
+	 */
+	if (votable->type == VOTE_SET_ANY)
+		val = enabled;
+
+	if ((votable->votes[client_id].enabled == enabled) &&
+		(votable->votes[client_id].value == val)) {
+		pr_debug("%s: %s,%d same vote %s of val=%d\n",
+				votable->name,
+				client_str, client_id,
+				enabled ? "on" : "off",
+				val);
+		similar_vote = true;
+	}
+
+	votable->votes[client_id].enabled = enabled;
+	votable->votes[client_id].value = val;
+
+	if (similar_vote && votable->voted_on) {
+		pr_debug("%s: %s,%d Ignoring similar vote %s of val=%d\n",
+			votable->name,
+			client_str, client_id, enabled ? "on" : "off", val);
+		goto out;
+	}
+
+	pr_debug("%s: %s,%d voting %s of val=%d\n",
+		votable->name,
+		client_str, client_id, enabled ? "on" : "off", val);
+	switch (votable->type) {
+	case VOTE_MIN:
+		vote_min(votable, client_id, &effective_result, &effective_id);
+		break;
+	case VOTE_MAX:
+		vote_max(votable, client_id, &effective_result, &effective_id);
+		break;
+	case VOTE_SET_ANY:
+		vote_set_any(votable, client_id,
+				&effective_result, &effective_id);
+		break;
+	default:
+		/* cannot happen for a validly created votable */
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Note that the callback is called with a NULL string and -EINVAL
+	 * result when there are no enabled votes
+	 */
+	if (!votable->voted_on
+			|| (effective_result != votable->effective_result)) {
+		votable->effective_client_id = effective_id;
+		votable->effective_result = effective_result;
+		pr_debug("%s: effective vote is now %d voted by %s,%d\n",
+			votable->name, effective_result,
+			get_client_str(votable, effective_id),
+			effective_id);
+		if (votable->callback && !votable->force_active)
+			rc = votable->callback(votable, votable->data,
+					effective_result,
+					get_client_str(votable, effective_id));
+	}
+
+	votable->voted_on = true;
+out:
+	unlock_votable(votable);
+	return rc;
+}
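+
+/*
+ * Illustrative usage sketch (not part of the driver; votable and client
+ * names below are hypothetical): a MIN votable where the lowest enabled
+ * value wins and the callback runs on the first vote and whenever the
+ * effective result changes.
+ *
+ *	static int fcc_cb(struct votable *v, void *data, int value,
+ *				const char *client)
+ *	{
+ *		pr_debug("FCC capped to %d uA by %s\n", value, client);
+ *		return 0;
+ *	}
+ *
+ *	votable = create_votable("FCC", VOTE_MIN, fcc_cb, chip);
+ *	vote(votable, "THERMAL", true, 1500000);
+ *	vote(votable, "USB_ICL", true, 2000000);
+ *	get_effective_result(votable);	// 1500000, voted by "THERMAL"
+ */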
+
+int rerun_election(struct votable *votable)
+{
+	int rc = 0;
+
+	lock_votable(votable);
+	if (votable->callback)
+		rc = votable->callback(votable,
+				votable->data,
+			votable->effective_result,
+			get_client_str(votable, votable->effective_client_id));
+	unlock_votable(votable);
+	return rc;
+}
+
+struct votable *find_votable(const char *name)
+{
+	unsigned long flags;
+	struct votable *v;
+	bool found = false;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	if (list_empty(&votable_list))
+		goto out;
+
+	list_for_each_entry(v, &votable_list, list) {
+		if (strcmp(v->name, name) == 0) {
+			found = true;
+			break;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	if (found)
+		return v;
+	else
+		return NULL;
+}
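+
+/*
+ * Illustrative usage sketch (hypothetical votable and client names): a
+ * module that does not own a votable can look it up by name and then
+ * vote on it.
+ *
+ *	votable = find_votable("FCC");
+ *	if (votable)
+ *		vote(votable, "MY_CLIENT", true, 1000000);
+ */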
+
+static int force_active_get(void *data, u64 *val)
+{
+	struct votable *votable = data;
+
+	*val = votable->force_active;
+
+	return 0;
+}
+
+static int force_active_set(void *data, u64 val)
+{
+	struct votable *votable = data;
+	int rc = 0;
+
+	lock_votable(votable);
+	votable->force_active = !!val;
+
+	if (!votable->callback)
+		goto out;
+
+	if (votable->force_active) {
+		rc = votable->callback(votable, votable->data,
+			votable->force_val,
+			DEBUG_FORCE_CLIENT);
+	} else {
+		rc = votable->callback(votable, votable->data,
+			votable->effective_result,
+			get_client_str(votable, votable->effective_client_id));
+	}
+out:
+	unlock_votable(votable);
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(votable_force_ops, force_active_get, force_active_set,
+		"%lld\n");
+
+static int show_votable_clients(struct seq_file *m, void *data)
+{
+	struct votable *votable = m->private;
+	int i;
+	char *type_str = "Unknown";
+	const char *effective_client_str;
+
+	lock_votable(votable);
+
+	seq_printf(m, "Votable %s:\n", votable->name);
+	seq_puts(m, "clients:\n");
+	for (i = 0; i < votable->num_clients; i++) {
+		if (votable->client_strs[i]) {
+			seq_printf(m, "%-15s:\t\ten=%d\t\tv=%d\n",
+					votable->client_strs[i],
+					votable->votes[i].enabled,
+					votable->votes[i].value);
+		}
+	}
+
+	switch (votable->type) {
+	case VOTE_MIN:
+		type_str = "Min";
+		break;
+	case VOTE_MAX:
+		type_str = "Max";
+		break;
+	case VOTE_SET_ANY:
+		type_str = "Set_any";
+		break;
+	}
+
+	seq_printf(m, "type: %s\n", type_str);
+	seq_puts(m, "Effective:\n");
+	effective_client_str = get_effective_client_locked(votable);
+	seq_printf(m, "%-15s:\t\tv=%d\n",
+			effective_client_str ? effective_client_str : "none",
+			get_effective_result_locked(votable));
+	unlock_votable(votable);
+
+	return 0;
+}
+
+static int votable_status_open(struct inode *inode, struct file *file)
+{
+	struct votable *votable = inode->i_private;
+
+	return single_open(file, show_votable_clients, votable);
+}
+
+static const struct file_operations votable_status_ops = {
+	.owner		= THIS_MODULE,
+	.open		= votable_status_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+struct votable *create_votable(const char *name,
+				int votable_type,
+				int (*callback)(struct votable *votable,
+					void *data,
+					int effective_result,
+					const char *effective_client),
+				void *data)
+{
+	struct votable *votable;
+	unsigned long flags;
+
+	votable = find_votable(name);
+	if (votable)
+		return ERR_PTR(-EEXIST);
+
+	if (debug_root == NULL) {
+		debug_root = debugfs_create_dir("pmic-votable", NULL);
+		if (!debug_root) {
+			pr_err("Couldn't create debug dir\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (votable_type >= NUM_VOTABLE_TYPES) {
+		pr_err("Invalid votable_type specified for voter\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	votable = kzalloc(sizeof(struct votable), GFP_KERNEL);
+	if (!votable)
+		return ERR_PTR(-ENOMEM);
+
+	votable->name = kstrdup(name, GFP_KERNEL);
+	if (!votable->name) {
+		kfree(votable);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votable->num_clients = NUM_MAX_CLIENTS;
+	votable->callback = callback;
+	votable->type = votable_type;
+	votable->data = data;
+	mutex_init(&votable->vote_lock);
+
+	/*
+	 * Because effective_result and client states are invalid
+	 * before the first vote, initialize them to -EINVAL
+	 */
+	votable->effective_result = -EINVAL;
+	if (votable->type == VOTE_SET_ANY)
+		votable->effective_result = 0;
+	votable->effective_client_id = -EINVAL;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	list_add(&votable->list, &votable_list);
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	votable->root = debugfs_create_dir(name, debug_root);
+	if (!votable->root) {
+		pr_err("Couldn't create debug dir %s\n", name);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votable->status_ent = debugfs_create_file("status", S_IFREG | 0444,
+				  votable->root, votable,
+				  &votable_status_ops);
+	if (!votable->status_ent) {
+		pr_err("Couldn't create status dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	votable->force_val_ent = debugfs_create_u32("force_val",
+					S_IFREG | 0644,
+					votable->root,
+					&(votable->force_val));
+
+	if (!votable->force_val_ent) {
+		pr_err("Couldn't create force_val dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	votable->force_active_ent = debugfs_create_file("force_active",
+					S_IFREG | 0444,
+					votable->root, votable,
+					&votable_force_ops);
+	if (!votable->force_active_ent) {
+		pr_err("Couldn't create force_active dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	return votable;
+}
+
+void destroy_votable(struct votable *votable)
+{
+	unsigned long flags;
+	int i;
+
+	if (!votable)
+		return;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	list_del(&votable->list);
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	debugfs_remove_recursive(votable->root);
+
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+		kfree(votable->client_strs[i]);
+
+	kfree(votable->name);
+	kfree(votable);
+}
diff --git a/drivers/power/supply/qcom/pmic-voter.h b/drivers/power/supply/qcom/pmic-voter.h
new file mode 100644
index 0000000..031b9a0
--- /dev/null
+++ b/drivers/power/supply/qcom/pmic-voter.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PMIC_VOTER_H
+#define __PMIC_VOTER_H
+
+#include <linux/mutex.h>
+
+struct votable;
+
+enum votable_type {
+	VOTE_MIN,
+	VOTE_MAX,
+	VOTE_SET_ANY,
+	NUM_VOTABLE_TYPES,
+};
+
+int get_client_vote(struct votable *votable, const char *client_str);
+int get_client_vote_locked(struct votable *votable, const char *client_str);
+int get_effective_result(struct votable *votable);
+int get_effective_result_locked(struct votable *votable);
+const char *get_effective_client(struct votable *votable);
+const char *get_effective_client_locked(struct votable *votable);
+int vote(struct votable *votable, const char *client_str, bool state, int val);
+int rerun_election(struct votable *votable);
+struct votable *find_votable(const char *name);
+struct votable *create_votable(const char *name,
+				int votable_type,
+				int (*callback)(struct votable *votable,
+						void *data,
+						int effective_result,
+						const char *effective_client),
+				void *data);
+void destroy_votable(struct votable *votable);
+void lock_votable(struct votable *votable);
+void unlock_votable(struct votable *votable);
+
+#endif /* __PMIC_VOTER_H */
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
new file mode 100644
index 0000000..304d0cf
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -0,0 +1,4262 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"FG: %s: " fmt, __func__
+
+#include <linux/ktime.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_batterydata.h>
+#include <linux/platform_device.h>
+#include <linux/iio/consumer.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "fg-core.h"
+#include "fg-reg.h"
+
+#define FG_GEN3_DEV_NAME	"qcom,fg-gen3"
+
+#define PERPH_SUBTYPE_REG		0x05
+#define FG_BATT_SOC_PMI8998		0x10
+#define FG_BATT_INFO_PMI8998		0x11
+#define FG_MEM_INFO_PMI8998		0x0D
+
+/* SRAM address and offset in ascending order */
+#define SLOPE_LIMIT_WORD		3
+#define SLOPE_LIMIT_OFFSET		0
+#define CUTOFF_VOLT_WORD		5
+#define CUTOFF_VOLT_OFFSET		0
+#define SYS_TERM_CURR_WORD		6
+#define SYS_TERM_CURR_OFFSET		0
+#define VBATT_FULL_WORD			7
+#define VBATT_FULL_OFFSET		0
+#define ESR_FILTER_WORD			8
+#define ESR_UPD_TIGHT_OFFSET		0
+#define ESR_UPD_BROAD_OFFSET		1
+#define ESR_UPD_TIGHT_LOW_TEMP_OFFSET	2
+#define ESR_UPD_BROAD_LOW_TEMP_OFFSET	3
+#define KI_COEFF_MED_DISCHG_WORD	9
+#define KI_COEFF_MED_DISCHG_OFFSET	3
+#define KI_COEFF_HI_DISCHG_WORD		10
+#define KI_COEFF_HI_DISCHG_OFFSET	0
+#define KI_COEFF_LOW_DISCHG_WORD	10
+#define KI_COEFF_LOW_DISCHG_OFFSET	2
+#define DELTA_MSOC_THR_WORD		12
+#define DELTA_MSOC_THR_OFFSET		3
+#define DELTA_BSOC_THR_WORD		13
+#define DELTA_BSOC_THR_OFFSET		2
+#define RECHARGE_SOC_THR_WORD		14
+#define RECHARGE_SOC_THR_OFFSET		0
+#define CHG_TERM_CURR_WORD		14
+#define CHG_TERM_CURR_OFFSET		1
+#define EMPTY_VOLT_WORD			15
+#define EMPTY_VOLT_OFFSET		0
+#define VBATT_LOW_WORD			15
+#define VBATT_LOW_OFFSET		1
+#define ESR_TIMER_DISCHG_MAX_WORD	17
+#define ESR_TIMER_DISCHG_MAX_OFFSET	0
+#define ESR_TIMER_DISCHG_INIT_WORD	17
+#define ESR_TIMER_DISCHG_INIT_OFFSET	2
+#define ESR_TIMER_CHG_MAX_WORD		18
+#define ESR_TIMER_CHG_MAX_OFFSET	0
+#define ESR_TIMER_CHG_INIT_WORD		18
+#define ESR_TIMER_CHG_INIT_OFFSET	2
+#define PROFILE_LOAD_WORD		24
+#define PROFILE_LOAD_OFFSET		0
+#define ESR_RSLOW_DISCHG_WORD		34
+#define ESR_RSLOW_DISCHG_OFFSET		0
+#define ESR_RSLOW_CHG_WORD		51
+#define ESR_RSLOW_CHG_OFFSET		0
+#define NOM_CAP_WORD			58
+#define NOM_CAP_OFFSET			0
+#define ACT_BATT_CAP_BKUP_WORD		74
+#define ACT_BATT_CAP_BKUP_OFFSET	0
+#define CYCLE_COUNT_WORD		75
+#define CYCLE_COUNT_OFFSET		0
+#define PROFILE_INTEGRITY_WORD		79
+#define SW_CONFIG_OFFSET		0
+#define PROFILE_INTEGRITY_OFFSET	3
+#define BATT_SOC_WORD			91
+#define BATT_SOC_OFFSET			0
+#define FULL_SOC_WORD			93
+#define FULL_SOC_OFFSET			2
+#define MONOTONIC_SOC_WORD		94
+#define MONOTONIC_SOC_OFFSET		2
+#define CC_SOC_WORD			95
+#define CC_SOC_OFFSET			0
+#define CC_SOC_SW_WORD			96
+#define CC_SOC_SW_OFFSET		0
+#define VOLTAGE_PRED_WORD		97
+#define VOLTAGE_PRED_OFFSET		0
+#define OCV_WORD			97
+#define OCV_OFFSET			2
+#define ESR_WORD			99
+#define ESR_OFFSET			0
+#define RSLOW_WORD			101
+#define RSLOW_OFFSET			0
+#define ACT_BATT_CAP_WORD		117
+#define ACT_BATT_CAP_OFFSET		0
+#define LAST_BATT_SOC_WORD		119
+#define LAST_BATT_SOC_OFFSET		0
+#define LAST_MONOTONIC_SOC_WORD		119
+#define LAST_MONOTONIC_SOC_OFFSET	2
+#define ALG_FLAGS_WORD			120
+#define ALG_FLAGS_OFFSET		1
+
+/* v2 SRAM address and offset in ascending order */
+#define KI_COEFF_LOW_DISCHG_v2_WORD	9
+#define KI_COEFF_LOW_DISCHG_v2_OFFSET	3
+#define KI_COEFF_MED_DISCHG_v2_WORD	10
+#define KI_COEFF_MED_DISCHG_v2_OFFSET	0
+#define KI_COEFF_HI_DISCHG_v2_WORD	10
+#define KI_COEFF_HI_DISCHG_v2_OFFSET	1
+#define DELTA_BSOC_THR_v2_WORD		12
+#define DELTA_BSOC_THR_v2_OFFSET	3
+#define DELTA_MSOC_THR_v2_WORD		13
+#define DELTA_MSOC_THR_v2_OFFSET	0
+#define RECHARGE_SOC_THR_v2_WORD	14
+#define RECHARGE_SOC_THR_v2_OFFSET	1
+#define CHG_TERM_CURR_v2_WORD		15
+#define CHG_TERM_CURR_v2_OFFSET		1
+#define EMPTY_VOLT_v2_WORD		15
+#define EMPTY_VOLT_v2_OFFSET		3
+#define VBATT_LOW_v2_WORD		16
+#define VBATT_LOW_v2_OFFSET		0
+#define RECHARGE_VBATT_THR_v2_WORD	16
+#define RECHARGE_VBATT_THR_v2_OFFSET	1
+#define FLOAT_VOLT_v2_WORD		16
+#define FLOAT_VOLT_v2_OFFSET		2
+
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_value_16b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_default(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int value);
+static void fg_encode_voltage(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val_mv, u8 *buf);
+static void fg_encode_current(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val_ma, u8 *buf);
+static void fg_encode_default(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val, u8 *buf);
+
+static struct fg_irq_info fg_irqs[FG_IRQ_MAX];
+
+#define PARAM(_id, _addr_word, _addr_byte, _len, _num, _den, _offset,	\
+	      _enc, _dec)						\
+	[FG_SRAM_##_id] = {						\
+		.addr_word	= _addr_word,				\
+		.addr_byte	= _addr_byte,				\
+		.len		= _len,					\
+		.numrtr		= _num,					\
+		.denmtr		= _den,					\
+		.offset		= _offset,				\
+		.encode		= _enc,					\
+		.decode		= _dec,					\
+	}								\
+
+static struct fg_sram_param pmi8998_v1_sram_params[] = {
+	PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 1000,
+		244141, 0, NULL, fg_decode_voltage_15b),
+	PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_voltage_15b),
+	PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default,
+		fg_decode_value_16b),
+	PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_value_16b),
+	PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+		1, 1, 0, NULL, fg_decode_default),
+	/* Entries below here are configurable during initialization */
+	PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
+		244141, 0, fg_encode_voltage, NULL),
+	PARAM(EMPTY_VOLT, EMPTY_VOLT_WORD, EMPTY_VOLT_OFFSET, 1, 100000, 390625,
+		-2500, fg_encode_voltage, NULL),
+	PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 100000, 390625,
+		-2500, fg_encode_voltage, NULL),
+	PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000,
+		244141, 0, fg_encode_voltage, fg_decode_voltage_15b),
+	PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
+		100000, 390625, 0, fg_encode_current, NULL),
+	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_WORD, DELTA_MSOC_THR_OFFSET, 1,
+		2048, 100, 0, fg_encode_default, NULL),
+	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_WORD, DELTA_BSOC_THR_OFFSET, 1,
+		2048, 100, 0, fg_encode_default, NULL),
+	PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
+		1, 256, 100, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD,
+		ESR_TIMER_DISCHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_DISCHG_INIT, ESR_TIMER_DISCHG_INIT_WORD,
+		ESR_TIMER_DISCHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_CHG_MAX, ESR_TIMER_CHG_MAX_WORD,
+		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
+		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
+		KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
+		KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, 1000,
+		0, fg_encode_default, NULL),
+};
+
+static struct fg_sram_param pmi8998_v2_sram_params[] = {
+	PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 1000,
+		244141, 0, NULL, fg_decode_voltage_15b),
+	PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_voltage_15b),
+	PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default,
+		fg_decode_value_16b),
+	PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_value_16b),
+	PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+		1, 1, 0, NULL, fg_decode_default),
+	/* Entries below here are configurable during initialization */
+	PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
+		244141, 0, fg_encode_voltage, NULL),
+	PARAM(EMPTY_VOLT, EMPTY_VOLT_v2_WORD, EMPTY_VOLT_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(FLOAT_VOLT, FLOAT_VOLT_v2_WORD, FLOAT_VOLT_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000,
+		244141, 0, fg_encode_voltage, fg_decode_voltage_15b),
+	PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
+		100000, 390625, 0, fg_encode_current, NULL),
+	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET,
+		1, 2048, 100, 0, fg_encode_default, NULL),
+	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET,
+		1, 2048, 100, 0, fg_encode_default, NULL),
+	PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
+		RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
+		NULL),
+	PARAM(RECHARGE_VBATT_THR, RECHARGE_VBATT_THR_v2_WORD,
+		RECHARGE_VBATT_THR_v2_OFFSET, 1, 1000, 15625, -2000,
+		fg_encode_voltage, NULL),
+	PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD,
+		ESR_TIMER_DISCHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_DISCHG_INIT, ESR_TIMER_DISCHG_INIT_WORD,
+		ESR_TIMER_DISCHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_CHG_MAX, ESR_TIMER_CHG_MAX_WORD,
+		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
+		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
+		KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD,
+		KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, 1000,
+		0, fg_encode_default, NULL),
+};
+
+static struct fg_alg_flag pmi8998_v1_alg_flags[] = {
+	[ALG_FLAG_SOC_LT_OTG_MIN]	= {
+		.name	= "SOC_LT_OTG_MIN",
+		.bit	= BIT(0),
+	},
+	[ALG_FLAG_SOC_LT_RECHARGE]	= {
+		.name	= "SOC_LT_RECHARGE",
+		.bit	= BIT(1),
+	},
+	[ALG_FLAG_IBATT_LT_ITERM]	= {
+		.name	= "IBATT_LT_ITERM",
+		.bit	= BIT(2),
+	},
+	[ALG_FLAG_IBATT_GT_HPM]		= {
+		.name	= "IBATT_GT_HPM",
+		.bit	= BIT(3),
+	},
+	[ALG_FLAG_IBATT_GT_UPM]		= {
+		.name	= "IBATT_GT_UPM",
+		.bit	= BIT(4),
+	},
+	[ALG_FLAG_VBATT_LT_RECHARGE]	= {
+		.name	= "VBATT_LT_RECHARGE",
+		.bit	= BIT(5),
+	},
+	[ALG_FLAG_VBATT_GT_VFLOAT]	= {
+		.invalid = true,
+	},
+};
+
+static struct fg_alg_flag pmi8998_v2_alg_flags[] = {
+	[ALG_FLAG_SOC_LT_OTG_MIN]	= {
+		.name	= "SOC_LT_OTG_MIN",
+		.bit	= BIT(0),
+	},
+	[ALG_FLAG_SOC_LT_RECHARGE]	= {
+		.name	= "SOC_LT_RECHARGE",
+		.bit	= BIT(1),
+	},
+	[ALG_FLAG_IBATT_LT_ITERM]	= {
+		.name	= "IBATT_LT_ITERM",
+		.bit	= BIT(2),
+	},
+	[ALG_FLAG_IBATT_GT_HPM]		= {
+		.name	= "IBATT_GT_HPM",
+		.bit	= BIT(4),
+	},
+	[ALG_FLAG_IBATT_GT_UPM]		= {
+		.name	= "IBATT_GT_UPM",
+		.bit	= BIT(5),
+	},
+	[ALG_FLAG_VBATT_LT_RECHARGE]	= {
+		.name	= "VBATT_LT_RECHARGE",
+		.bit	= BIT(6),
+	},
+	[ALG_FLAG_VBATT_GT_VFLOAT]	= {
+		.name	= "VBATT_GT_VFLOAT",
+		.bit	= BIT(7),
+	},
+};
+
+static int fg_gen3_debug_mask;
+module_param_named(
+	debug_mask, fg_gen3_debug_mask, int, 0600
+);
+
+static bool fg_profile_dump;
+module_param_named(
+	profile_dump, fg_profile_dump, bool, 0600
+);
+
+static int fg_sram_dump_period_ms = 20000;
+module_param_named(
+	sram_dump_period_ms, fg_sram_dump_period_ms, int, 0600
+);
+
+static int fg_restart;
+static bool fg_sram_dump;
+
+/* All getters HERE */
+
+#define VOLTAGE_15BIT_MASK	GENMASK(14, 0)
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	value &= VOLTAGE_15BIT_MASK;
+	sp[id].value = div_u64((u64)value * sp[id].denmtr, sp[id].numrtr);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
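+
+/*
+ * Worked example (illustrative only): with numrtr = 1000 and denmtr = 244141
+ * as used for OCV/VOLTAGE_PRED above, a raw 15-bit code of 15738 decodes to
+ * 15738 * 244141 / 1000 = 3842291, i.e. about 3.84 V assuming the decoded
+ * unit is microvolts (244.141 uV per LSB).
+ */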
+
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	sp[id].value = div_s64((s64)value * sp[id].denmtr, sp[id].numrtr);
+	sp[id].value = sign_extend32(sp[id].value, 31);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
+
+static int fg_decode_value_16b(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	sp[id].value = div_u64((u64)(u16)value * sp[id].denmtr, sp[id].numrtr);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
+
+static int fg_decode_default(struct fg_sram_param *sp, enum fg_sram_param_id id,
+				int value)
+{
+	sp[id].value = value;
+	return sp[id].value;
+}
+
+static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
+			int value)
+{
+	if (!sp[id].decode) {
+		pr_err("No decoding function for parameter %d\n", id);
+		return -EINVAL;
+	}
+
+	return sp[id].decode(sp, id, value);
+}
+
+static void fg_encode_voltage(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val_mv, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+
+	val_mv += sp[id].offset;
+	temp = (int64_t)div_u64((u64)val_mv * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val_mv: %d, buf: [ ", temp, id, val_mv);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
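+
+/*
+ * Worked example (illustrative only), using the v1 EMPTY_VOLT entry above
+ * (numrtr 100000, denmtr 390625, offset -2500, len 1): encoding 2800 mV
+ * gives (2800 - 2500) * 100000 / 390625 = 76 (0x4C) in buf[0].
+ */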
+
+static void fg_encode_current(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val_ma, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+	s64 current_ma;
+
+	current_ma = val_ma;
+	temp = (int64_t)div_s64(current_ma * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val_ma);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
+
+static void fg_encode_default(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+
+	temp = DIV_ROUND_CLOSEST(val * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
+
+static void fg_encode(struct fg_sram_param *sp, enum fg_sram_param_id id,
+			int val, u8 *buf)
+{
+	if (!sp[id].encode) {
+		pr_err("No encoding function for parameter %d\n", id);
+		return;
+	}
+
+	sp[id].encode(sp, id, val, buf);
+}
+
+/*
+ * Please make sure the *_sram_params table has an entry for the parameter
+ * obtained through this function. In addition to the address, offset and
+ * length from which this SRAM parameter is read, a decode function
+ * needs to be specified.
+ */
+static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
+				int *val)
+{
+	int temp, rc, i;
+	u8 buf[4];
+
+	if (id < 0 || id > FG_SRAM_MAX || chip->sp[id].len > sizeof(buf))
+		return -EINVAL;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
+		buf, chip->sp[id].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error reading address 0x%04x[%d] rc=%d\n",
+			chip->sp[id].addr_word, chip->sp[id].addr_byte, rc);
+		return rc;
+	}
+
+	for (i = 0, temp = 0; i < chip->sp[id].len; i++)
+		temp |= buf[i] << (8 * i);
+
+	*val = fg_decode(chip->sp, id, temp);
+	return 0;
+}
+
+#define CC_SOC_30BIT	GENMASK(29, 0)
+static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+{
+	int rc, cc_soc;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC, &cc_soc);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = div_s64(cc_soc * chip->cl.nom_cap_uah, CC_SOC_30BIT);
+	return 0;
+}
+
+static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+{
+	int rc, cc_soc;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = div_s64(cc_soc * chip->cl.learned_cc_uah, CC_SOC_30BIT);
+	return 0;
+}
+
+#define BATT_TEMP_NUMR		1
+#define BATT_TEMP_DENR		1
+static int fg_get_battery_temp(struct fg_chip *chip, int *val)
+{
+	int rc = 0, temp;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_BATT_TEMP_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_BATT_TEMP_LSB(chip), rc);
+		return rc;
+	}
+
+	temp = ((buf[1] & BATT_TEMP_MSB_MASK) << 8) |
+		(buf[0] & BATT_TEMP_LSB_MASK);
+	temp = DIV_ROUND_CLOSEST(temp, 4);
+
+	/* Value is in Kelvin; Convert it to deciDegC */
+	temp = (temp - 273) * 10;
+	*val = temp;
+	return 0;
+}
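+
+/*
+ * Worked example (illustrative only): a raw reading of 1252 gives
+ * 1252 / 4 = 313 K, i.e. (313 - 273) * 10 = 400 deci-degC (40.0 degC).
+ */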
+
+static int fg_get_battery_resistance(struct fg_chip *chip, int *val)
+{
+	int rc, esr_uohms, rslow_uohms;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_RSLOW, &rslow_uohms);
+	if (rc < 0) {
+		pr_err("failed to get Rslow, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = esr_uohms + rslow_uohms;
+	return 0;
+}
+
+#define BATT_CURRENT_NUMR	488281
+#define BATT_CURRENT_DENR	1000
+static int fg_get_battery_current(struct fg_chip *chip, int *val)
+{
+	int rc = 0;
+	int64_t temp = 0;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_IBATT_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_IBATT_LSB(chip), rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		temp = buf[0] << 8 | buf[1];
+	else
+		temp = buf[1] << 8 | buf[0];
+
+	pr_debug("buf: %x %x temp: %llx\n", buf[0], buf[1], temp);
+	/* Sign bit is bit 15 */
+	temp = twos_compliment_extend(temp, 15);
+	*val = div_s64((s64)temp * BATT_CURRENT_NUMR, BATT_CURRENT_DENR);
+	return 0;
+}
+
+#define BATT_VOLTAGE_NUMR	122070
+#define BATT_VOLTAGE_DENR	1000
+static int fg_get_battery_voltage(struct fg_chip *chip, int *val)
+{
+	int rc = 0;
+	u16 temp = 0;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_VBATT_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_VBATT_LSB(chip), rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		temp = buf[0] << 8 | buf[1];
+	else
+		temp = buf[1] << 8 | buf[0];
+
+	pr_debug("buf: %x %x temp: %x\n", buf[0], buf[1], temp);
+	*val = div_u64((u64)temp * BATT_VOLTAGE_NUMR, BATT_VOLTAGE_DENR);
+	return 0;
+}
+
+#define MAX_TRIES_SOC		5
+static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
+{
+	u8 cap[2];
+	int rc, tries = 0;
+
+	while (tries < MAX_TRIES_SOC) {
+		rc = fg_read(chip, BATT_SOC_FG_MONOTONIC_SOC(chip), cap, 2);
+		if (rc < 0) {
+			pr_err("failed to read addr=0x%04x, rc=%d\n",
+				BATT_SOC_FG_MONOTONIC_SOC(chip), rc);
+			return rc;
+		}
+
+		if (cap[0] == cap[1])
+			break;
+
+		tries++;
+	}
+
+	if (tries == MAX_TRIES_SOC) {
+		pr_err("shadow registers do not match\n");
+		return -EINVAL;
+	}
+
+	fg_dbg(chip, FG_POWER_SUPPLY, "raw: 0x%02x\n", cap[0]);
+	*val = cap[0];
+	return 0;
+}
+
+#define FULL_CAPACITY	100
+#define FULL_SOC_RAW	255
+static int fg_get_msoc(struct fg_chip *chip, int *msoc)
+{
+	int rc;
+
+	rc = fg_get_msoc_raw(chip, msoc);
+	if (rc < 0)
+		return rc;
+
+	*msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+	return 0;
+}
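+
+/*
+ * Worked example (illustrative only): a raw monotonic SOC of 128 maps to
+ * DIV_ROUND_CLOSEST(128 * 100, 255) = 50 percent.
+ */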
+
+static bool is_batt_empty(struct fg_chip *chip)
+{
+	u8 status;
+	int rc, vbatt_uv, msoc;
+
+	rc = fg_read(chip, BATT_SOC_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_SOC_INT_RT_STS(chip), rc);
+		return false;
+	}
+
+	if (!(status & MSOC_EMPTY_BIT))
+		return false;
+
+	rc = fg_get_battery_voltage(chip, &vbatt_uv);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		return false;
+	}
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (!rc)
+		pr_warn("batt_soc_rt_sts: %x vbatt: %d uV msoc:%d\n", status,
+			vbatt_uv, msoc);
+
+	return vbatt_uv < (chip->dt.cutoff_volt_mv * 1000);
+}
+
+static int fg_get_debug_batt_id(struct fg_chip *chip, int *batt_id)
+{
+	int rc;
+	u64 temp;
+	u8 buf[2];
+
+	rc = fg_read(chip, ADC_RR_FAKE_BATT_LOW_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			ADC_RR_FAKE_BATT_LOW_LSB(chip), rc);
+		return rc;
+	}
+
+	/*
+	 * Fake battery threshold is encoded in the following format.
+	 * Threshold (code) = (battery_id in Ohms) * 0.00015 * 2^10 / 2.5
+	 */
+	temp = (buf[1] << 8 | buf[0]) * 2500000;
+	do_div(temp, 150 * 1024);
+	batt_id[0] = temp;
+	rc = fg_read(chip, ADC_RR_FAKE_BATT_HIGH_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			ADC_RR_FAKE_BATT_HIGH_LSB(chip), rc);
+		return rc;
+	}
+
+	temp = (buf[1] << 8 | buf[0]) * 2500000;
+	do_div(temp, 150 * 1024);
+	batt_id[1] = temp;
+	pr_debug("debug batt_id range: [%d %d]\n", batt_id[0], batt_id[1]);
+	return 0;
+}
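+
+/*
+ * Worked example (illustrative only): a threshold code of 4608 decodes to
+ * 4608 * 2500000 / (150 * 1024) = 75000, i.e. a 75 kOhm battery id.
+ */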
+
+static bool is_debug_batt_id(struct fg_chip *chip)
+{
+	int debug_batt_id[2], rc;
+
+	if (!chip->batt_id_ohms)
+		return false;
+
+	rc = fg_get_debug_batt_id(chip, debug_batt_id);
+	if (rc < 0) {
+		pr_err("Failed to get debug batt_id, rc=%d\n", rc);
+		return false;
+	}
+
+	if (is_between(debug_batt_id[0], debug_batt_id[1],
+		chip->batt_id_ohms)) {
+		fg_dbg(chip, FG_POWER_SUPPLY, "Debug battery id: %dohms\n",
+			chip->batt_id_ohms);
+		return true;
+	}
+
+	return false;
+}
+
+#define DEBUG_BATT_SOC	67
+#define BATT_MISS_SOC	50
+#define EMPTY_SOC	0
+static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
+{
+	int rc, msoc;
+
+	if (is_debug_batt_id(chip)) {
+		*val = DEBUG_BATT_SOC;
+		return 0;
+	}
+
+	if (chip->fg_restarting) {
+		*val = chip->last_soc;
+		return 0;
+	}
+
+	if (chip->battery_missing) {
+		*val = BATT_MISS_SOC;
+		return 0;
+	}
+
+	if (is_batt_empty(chip)) {
+		*val = EMPTY_SOC;
+		return 0;
+	}
+
+	if (chip->charge_full) {
+		*val = FULL_CAPACITY;
+		return 0;
+	}
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0)
+		return rc;
+
+	if (chip->delta_soc > 0)
+		*val = chip->maint_soc;
+	else
+		*val = msoc;
+	return 0;
+}
+
+#define DEFAULT_BATT_TYPE	"Unknown Battery"
+#define MISSING_BATT_TYPE	"Missing Battery"
+#define LOADING_BATT_TYPE	"Loading Battery"
+static const char *fg_get_battery_type(struct fg_chip *chip)
+{
+	if (chip->battery_missing)
+		return MISSING_BATT_TYPE;
+
+	if (chip->bp.batt_type_str) {
+		if (chip->profile_loaded)
+			return chip->bp.batt_type_str;
+		else if (chip->profile_available)
+			return LOADING_BATT_TYPE;
+	}
+
+	return DEFAULT_BATT_TYPE;
+}
+
+static int fg_batt_missing_config(struct fg_chip *chip, bool enable)
+{
+	int rc;
+
+	rc = fg_masked_write(chip, BATT_INFO_BATT_MISS_CFG(chip),
+			BM_FROM_BATT_ID_BIT, enable ? BM_FROM_BATT_ID_BIT : 0);
+	if (rc < 0)
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_INFO_BATT_MISS_CFG(chip), rc);
+	return rc;
+}
+
+static int fg_get_batt_id(struct fg_chip *chip)
+{
+	int rc, ret, batt_id = 0;
+
+	if (!chip->batt_id_chan)
+		return -EINVAL;
+
+	rc = fg_batt_missing_config(chip, false);
+	if (rc < 0) {
+		pr_err("Error in disabling BMD, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_read_channel_processed(chip->batt_id_chan, &batt_id);
+	if (rc < 0) {
+		pr_err("Error in reading batt_id channel, rc:%d\n", rc);
+		goto out;
+	}
+
+	/* Wait for 200ms before enabling BMD again */
+	msleep(200);
+
+	fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
+	chip->batt_id_ohms = batt_id;
+out:
+	ret = fg_batt_missing_config(chip, true);
+	if (ret < 0) {
+		pr_err("Error in enabling BMD, ret=%d\n", ret);
+		return ret;
+	}
+
+	return rc;
+}
+
+static int fg_get_batt_profile(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	struct device_node *batt_node, *profile_node;
+	const char *data;
+	int rc, len;
+
+	batt_node = of_find_node_by_name(node, "qcom,battery-data");
+	if (!batt_node) {
+		pr_err("Batterydata not available\n");
+		return -ENXIO;
+	}
+
+	profile_node = of_batterydata_get_best_profile(batt_node,
+				chip->batt_id_ohms / 1000, NULL);
+	if (IS_ERR(profile_node))
+		return PTR_ERR(profile_node);
+
+	if (!profile_node) {
+		pr_err("couldn't find profile handle\n");
+		return -ENODATA;
+	}
+
+	rc = of_property_read_string(profile_node, "qcom,battery-type",
+			&chip->bp.batt_type_str);
+	if (rc < 0) {
+		pr_err("battery type unavailable, rc:%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+			&chip->bp.float_volt_uv);
+	if (rc < 0) {
+		pr_err("battery float voltage unavailable, rc:%d\n", rc);
+		chip->bp.float_volt_uv = -EINVAL;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma",
+			&chip->bp.fastchg_curr_ma);
+	if (rc < 0) {
+		pr_err("battery fastchg current unavailable, rc:%d\n", rc);
+		chip->bp.fastchg_curr_ma = -EINVAL;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,fg-cc-cv-threshold-mv",
+			&chip->bp.vbatt_full_mv);
+	if (rc < 0) {
+		pr_err("battery cc_cv threshold unavailable, rc:%d\n", rc);
+		chip->bp.vbatt_full_mv = -EINVAL;
+	}
+
+	data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
+	if (!data) {
+		pr_err("No profile data available\n");
+		return -ENODATA;
+	}
+
+	if (len != PROFILE_LEN) {
+		pr_err("battery profile incorrect size: %d\n", len);
+		return -EINVAL;
+	}
+
+	chip->profile_available = true;
+	memcpy(chip->batt_profile, data, len);
+
+	return 0;
+}
+
+static inline void get_temp_setpoint(int threshold, u8 *val)
+{
+	/* Resolution is 0.5C. Base is -30C. */
+	*val = DIV_ROUND_CLOSEST((threshold + 30) * 10, 5);
+}
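+
+/*
+ * Worked example (illustrative only): a 45 degC threshold encodes to
+ * DIV_ROUND_CLOSEST((45 + 30) * 10, 5) = 150 (0x96), matching the
+ * 0.5 degC resolution and -30 degC base noted above.
+ */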
+
+static inline void get_batt_temp_delta(int delta, u8 *val)
+{
+	switch (delta) {
+	case 2:
+		*val = BTEMP_DELTA_2K;
+		break;
+	case 4:
+		*val = BTEMP_DELTA_4K;
+		break;
+	case 6:
+		*val = BTEMP_DELTA_6K;
+		break;
+	case 10:
+		*val = BTEMP_DELTA_10K;
+		break;
+	default:
+		*val = BTEMP_DELTA_2K;
+		break;
+	}
+}
+
+static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
+				int flags)
+{
+	u8 buf[2];
+	int rc, timer_max, timer_init;
+
+	if (charging) {
+		timer_max = FG_SRAM_ESR_TIMER_CHG_MAX;
+		timer_init = FG_SRAM_ESR_TIMER_CHG_INIT;
+	} else {
+		timer_max = FG_SRAM_ESR_TIMER_DISCHG_MAX;
+		timer_init = FG_SRAM_ESR_TIMER_DISCHG_INIT;
+	}
+
+	fg_encode(chip->sp, timer_max, cycles, buf);
+	rc = fg_sram_write(chip,
+			chip->sp[timer_max].addr_word,
+			chip->sp[timer_max].addr_byte, buf,
+			chip->sp[timer_max].len, flags);
+	if (rc < 0) {
+		pr_err("Error in writing esr_timer_dischg_max, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, timer_init, cycles, buf);
+	rc = fg_sram_write(chip,
+			chip->sp[timer_init].addr_word,
+			chip->sp[timer_init].addr_byte, buf,
+			chip->sp[timer_init].len, flags);
+	if (rc < 0) {
+		pr_err("Error in writing esr_timer_dischg_init, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Other functions HERE */
+
+static void fg_notify_charger(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc;
+
+	if (!chip->batt_psy)
+		return;
+
+	if (!chip->profile_available)
+		return;
+
+	prop.intval = chip->bp.float_volt_uv;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+	if (rc < 0) {
+		pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n",
+			rc);
+		return;
+	}
+
+	prop.intval = chip->bp.fastchg_curr_ma * 1000;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop);
+	if (rc < 0) {
+		pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n",
+			rc);
+		return;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
+}
+
+static int fg_awake_cb(struct votable *votable, void *data, int awake,
+			const char *client)
+{
+	struct fg_chip *chip = data;
+
+	if (awake)
+		pm_stay_awake(chip->dev);
+	else
+		pm_relax(chip->dev);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
+static bool batt_psy_initialized(struct fg_chip *chip)
+{
+	if (chip->batt_psy)
+		return true;
+
+	chip->batt_psy = power_supply_get_by_name("battery");
+	if (!chip->batt_psy)
+		return false;
+
+	/* batt_psy is initialized, set the fcc and fv */
+	fg_notify_charger(chip);
+
+	return true;
+}
+
+static bool is_parallel_charger_available(struct fg_chip *chip)
+{
+	if (!chip->parallel_psy)
+		chip->parallel_psy = power_supply_get_by_name("parallel");
+
+	if (!chip->parallel_psy)
+		return false;
+
+	return true;
+}
+
+static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
+{
+	int16_t cc_mah;
+	int rc;
+
+	if (chip->battery_missing || !chip->cl.learned_cc_uah)
+		return -EPERM;
+
+	cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+	/* Write to a backup register to use across reboot */
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
+			chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
+			chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing act_batt_cap_bkup, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Write to actual capacity register for coulomb counter operation */
+	rc = fg_sram_write(chip, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET,
+			(u8 *)&cc_mah, chip->sp[FG_SRAM_ACT_BATT_CAP].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_CAP_LEARN, "learned capacity %llduah/%dmah stored\n",
+		chip->cl.learned_cc_uah, cc_mah);
+	return 0;
+}
+
+#define CAPACITY_DELTA_DECIPCT	500
+static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
+{
+	int rc, act_cap_mah;
+	int64_t delta_cc_uah, pct_nom_cap_uah;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->cl.learned_cc_uah = act_cap_mah * 1000;
+
+	if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+		if (chip->cl.learned_cc_uah == 0)
+			chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
+		delta_cc_uah = abs(chip->cl.learned_cc_uah -
+					chip->cl.nom_cap_uah);
+		pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
+				CAPACITY_DELTA_DECIPCT, 1000);
+		/*
+		 * If the learned capacity is out of range by 50% from the
+		 * nominal capacity, then overwrite the learned capacity with
+		 * the nominal capacity.
+		 */
+		if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+			fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is out of range, resetting it to nominal: %lld\n",
+				chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+			chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+		}
+
+		rc = fg_save_learned_cap_to_sram(chip);
+		if (rc < 0)
+			pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+	}
+
+	fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
+		chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+	return 0;
+}
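+
+/*
+ * Worked example (illustrative only): with a nominal capacity of 3000000 uAh,
+ * CAPACITY_DELTA_DECIPCT (500, i.e. 50%) allows learned values between
+ * 1500000 and 4500000 uAh; a stored value of 1000000 uAh would be rewritten
+ * with the nominal capacity.
+ */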
+
+static bool is_temp_valid_cap_learning(struct fg_chip *chip)
+{
+	int rc, batt_temp;
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (rc < 0) {
+		pr_err("Error in getting batt_temp\n");
+		return false;
+	}
+
+	if (batt_temp > chip->dt.cl_max_temp ||
+		batt_temp < chip->dt.cl_min_temp) {
+		fg_dbg(chip, FG_CAP_LEARN, "batt temp %d out of range [%d %d]\n",
+			batt_temp, chip->dt.cl_min_temp, chip->dt.cl_max_temp);
+		return false;
+	}
+
+	return true;
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+	int64_t max_inc_val, min_dec_val, old_cap;
+	int rc;
+
+	max_inc_val = chip->cl.learned_cc_uah
+			* (1000 + chip->dt.cl_max_cap_inc);
+	do_div(max_inc_val, 1000);
+
+	min_dec_val = chip->cl.learned_cc_uah
+			* (1000 - chip->dt.cl_max_cap_dec);
+	do_div(min_dec_val, 1000);
+
+	old_cap = chip->cl.learned_cc_uah;
+	if (chip->cl.final_cc_uah > max_inc_val)
+		chip->cl.learned_cc_uah = max_inc_val;
+	else if (chip->cl.final_cc_uah < min_dec_val)
+		chip->cl.learned_cc_uah = min_dec_val;
+	else
+		chip->cl.learned_cc_uah =
+			chip->cl.final_cc_uah;
+
+	if (chip->dt.cl_max_cap_limit) {
+		max_inc_val = (int64_t)chip->cl.nom_cap_uah * (1000 +
+				chip->dt.cl_max_cap_limit);
+		do_div(max_inc_val, 1000);
+		if (chip->cl.final_cc_uah > max_inc_val) {
+			fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes above max limit %lld\n",
+				chip->cl.final_cc_uah, max_inc_val);
+			chip->cl.learned_cc_uah = max_inc_val;
+		}
+	}
+
+	if (chip->dt.cl_min_cap_limit) {
+		min_dec_val = (int64_t)chip->cl.nom_cap_uah * (1000 -
+				chip->dt.cl_min_cap_limit);
+		do_div(min_dec_val, 1000);
+		if (chip->cl.final_cc_uah < min_dec_val) {
+			fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes below min limit %lld\n",
+				chip->cl.final_cc_uah, min_dec_val);
+			chip->cl.learned_cc_uah = min_dec_val;
+		}
+	}
+
+	rc = fg_save_learned_cap_to_sram(chip);
+	if (rc < 0)
+		pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+
+	fg_dbg(chip, FG_CAP_LEARN, "final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+		chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+	int rc, cc_soc_sw, cc_soc_delta_pct;
+	int64_t delta_cc_uah;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	cc_soc_delta_pct = DIV_ROUND_CLOSEST(
+				abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+				CC_SOC_30BIT);
+	delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
+				100);
+	chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
+	fg_dbg(chip, FG_CAP_LEARN, "Current cc_soc=%d cc_soc_delta_pct=%d total_cc_uah=%lld\n",
+		cc_soc_sw, cc_soc_delta_pct, chip->cl.final_cc_uah);
+	return 0;
+}
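+
+/*
+ * Worked example (illustrative only): if cc_soc_sw has advanced by 40% of
+ * CC_SOC_30BIT since learning began and learned_cc_uah is 3000000 uAh, then
+ * delta_cc_uah = 3000000 * 40 / 100 = 1200000 uAh, which is added to
+ * init_cc_uah to form final_cc_uah.
+ */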
+
+static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+{
+	int rc, cc_soc_sw;
+
+	if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+		chip->dt.cl_start_soc) {
+		fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is too high, not starting\n",
+			batt_soc);
+		return -EINVAL;
+	}
+
+	chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+					FULL_SOC_RAW);
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->cl.init_cc_soc_sw = cc_soc_sw;
+	chip->cl.active = true;
+	fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
+		batt_soc, chip->cl.init_cc_soc_sw);
+	return 0;
+}
+
+static int fg_cap_learning_done(struct fg_chip *chip)
+{
+	int rc, cc_soc_sw;
+
+	rc = fg_cap_learning_process_full_data(chip);
+	if (rc < 0) {
+		pr_err("Error in processing cap learning full data, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	/* Write a FULL value to cc_soc_sw */
+	cc_soc_sw = CC_SOC_30BIT;
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+		chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+		chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_cap_learning_post_process(chip);
+out:
+	return rc;
+}
+
+#define FULL_SOC_RAW	255
+static void fg_cap_learning_update(struct fg_chip *chip)
+{
+	int rc, batt_soc;
+
+	mutex_lock(&chip->cl.lock);
+
+	if (!is_temp_valid_cap_learning(chip) || !chip->cl.learned_cc_uah ||
+		chip->battery_missing) {
+		fg_dbg(chip, FG_CAP_LEARN, "Aborting cap_learning %lld\n",
+			chip->cl.learned_cc_uah);
+		chip->cl.active = false;
+		chip->cl.init_cc_uah = 0;
+		goto out;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		goto out;
+	}
+
+	/* We need only the most significant byte here */
+	batt_soc = (u32)batt_soc >> 24;
+
+	fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
+		chip->charge_status, chip->cl.active, batt_soc);
+
+	/* Initialize the starting point of learning capacity */
+	if (!chip->cl.active) {
+		if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+			rc = fg_cap_learning_begin(chip, batt_soc);
+			chip->cl.active = (rc == 0);
+		}
+
+	} else {
+		if (chip->charge_done) {
+			rc = fg_cap_learning_done(chip);
+			if (rc < 0)
+				pr_err("Error in completing capacity learning, rc=%d\n",
+					rc);
+
+			chip->cl.active = false;
+			chip->cl.init_cc_uah = 0;
+		}
+
+		if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+			fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+				batt_soc);
+			chip->cl.active = false;
+			chip->cl.init_cc_uah = 0;
+		}
+	}
+
+out:
+	mutex_unlock(&chip->cl.lock);
+}
+
+#define KI_COEFF_MED_DISCHG_DEFAULT	1500
+#define KI_COEFF_HI_DISCHG_DEFAULT	2200
+static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
+{
+	int rc, i, msoc;
+	int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT;
+	int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT;
+	u8 val;
+
+	if (!chip->ki_coeff_dischg_en)
+		return 0;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+		for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) {
+			if (msoc < chip->dt.ki_coeff_soc[i]) {
+				ki_coeff_med = chip->dt.ki_coeff_med_dischg[i];
+				ki_coeff_hi = chip->dt.ki_coeff_hi_dischg[i];
+			}
+		}
+	}
+
+	fg_encode(chip->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, &val);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, &val);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_med %d ki_coeff_hi %d\n",
+		ki_coeff_med, ki_coeff_hi);
+	return 0;
+}
+
+static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
+{
+	u8 buf;
+	int rc;
+
+	if (chip->dt.auto_recharge_soc)
+		return 0;
+
+	/* This configuration is available only for PMI8998 v2.0 and above */
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		return 0;
+
+	fg_dbg(chip, FG_STATUS, "Setting recharge voltage to %dmV\n",
+		voltage_mv);
+	fg_encode(chip->sp, FG_SRAM_RECHARGE_VBATT_THR, voltage_mv, &buf);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_word,
+			chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_byte,
+			&buf, chip->sp[FG_SRAM_RECHARGE_VBATT_THR].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing recharge_vbatt_thr, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#define AUTO_RECHG_VOLT_LOW_LIMIT_MV	3700
+static int fg_charge_full_update(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc, msoc, bsoc, recharge_soc;
+	u8 full_soc[2] = {0xFF, 0xFF};
+
+	if (!chip->dt.hold_soc_while_full)
+		return 0;
+
+	if (!batt_psy_initialized(chip))
+		return 0;
+
+	mutex_lock(&chip->charge_full_lock);
+	if (!chip->charge_done && chip->bsoc_delta_irq_en) {
+		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
+		chip->bsoc_delta_irq_en = false;
+	} else if (chip->charge_done && !chip->bsoc_delta_irq_en) {
+		enable_irq(fg_irqs[BSOC_DELTA_IRQ].irq);
+		enable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+		chip->bsoc_delta_irq_en = true;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+		&prop);
+	if (rc < 0) {
+		pr_err("Error in getting battery health, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->health = prop.intval;
+	recharge_soc = chip->dt.recharge_soc_thr;
+	recharge_soc = DIV_ROUND_CLOSEST(recharge_soc * FULL_SOC_RAW,
+				FULL_CAPACITY);
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+	if (rc < 0) {
+		pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+		goto out;
+	}
+
+	/* We need only the 2 most significant bytes here */
+	bsoc = (u32)bsoc >> 16;
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting msoc, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
+		msoc, bsoc, chip->health, chip->charge_status,
+		chip->charge_full);
+	if (chip->charge_done && !chip->charge_full) {
+		if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+			fg_dbg(chip, FG_STATUS, "Setting charge_full to true\n");
+			chip->charge_full = true;
+			/*
+			 * Lower the recharge voltage so that VBAT_LT_RECHG
+			 * signal will not be asserted soon.
+			 */
+			rc = fg_set_recharge_voltage(chip,
+					AUTO_RECHG_VOLT_LOW_LIMIT_MV);
+			if (rc < 0) {
+				pr_err("Error in reducing recharge voltage, rc=%d\n",
+					rc);
+				goto out;
+			}
+		} else {
+			fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
+				msoc);
+		}
+	} else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+		chip->delta_soc = FULL_CAPACITY - msoc;
+
+		/*
+		 * We spread out the delta SOC over every 10% change in
+		 * monotonic SOC. We cannot spread more than 9% over the
+		 * 0-100 range, skipping the first 10%.
+		 */
+		if (chip->delta_soc > 9) {
+			chip->delta_soc = 0;
+			chip->maint_soc = 0;
+		} else {
+			chip->maint_soc = FULL_CAPACITY;
+			chip->last_msoc = msoc;
+		}
+
+		chip->charge_full = false;
+
+		/*
+		 * Raise the recharge voltage so that VBAT_LT_RECHG signal
+		 * will be asserted soon, as the battery SOC has dropped below
+		 * the recharge SOC threshold.
+		 */
+		rc = fg_set_recharge_voltage(chip,
+					chip->dt.recharge_volt_thr_mv);
+		if (rc < 0) {
+			pr_err("Error in setting recharge voltage, rc=%d\n",
+				rc);
+			goto out;
+		}
+		fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
+			bsoc >> 8, recharge_soc, chip->delta_soc);
+	} else {
+		goto out;
+	}
+
+	if (!chip->charge_full)
+		goto out;
+
+	/*
+	 * During JEITA conditions, charge_full can happen early. FULL_SOC
+	 * and MONOTONIC_SOC need to be updated to reflect this. Write the
+	 * battery SOC to FULL_SOC and write a full value to MONOTONIC_SOC.
+	 */
+	rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, (u8 *)&bsoc, 2,
+			FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("failed to write full_soc rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
+			full_soc, 2, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("failed to write monotonic_soc rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc);
+out:
+	mutex_unlock(&chip->charge_full_lock);
+	return rc;
+}
+
+#define RCONN_CONFIG_BIT	BIT(0)
+static int fg_rconn_config(struct fg_chip *chip)
+{
+	int rc, esr_uohms;
+	u64 scaling_factor;
+	u32 val = 0;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (val & RCONN_CONFIG_BIT) {
+		fg_dbg(chip, FG_STATUS, "Rconn already configured: %x\n", val);
+		return 0;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
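+	/*
+	 * The scaling factor is ESR / (ESR + Rconn) expressed in parts per
+	 * thousand (ESR is in uOhms, Rconn in mOhms, hence the * 1000). It
+	 * is applied to both ESR-to-Rslow scaling words below.
+	 */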
+	scaling_factor = div64_u64((u64)esr_uohms * 1000,
+				esr_uohms + (chip->dt.rconn_mohms * 1000));
+
+	rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	val *= scaling_factor;
+	do_div(val, 1000);
+	rc = fg_sram_write(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "esr_rslow_chg modified to %x\n", val & 0xFF);
+
+	rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	val *= scaling_factor;
+	do_div(val, 1000);
+	rc = fg_sram_write(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "esr_rslow_dischg modified to %x\n",
+		val & 0xFF);
+
+	val = RCONN_CONFIG_BIT;
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_set_constant_chg_voltage(struct fg_chip *chip, int volt_uv)
+{
+	u8 buf[2];
+	int rc;
+
+	if (volt_uv <= 0 || volt_uv > 15590000) {
+		pr_err("Invalid voltage %d\n", volt_uv);
+		return -EINVAL;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_VBATT_FULL, volt_uv, buf);
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_FULL].addr_word,
+		chip->sp[FG_SRAM_VBATT_FULL].addr_byte, buf,
+		chip->sp[FG_SRAM_VBATT_FULL].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing vbatt_full, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
+{
+	u8 buf;
+	int rc;
+
+	if (!chip->dt.auto_recharge_soc)
+		return 0;
+
+	fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, &buf,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing recharge_soc_thr, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_adjust_recharge_soc(struct fg_chip *chip)
+{
+	int rc, msoc, recharge_soc, new_recharge_soc = 0;
+
+	if (!chip->dt.auto_recharge_soc)
+		return 0;
+
+	recharge_soc = chip->dt.recharge_soc_thr;
+	/*
+	 * If the input is present and charging had been terminated, adjust
+	 * the recharge SOC threshold based on the monotonic SOC at which
+	 * the charge termination had happened.
+	 */
+	if (is_input_present(chip) && !chip->recharge_soc_adjusted
+		&& chip->charge_done) {
+		/* Get raw monotonic SOC for calculation */
+		rc = fg_get_msoc(chip, &msoc);
+		if (rc < 0) {
+			pr_err("Error in getting msoc, rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Adjust the recharge_soc threshold */
+		new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
+	} else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
+				|| chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+		/* Restore the default value */
+		new_recharge_soc = recharge_soc;
+	}
+
+	if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
+		rc = fg_set_recharge_soc(chip, new_recharge_soc);
+		if (rc) {
+			pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+			return rc;
+		}
+
+		chip->recharge_soc_adjusted = (new_recharge_soc !=
+						recharge_soc);
+		fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
+			new_recharge_soc);
+	}
+
+	return 0;
+}
+
+static int fg_slope_limit_config(struct fg_chip *chip, int batt_temp)
+{
+	enum slope_limit_status status;
+	int rc;
+	u8 buf;
+
+	if (!chip->slope_limit_en)
+		return 0;
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
+		chip->charge_status == POWER_SUPPLY_STATUS_FULL) {
+		if (batt_temp < chip->dt.slope_limit_temp)
+			status = LOW_TEMP_CHARGE;
+		else
+			status = HIGH_TEMP_CHARGE;
+	} else {
+		if (batt_temp < chip->dt.slope_limit_temp)
+			status = LOW_TEMP_DISCHARGE;
+		else
+			status = HIGH_TEMP_DISCHARGE;
+	}
+
+	if (chip->slope_limit_sts == status)
+		return 0;
+
+	fg_encode(chip->sp, FG_SRAM_SLOPE_LIMIT,
+		chip->dt.slope_limit_coeffs[status], &buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_SLOPE_LIMIT].addr_word,
+			chip->sp[FG_SRAM_SLOPE_LIMIT].addr_byte, &buf,
+			chip->sp[FG_SRAM_SLOPE_LIMIT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in configuring slope_limit coefficient, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	chip->slope_limit_sts = status;
+	fg_dbg(chip, FG_STATUS, "Slope limit status: %d value: %x\n", status,
+		buf);
+	return 0;
+}
+
+static int fg_esr_filter_config(struct fg_chip *chip, int batt_temp)
+{
+	u8 esr_tight_lt_flt, esr_broad_lt_flt;
+	bool cold_temp = false;
+	int rc;
+
+	/*
+	 * If the battery temperature is lower than -21 C (-210 decidegC),
+	 * then skip modifying the ESR filter.
+	 */
+	if (batt_temp < -210)
+		return 0;
+
+	/*
+	 * If the battery temperature is at or below the switch temperature
+	 * (10 C by default), apply the ESR low temperature tight and broad
+	 * filter values to the ESR room temperature tight and broad filters.
+	 * Once the battery temperature rises above the switch temperature,
+	 * restore the room temperature ESR filter coefficients.
+	 */
+	if (batt_temp > chip->dt.esr_flt_switch_temp
+		&& chip->esr_flt_cold_temp_en) {
+		fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
+			chip->dt.esr_tight_flt_upct, &esr_tight_lt_flt);
+		fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
+			chip->dt.esr_broad_flt_upct, &esr_broad_lt_flt);
+	} else if (batt_temp <= chip->dt.esr_flt_switch_temp
+			&& !chip->esr_flt_cold_temp_en) {
+		fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
+			chip->dt.esr_tight_lt_flt_upct, &esr_tight_lt_flt);
+		fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
+			chip->dt.esr_broad_lt_flt_upct, &esr_broad_lt_flt);
+		cold_temp = true;
+	} else {
+		return 0;
+	}
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_byte,
+			&esr_tight_lt_flt,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR LT tight filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_byte,
+			&esr_broad_lt_flt,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR LT broad filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->esr_flt_cold_temp_en = cold_temp;
+	fg_dbg(chip, FG_STATUS, "applied %s ESR filter values\n",
+		cold_temp ? "cold" : "normal");
+	return 0;
+}
+
+static int fg_esr_fcc_config(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc;
+	bool parallel_en = false;
+
+	if (is_parallel_charger_available(chip)) {
+		rc = power_supply_get_property(chip->parallel_psy,
+			POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+		if (rc < 0) {
+			pr_err("Error in reading charging_enabled from parallel_psy, rc=%d\n",
+				rc);
+			return rc;
+		}
+		parallel_en = prop.intval;
+	}
+
+	fg_dbg(chip, FG_POWER_SUPPLY, "charge_status: %d parallel_en: %d esr_fcc_ctrl_en: %d\n",
+		chip->charge_status, parallel_en, chip->esr_fcc_ctrl_en);
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+								parallel_en) {
+		if (chip->esr_fcc_ctrl_en)
+			return 0;
+
+		/*
+		 * When parallel charging is enabled, configure ESR FCC to
+		 * 300mA to trigger an ESR pulse. Without this, FG can ask
+		 * the main charger to increase FCC when it is supposed to
+		 * decrease it.
+		 */
+		rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+				ESR_FAST_CRG_IVAL_MASK |
+				ESR_FAST_CRG_CTL_EN_BIT,
+				ESR_FCC_300MA | ESR_FAST_CRG_CTL_EN_BIT);
+		if (rc < 0) {
+			pr_err("Error in writing to %04x, rc=%d\n",
+				BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+			return rc;
+		}
+
+		chip->esr_fcc_ctrl_en = true;
+	} else {
+		if (!chip->esr_fcc_ctrl_en)
+			return 0;
+
+		/*
+		 * If we're here, then it means either the device is not in
+		 * charging state or parallel charging is disabled. Disable
+		 * ESR fast charge current control in SW.
+		 */
+		rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+				ESR_FAST_CRG_CTL_EN_BIT, 0);
+		if (rc < 0) {
+			pr_err("Error in writing to %04x, rc=%d\n",
+				BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+			return rc;
+		}
+
+		chip->esr_fcc_ctrl_en = false;
+	}
+
+	fg_dbg(chip, FG_STATUS, "esr_fcc_ctrl_en set to %d\n",
+		chip->esr_fcc_ctrl_en);
+	return 0;
+}
+
+static void fg_batt_avg_update(struct fg_chip *chip)
+{
+	if (chip->charge_status == chip->prev_charge_status)
+		return;
+
+	cancel_delayed_work_sync(&chip->batt_avg_work);
+	fg_circ_buf_clr(&chip->ibatt_circ_buf);
+	fg_circ_buf_clr(&chip->vbatt_circ_buf);
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
+			chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
+		schedule_delayed_work(&chip->batt_avg_work,
+							msecs_to_jiffies(2000));
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+			struct fg_chip, status_change_work);
+	union power_supply_propval prop = {0, };
+	int rc, batt_temp;
+
+	if (!batt_psy_initialized(chip)) {
+		fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
+		goto out;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+			&prop);
+	if (rc < 0) {
+		pr_err("Error in getting charging status, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->prev_charge_status = chip->charge_status;
+	chip->charge_status = prop.intval;
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+	if (rc < 0) {
+		pr_err("Error in getting charge type, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->charge_type = prop.intval;
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+	if (rc < 0) {
+		pr_err("Error in getting charge_done, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->charge_done = prop.intval;
+	if (chip->cyc_ctr.en)
+		schedule_work(&chip->cycle_count_work);
+
+	fg_cap_learning_update(chip);
+
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	rc = fg_adjust_recharge_soc(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+	rc = fg_adjust_ki_coeff_dischg(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+	rc = fg_esr_fcc_config(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (!rc) {
+		rc = fg_slope_limit_config(chip, batt_temp);
+		if (rc < 0)
+			pr_err("Error in configuring slope limiter rc:%d\n",
+				rc);
+	}
+
+	fg_batt_avg_update(chip);
+
+out:
+	fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+		chip->charge_status, chip->charge_type, chip->charge_done);
+	pm_relax(chip->dev);
+}
+
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+	int rc = 0, i;
+	u8 data[2];
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	for (i = 0; i < BUCKET_COUNT; i++) {
+		rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2),
+				CYCLE_COUNT_OFFSET + (i % 2) * 2, data, 2,
+				FG_IMA_DEFAULT);
+		if (rc < 0)
+			pr_err("failed to read bucket %d rc=%d\n", i, rc);
+		else
+			chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+	}
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+	int rc = 0, i;
+
+	if (!chip->cyc_ctr.en)
+		return;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	memset(chip->cyc_ctr.count, 0, sizeof(chip->cyc_ctr.count));
+	for (i = 0; i < BUCKET_COUNT; i++) {
+		chip->cyc_ctr.started[i] = false;
+		chip->cyc_ctr.last_soc[i] = 0;
+	}
+	rc = fg_sram_write(chip, CYCLE_COUNT_WORD, CYCLE_COUNT_OFFSET,
+			(u8 *)&chip->cyc_ctr.count,
+			sizeof(chip->cyc_ctr.count), FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to clear cycle counter rc=%d\n", rc);
+
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+	int rc = 0;
+	u16 cyc_count;
+	u8 data[2];
+
+	if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+		return 0;
+
+	cyc_count = chip->cyc_ctr.count[bucket];
+	cyc_count++;
+	data[0] = cyc_count & 0xFF;
+	data[1] = cyc_count >> 8;
+
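+	/*
+	 * Two 16-bit cycle counters are packed per SRAM word: bucket / 2
+	 * selects the word and (bucket % 2) * 2 the byte offset within it.
+	 */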
+	rc = fg_sram_write(chip, CYCLE_COUNT_WORD + (bucket / 2),
+			CYCLE_COUNT_OFFSET + (bucket % 2) * 2, data, 2,
+			FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
+			bucket, rc);
+	else
+		chip->cyc_ctr.count[bucket] = cyc_count;
+	return rc;
+}
+
+static void cycle_count_work(struct work_struct *work)
+{
+	int rc = 0, bucket, i, batt_soc;
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				cycle_count_work);
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+	if (rc < 0) {
+		pr_err("Failed to read battery soc rc: %d\n", rc);
+		goto out;
+	}
+
+	/* We need only the most significant byte here */
+	batt_soc = (u32)batt_soc >> 24;
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		/* Find out which bucket the SOC falls in */
+		bucket = batt_soc / BUCKET_SOC_PCT;
+		pr_debug("batt_soc: %d bucket: %d\n", batt_soc, bucket);
+
+		/*
+		 * If we've started counting for the previous bucket,
+		 * then store the counter for that bucket if the
+		 * counter for current bucket is getting started.
+		 */
+		if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
+			!chip->cyc_ctr.started[bucket]) {
+			rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
+			if (rc < 0) {
+				pr_err("Error in storing cycle_ctr rc: %d\n",
+					rc);
+				goto out;
+			} else {
+				chip->cyc_ctr.started[bucket - 1] = false;
+				chip->cyc_ctr.last_soc[bucket - 1] = 0;
+			}
+		}
+		if (!chip->cyc_ctr.started[bucket]) {
+			chip->cyc_ctr.started[bucket] = true;
+			chip->cyc_ctr.last_soc[bucket] = batt_soc;
+		}
+	} else {
+		for (i = 0; i < BUCKET_COUNT; i++) {
+			if (chip->cyc_ctr.started[i] &&
+				batt_soc > chip->cyc_ctr.last_soc[i]) {
+				rc = fg_inc_store_cycle_ctr(chip, i);
+				if (rc < 0)
+					pr_err("Error in storing cycle_ctr rc: %d\n",
+						rc);
+				chip->cyc_ctr.last_soc[i] = 0;
+			}
+			chip->cyc_ctr.started[i] = false;
+		}
+	}
+out:
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+	int count;
+
+	if (!chip->cyc_ctr.en)
+		return 0;
+
+	if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+		return -EINVAL;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+	mutex_unlock(&chip->cyc_ctr.lock);
+	return count;
+}
+
+#define PROFILE_LOAD_BIT	BIT(0)
+#define BOOTLOADER_LOAD_BIT	BIT(1)
+#define BOOTLOADER_RESTART_BIT	BIT(2)
+#define HLOS_RESTART_BIT	BIT(3)
+static bool is_profile_load_required(struct fg_chip *chip)
+{
+	u8 buf[PROFILE_COMP_LEN], val;
+	bool profiles_same = false;
+	int rc;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to read profile integrity rc=%d\n", rc);
+		return false;
+	}
+
+	/* Check if integrity bit is set */
+	if (val & PROFILE_LOAD_BIT) {
+		fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+		rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+				buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in reading battery profile, rc:%d\n", rc);
+			return false;
+		}
+		profiles_same = memcmp(chip->batt_profile, buf,
+					PROFILE_COMP_LEN) == 0;
+		if (profiles_same) {
+			fg_dbg(chip, FG_STATUS, "Battery profile is same, not loading it\n");
+			return false;
+		}
+
+		if (!chip->dt.force_load_profile) {
+			pr_warn("Profiles doesn't match, skipping loading it since force_load_profile is disabled\n");
+			if (fg_profile_dump) {
+				pr_info("FG: loaded profile:\n");
+				dump_sram(buf, PROFILE_LOAD_WORD,
+					PROFILE_COMP_LEN);
+				pr_info("FG: available profile:\n");
+				dump_sram(chip->batt_profile, PROFILE_LOAD_WORD,
+					PROFILE_LEN);
+			}
+			return false;
+		}
+
+		fg_dbg(chip, FG_STATUS, "Profiles are different, loading the correct one\n");
+	} else {
+		fg_dbg(chip, FG_STATUS, "Profile integrity bit is not set\n");
+		if (fg_profile_dump) {
+			pr_info("FG: profile to be loaded:\n");
+			dump_sram(chip->batt_profile, PROFILE_LOAD_WORD,
+				PROFILE_LEN);
+		}
+	}
+	return true;
+}
+
+static void clear_battery_profile(struct fg_chip *chip)
+{
+	u8 val = 0;
+	int rc;
+
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to write profile integrity rc=%d\n", rc);
+}
+
+#define SOC_READY_WAIT_MS		2000
+static int __fg_restart(struct fg_chip *chip)
+{
+	int rc, msoc;
+	bool tried_again = false;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->last_soc = msoc;
+	chip->fg_restarting = true;
+	reinit_completion(&chip->soc_ready);
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
+			RESTART_GO_BIT);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+
+wait:
+	rc = wait_for_completion_interruptible_timeout(&chip->soc_ready,
+		msecs_to_jiffies(SOC_READY_WAIT_MS));
+
+	/* If we were interrupted, wait one more time. */
+	if (rc == -ERESTARTSYS && !tried_again) {
+		tried_again = true;
+		goto wait;
+	} else if (rc <= 0) {
+		pr_err("wait for soc_ready timed out rc=%d\n", rc);
+	}
+
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+out:
+	chip->fg_restarting = false;
+	return rc;
+}
+
+static void profile_load_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				profile_load_work.work);
+	u8 buf[2], val;
+	int rc;
+
+	vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+	if (!is_profile_load_required(chip))
+		goto done;
+
+	clear_cycle_counter(chip);
+	mutex_lock(&chip->cl.lock);
+	chip->cl.learned_cc_uah = 0;
+	chip->cl.active = false;
+	mutex_unlock(&chip->cl.lock);
+
+	fg_dbg(chip, FG_STATUS, "profile loading started\n");
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+
+	/* load battery profile */
+	rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+			chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("Error in writing battery profile, rc:%d\n", rc);
+		goto out;
+	}
+
+	rc = __fg_restart(chip);
+	if (rc < 0) {
+		pr_err("Error in restarting FG, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "SOC is ready\n");
+
+	/* Set the profile integrity bit */
+	val = HLOS_RESTART_BIT | PROFILE_LOAD_BIT;
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to write profile integrity rc=%d\n", rc);
+		goto out;
+	}
+
+done:
+	rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
+			NOM_CAP_OFFSET, rc);
+	} else {
+		chip->cl.nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+		rc = fg_load_learned_cap_from_sram(chip);
+		if (rc < 0)
+			pr_err("Error in loading capacity learning data, rc:%d\n",
+				rc);
+	}
+
+	batt_psy_initialized(chip);
+	fg_notify_charger(chip);
+	chip->profile_loaded = true;
+	chip->soc_reporting_ready = true;
+	fg_dbg(chip, FG_STATUS, "profile loaded successfully");
+out:
+	vote(chip->awake_votable, PROFILE_LOAD, false, 0);
+}
+
+static void sram_dump_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					    sram_dump_work.work);
+	u8 buf[FG_SRAM_LEN];
+	int rc;
+	s64 timestamp_ms, quotient;
+	s32 remainder;
+
+	rc = fg_sram_read(chip, 0, 0, buf, FG_SRAM_LEN, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading FG SRAM, rc:%d\n", rc);
+		goto resched;
+	}
+
+	timestamp_ms = ktime_to_ms(ktime_get_boottime());
+	quotient = div_s64_rem(timestamp_ms, 1000, &remainder);
+	fg_dbg(chip, FG_STATUS, "SRAM Dump Started at %lld.%d\n",
+		quotient, remainder);
+	dump_sram(buf, 0, FG_SRAM_LEN);
+	timestamp_ms = ktime_to_ms(ktime_get_boottime());
+	quotient = div_s64_rem(timestamp_ms, 1000, &remainder);
+	fg_dbg(chip, FG_STATUS, "SRAM Dump done at %lld.%d\n",
+		quotient, remainder);
+resched:
+	schedule_delayed_work(&chip->sram_dump_work,
+			msecs_to_jiffies(fg_sram_dump_period_ms));
+}
+
+static int fg_sram_dump_sysfs(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+	bool old_val = fg_sram_dump;
+
+	rc = param_set_bool(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_sram_dump: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_sram_dump == old_val)
+		return 0;
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return -ENODEV;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	if (fg_sram_dump)
+		schedule_delayed_work(&chip->sram_dump_work,
+				msecs_to_jiffies(fg_sram_dump_period_ms));
+	else
+		cancel_delayed_work_sync(&chip->sram_dump_work);
+
+	return 0;
+}
+
+static struct kernel_param_ops fg_sram_dump_ops = {
+	.set = fg_sram_dump_sysfs,
+	.get = param_get_bool,
+};
+
+module_param_cb(sram_dump_en, &fg_sram_dump_ops, &fg_sram_dump, 0644);
+
+static int fg_restart_sysfs(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+
+	rc = param_set_int(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_restart: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_restart != 1) {
+		pr_err("Bad value %d\n", fg_restart);
+		return -EINVAL;
+	}
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return 0;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	rc = __fg_restart(chip);
+	if (rc < 0) {
+		pr_err("Error in restarting FG, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_info("FG restart done\n");
+	return rc;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+	.set = fg_restart_sysfs,
+	.get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
+#define BATT_AVG_POLL_PERIOD_MS	10000
+static void batt_avg_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					    batt_avg_work.work);
+	int rc, ibatt_now, vbatt_now;
+
+	mutex_lock(&chip->batt_avg_lock);
+	rc = fg_get_battery_current(chip, &ibatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery current, rc=%d\n", rc);
+		goto reschedule;
+	}
+
+	rc = fg_get_battery_voltage(chip, &vbatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		goto reschedule;
+	}
+
+	fg_circ_buf_add(&chip->ibatt_circ_buf, ibatt_now);
+	fg_circ_buf_add(&chip->vbatt_circ_buf, vbatt_now);
+
+reschedule:
+	mutex_unlock(&chip->batt_avg_lock);
+	schedule_delayed_work(&chip->batt_avg_work,
+			      msecs_to_jiffies(BATT_AVG_POLL_PERIOD_MS));
+}
+
+#define HOURS_TO_SECONDS	3600
+#define OCV_SLOPE_UV		10869
+#define MILLI_UNIT		1000
+#define MICRO_UNIT		1000000
+static int fg_get_time_to_full(struct fg_chip *chip, int *val)
+{
+	int rc, ibatt_avg, vbatt_avg, rbatt, msoc, ocv_cc2cv, full_soc,
+		act_cap_uah;
+	s32 i_cc2cv, soc_cc2cv, ln_val, centi_tau_scale;
+	s64 t_predicted_cc = 0, t_predicted_cv = 0;
+
+	if (chip->bp.float_volt_uv <= 0) {
+		pr_err("battery profile is not loaded\n");
+		return -ENODATA;
+	}
+
+	if (!batt_psy_initialized(chip)) {
+		fg_dbg(chip, FG_TTF, "charger is not available\n");
+		return -ENODATA;
+	}
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("failed to get msoc rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_TTF, "msoc=%d\n", msoc);
+
+	if (msoc >= 100) {
+		*val = 0;
+		return 0;
+	}
+
+	mutex_lock(&chip->batt_avg_lock);
+	rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous current */
+		rc = fg_get_battery_current(chip, &ibatt_avg);
+		if (rc < 0) {
+			mutex_unlock(&chip->batt_avg_lock);
+			pr_err("failed to get battery current, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = fg_circ_buf_avg(&chip->vbatt_circ_buf, &vbatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous voltage */
+		rc = fg_get_battery_voltage(chip, &vbatt_avg);
+		if (rc < 0) {
+			mutex_unlock(&chip->batt_avg_lock);
+			pr_err("failed to get battery voltage, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	mutex_unlock(&chip->batt_avg_lock);
+	fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);
+
+	/* clamp ibatt_avg to -150mA */
+	if (ibatt_avg > -150000)
+		ibatt_avg = -150000;
+	fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);
+
+	/* reverse polarity to be consistent with unsigned current settings */
+	ibatt_avg = abs(ibatt_avg);
+
+	/* estimated battery current at the CC to CV transition */
+	i_cc2cv = div_s64((s64)ibatt_avg * vbatt_avg, chip->bp.float_volt_uv);
+	fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);
+
+	rc = fg_get_battery_resistance(chip, &rbatt);
+	if (rc < 0) {
+		pr_err("failed to get battery resistance rc=%d\n", rc);
+		return rc;
+	}
+
+	/* clamp rbatt to 50mOhms */
+	if (rbatt < 50000)
+		rbatt = 50000;
+
+	fg_dbg(chip, FG_TTF, "rbatt=%d\n", rbatt);
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+	if (rc < 0) {
+		pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
+		return rc;
+	}
+	act_cap_uah *= MILLI_UNIT;
+	fg_dbg(chip, FG_TTF, "actual_capacity_uah=%d\n", act_cap_uah);
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
+	if (rc < 0) {
+		pr_err("failed to get full soc rc=%d\n", rc);
+		return rc;
+	}
+	full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
+								FULL_SOC_RAW);
+	fg_dbg(chip, FG_TTF, "full_soc=%d\n", full_soc);
+
+	/* if we are already in CV state then we can skip estimating CC */
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		goto skip_cc_estimate;
+
+	/* if the charger is current limited then use power approximation */
+	if (ibatt_avg > chip->bp.fastchg_curr_ma * MILLI_UNIT - 50000)
+		ocv_cc2cv = div_s64((s64)rbatt * ibatt_avg, MICRO_UNIT);
+	else
+		ocv_cc2cv = div_s64((s64)rbatt * i_cc2cv, MICRO_UNIT);
+	ocv_cc2cv = chip->bp.float_volt_uv - ocv_cc2cv;
+	fg_dbg(chip, FG_TTF, "ocv_cc2cv=%d\n", ocv_cc2cv);
+
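+	/*
+	 * Linearize the OCV curve near full: every OCV_SLOPE_UV (~10.9 mV)
+	 * of OCV below the float voltage corresponds to roughly 1% of SOC.
+	 */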
+	soc_cc2cv = div_s64(chip->bp.float_volt_uv - ocv_cc2cv, OCV_SLOPE_UV);
+	/* estimated SOC at the CC to CV transition */
+	soc_cc2cv = 100 - soc_cc2cv;
+	fg_dbg(chip, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv);
+
+	/* the estimated SOC may be lower than the current SOC */
+	if (soc_cc2cv - msoc <= 0)
+		goto skip_cc_estimate;
+
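+	/*
+	 * CC phase estimate: time = (SOC delta up to the CC->CV point) *
+	 * usable capacity / average CC-phase current, where the average
+	 * current is taken as (ibatt_avg + i_cc2cv) / 2 and i_cc2cv above
+	 * assumes roughly constant charging power at the float voltage.
+	 */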
+	t_predicted_cc = div_s64((s64)full_soc * act_cap_uah, 100);
+	t_predicted_cc = div_s64(t_predicted_cc * (soc_cc2cv - msoc), 100);
+	t_predicted_cc *= HOURS_TO_SECONDS;
+	t_predicted_cc = div_s64(t_predicted_cc, (ibatt_avg + i_cc2cv) / 2);
+
+skip_cc_estimate:
+	fg_dbg(chip, FG_TTF, "t_predicted_cc=%lld\n", t_predicted_cc);
+
+	/* CV estimate starts here */
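+	/*
+	 * The CV taper is modelled as an exponential decay with a time
+	 * constant proportional to Rbatt * capacity; the remaining time is
+	 * roughly that constant times the log of the taper-start current
+	 * over the termination current. fg_ln_table provides the log via
+	 * interpolation and centi_tau_scale shrinks the constant above 95%
+	 * MSOC so the estimate converges to zero near full.
+	 */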
+	if (chip->charge_type >= POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		ln_val = ibatt_avg / (abs(chip->dt.sys_term_curr_ma) + 200);
+	else
+		ln_val = i_cc2cv / (abs(chip->dt.sys_term_curr_ma) + 200);
+
+	if (msoc < 95)
+		centi_tau_scale = 100;
+	else
+		centi_tau_scale = 20 * (100 - msoc);
+
+	fg_dbg(chip, FG_TTF, "ln_in=%d\n", ln_val);
+	rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), ln_val, &ln_val);
+	fg_dbg(chip, FG_TTF, "ln_out=%d\n", ln_val);
+	t_predicted_cv = div_s64((s64)act_cap_uah * rbatt, MICRO_UNIT);
+	t_predicted_cv = div_s64(t_predicted_cv * centi_tau_scale, 100);
+	t_predicted_cv = div_s64(t_predicted_cv * ln_val, MILLI_UNIT);
+	t_predicted_cv = div_s64(t_predicted_cv * HOURS_TO_SECONDS, MICRO_UNIT);
+	fg_dbg(chip, FG_TTF, "t_predicted_cv=%lld\n", t_predicted_cv);
+	*val = t_predicted_cc + t_predicted_cv;
+	return 0;
+}
+
+#define CENTI_ICORRECT_C0	105
+#define CENTI_ICORRECT_C1	20
+static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
+{
+	int rc, ibatt_avg, msoc, act_cap_uah;
+	s32 divisor;
+	s64 t_predicted;
+
+	rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous current */
+		rc = fg_get_battery_current(chip, &ibatt_avg);
+		if (rc < 0) {
+			pr_err("failed to get battery current, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* clamp ibatt_avg to 150mA */
+	if (ibatt_avg < 150000)
+		ibatt_avg = 150000;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+	act_cap_uah *= MILLI_UNIT;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	t_predicted = div_s64((s64)msoc * act_cap_uah, 100);
+	t_predicted *= HOURS_TO_SECONDS;
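+	/*
+	 * The average current is scaled by a linear-in-SOC correction,
+	 * (CENTI_ICORRECT_C0 + CENTI_ICORRECT_C1 * msoc / 100) / 100, i.e.
+	 * about 1.05x near empty growing to 1.25x at full SOC.
+	 */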
+	divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
+	divisor = div_s64((s64)divisor * ibatt_avg, 10000);
+	if (divisor > 0)
+		t_predicted = div_s64(t_predicted, divisor);
+
+	*val = t_predicted;
+	return 0;
+}
+
+static int fg_update_maint_soc(struct fg_chip *chip)
+{
+	int rc = 0, msoc;
+
+	mutex_lock(&chip->charge_full_lock);
+	if (chip->delta_soc <= 0)
+		goto out;
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting msoc, rc=%d\n", rc);
+		goto out;
+	}
+
+	if (msoc > chip->maint_soc) {
+		/*
+		 * When the monotonic SOC goes above maintenance SOC, we should
+		 * stop showing the maintenance SOC.
+		 */
+		chip->delta_soc = 0;
+		chip->maint_soc = 0;
+	} else if (msoc <= chip->last_msoc) {
+		/* MSOC is decreasing. Decrease maintenance SOC as well */
+		chip->maint_soc -= 1;
+		if (!(msoc % 10)) {
+			/*
+			 * Reduce the maintenance SOC additionally by 1 whenever
+			 * it crosses a SOC multiple of 10.
+			 */
+			chip->maint_soc -= 1;
+			chip->delta_soc -= 1;
+		}
+	}
+
+	fg_dbg(chip, FG_IRQ, "msoc: %d last_msoc: %d maint_soc: %d delta_soc: %d\n",
+		msoc, chip->last_msoc, chip->maint_soc, chip->delta_soc);
+	chip->last_msoc = msoc;
+out:
+	mutex_unlock(&chip->charge_full_lock);
+	return rc;
+}
+
+static int fg_esr_validate(struct fg_chip *chip)
+{
+	int rc, esr_uohms;
+	u8 buf[2];
+
+	if (chip->dt.esr_clamp_mohms <= 0)
+		return 0;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (esr_uohms >= chip->dt.esr_clamp_mohms * 1000) {
+		pr_debug("ESR %d is > ESR_clamp\n", esr_uohms);
+		return 0;
+	}
+
+	esr_uohms = chip->dt.esr_clamp_mohms * 1000;
+	fg_encode(chip->sp, FG_SRAM_ESR, esr_uohms, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR].addr_word,
+			chip->sp[FG_SRAM_ESR].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "ESR clamped to %duOhms\n", esr_uohms);
+	return 0;
+}
+
+/* PSY CALLBACKS STAY HERE */
+
+static int fg_psy_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *pval)
+{
+	struct fg_chip *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = fg_get_prop_capacity(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		if (chip->battery_missing)
+			pval->intval = 3700000;
+		else
+			rc = fg_get_battery_voltage(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = fg_get_battery_current(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = fg_get_battery_temp(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE:
+		rc = fg_get_battery_resistance(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+		rc = fg_get_sram_prop(chip, FG_SRAM_OCV, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		pval->intval = chip->cl.nom_cap_uah;
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_ID:
+		pval->intval = chip->batt_id_ohms;
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_TYPE:
+		pval->strval = fg_get_battery_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		pval->intval = chip->bp.float_volt_uv;
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		pval->intval = fg_get_cycle_count(chip);
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		pval->intval = chip->cyc_ctr.id;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+		rc = fg_get_cc_soc(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		pval->intval = chip->cl.init_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		pval->intval = chip->cl.learned_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		rc = fg_get_cc_soc_sw(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+		rc = fg_get_time_to_full(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+		rc = fg_get_time_to_empty(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+		pval->intval = chip->soc_reporting_ready;
+		break;
+	case POWER_SUPPLY_PROP_DEBUG_BATTERY:
+		pval->intval = is_debug_batt_id(chip);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		rc = fg_get_sram_prop(chip, FG_SRAM_VBATT_FULL, &pval->intval);
+		break;
+	default:
+		pr_err("unsupported property %d\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc < 0)
+		return -ENODATA;
+
+	return 0;
+}
+
+static int fg_psy_set_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  const union power_supply_propval *pval)
+{
+	struct fg_chip *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		if ((pval->intval > 0) && (pval->intval <= BUCKET_COUNT)) {
+			chip->cyc_ctr.id = pval->intval;
+		} else {
+			pr_err("rejecting invalid cycle_count_id = %d\n",
+				pval->intval);
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		rc = fg_set_constant_chg_voltage(chip, pval->intval);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int fg_property_is_writeable(struct power_supply *psy,
+						enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void fg_external_power_changed(struct power_supply *psy)
+{
+	pr_debug("power supply changed\n");
+}
+
+static int fg_notifier_cb(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct power_supply *psy = data;
+	struct fg_chip *chip = container_of(nb, struct fg_chip, nb);
+
+	if (event != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (work_pending(&chip->status_change_work))
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "battery") == 0)
+		|| (strcmp(psy->desc->name, "usb") == 0)) {
+		/*
+		 * We cannot vote for awake votable here as that takes
+		 * a mutex lock and this is executed in an atomic context.
+		 */
+		pm_stay_awake(chip->dev);
+		schedule_work(&chip->status_change_work);
+	}
+
+	return NOTIFY_OK;
+}
+
+static enum power_supply_property fg_psy_props[] = {
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_RESISTANCE_ID,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+};
+
+static const struct power_supply_desc fg_psy_desc = {
+	.name = "bms",
+	.type = POWER_SUPPLY_TYPE_BMS,
+	.properties = fg_psy_props,
+	.num_properties = ARRAY_SIZE(fg_psy_props),
+	.get_property = fg_psy_get_property,
+	.set_property = fg_psy_set_property,
+	.external_power_changed = fg_external_power_changed,
+	.property_is_writeable = fg_property_is_writeable,
+};
+
+/* INIT FUNCTIONS STAY HERE */
+
+static int fg_hw_init(struct fg_chip *chip)
+{
+	int rc;
+	u8 buf[4], val;
+
+	fg_encode(chip->sp, FG_SRAM_CUTOFF_VOLT, chip->dt.cutoff_volt_mv, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CUTOFF_VOLT].addr_word,
+			chip->sp[FG_SRAM_CUTOFF_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_CUTOFF_VOLT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing cutoff_volt, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_EMPTY_VOLT, chip->dt.empty_volt_mv, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_EMPTY_VOLT].addr_word,
+			chip->sp[FG_SRAM_EMPTY_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_EMPTY_VOLT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing empty_volt, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* This SRAM register is only present in v2.0 and above */
+	if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
+					chip->bp.float_volt_uv > 0) {
+		fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+			chip->bp.float_volt_uv / 1000, buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+			chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing float_volt, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->bp.vbatt_full_mv > 0) {
+		rc = fg_set_constant_chg_voltage(chip,
+				chip->bp.vbatt_full_mv * 1000);
+		if (rc < 0)
+			return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
+			chip->sp[FG_SRAM_CHG_TERM_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_CHG_TERM_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing chg_term_curr, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_SYS_TERM_CURR, chip->dt.sys_term_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_SYS_TERM_CURR].addr_word,
+			chip->sp[FG_SRAM_SYS_TERM_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_SYS_TERM_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing sys_term_curr, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->dt.vbatt_low_thr_mv > 0) {
+		fg_encode(chip->sp, FG_SRAM_VBATT_LOW,
+			chip->dt.vbatt_low_thr_mv, buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_LOW].addr_word,
+				chip->sp[FG_SRAM_VBATT_LOW].addr_byte, buf,
+				chip->sp[FG_SRAM_VBATT_LOW].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing vbatt_low_thr, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.delta_soc_thr > 0 && chip->dt.delta_soc_thr < 100) {
+		fg_encode(chip->sp, FG_SRAM_DELTA_MSOC_THR,
+			chip->dt.delta_soc_thr, buf);
+		rc = fg_sram_write(chip,
+				chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_word,
+				chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_byte,
+				buf, chip->sp[FG_SRAM_DELTA_MSOC_THR].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing delta_msoc_thr, rc=%d\n", rc);
+			return rc;
+		}
+
+		fg_encode(chip->sp, FG_SRAM_DELTA_BSOC_THR,
+			chip->dt.delta_soc_thr, buf);
+		rc = fg_sram_write(chip,
+				chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_word,
+				chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_byte,
+				buf, chip->sp[FG_SRAM_DELTA_BSOC_THR].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing delta_bsoc_thr, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Configure the battery thermal coefficients c1, c2 and c3
+	 * if they are non-zero.
+	 */
+	if (chip->dt.batt_therm_coeffs[0] > 0) {
+		rc = fg_write(chip, BATT_INFO_THERM_C1(chip),
+			chip->dt.batt_therm_coeffs, BATT_THERM_NUM_COEFFS);
+		if (rc < 0) {
+			pr_err("Error in writing battery thermal coefficients, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.recharge_soc_thr > 0 && chip->dt.recharge_soc_thr < 100) {
+		rc = fg_set_recharge_soc(chip, chip->dt.recharge_soc_thr);
+		if (rc < 0) {
+			pr_err("Error in setting recharge_soc, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.recharge_volt_thr_mv > 0) {
+		rc = fg_set_recharge_voltage(chip,
+			chip->dt.recharge_volt_thr_mv);
+		if (rc < 0) {
+			pr_err("Error in setting recharge_voltage, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.rsense_sel >= SRC_SEL_BATFET &&
+			chip->dt.rsense_sel < SRC_SEL_RESERVED) {
+		rc = fg_masked_write(chip, BATT_INFO_IBATT_SENSING_CFG(chip),
+				SOURCE_SELECT_MASK, chip->dt.rsense_sel);
+		if (rc < 0) {
+			pr_err("Error in writing rsense_sel, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
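+	/*
+	 * Note the register naming offset: the DT "cold" threshold is
+	 * written to the JEITA_TOO_COLD register, "cool" to JEITA_COLD,
+	 * "warm" to JEITA_HOT and "hot" to JEITA_TOO_HOT.
+	 */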
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_COLD], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_TOO_COLD(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_cold, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_COOL], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_COLD(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_cool, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_WARM], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_HOT(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_warm, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_HOT], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_TOO_HOT(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_hot, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->dt.esr_timer_charging > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_charging, true,
+				      FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.esr_timer_awake > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
+				      FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->cyc_ctr.en)
+		restore_cycle_counter(chip);
+
+	if (chip->dt.jeita_hyst_temp >= 0) {
+		val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
+		rc = fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+			JEITA_TEMP_HYST_MASK, val);
+		if (rc < 0) {
+			pr_err("Error in writing batt_temp_cfg, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	get_batt_temp_delta(chip->dt.batt_temp_delta, &val);
+	rc = fg_masked_write(chip, BATT_INFO_BATT_TMPR_INTR(chip),
+			CHANGE_THOLD_MASK, val);
+	if (rc < 0) {
+		pr_err("Error in writing batt_temp_delta, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->dt.rconn_mohms > 0) {
+		rc = fg_rconn_config(chip);
+		if (rc < 0) {
+			pr_err("Error in configuring Rconn, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
+		chip->dt.esr_tight_flt_upct, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR tight filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
+		chip->dt.esr_broad_flt_upct, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR broad filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_memif_init(struct fg_chip *chip)
+{
+	return fg_ima_init(chip);
+}
+
+/* INTERRUPT HANDLERS STAY HERE */
+
+static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	u8 status;
+	int rc;
+
+	rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			MEM_IF_INT_RT_STS(chip), rc);
+		return IRQ_HANDLED;
+	}
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
+	if (status & MEM_XCP_BIT) {
+		rc = fg_clear_dma_errors_if_any(chip);
+		if (rc < 0) {
+			pr_err("Error in clearing DMA error, rc=%d\n", rc);
+			return IRQ_HANDLED;
+		}
+
+		mutex_lock(&chip->sram_rw_lock);
+		rc = fg_clear_ima_errors_if_any(chip, true);
+		if (rc < 0 && rc != -EAGAIN)
+			pr_err("Error in checking IMA errors rc:%d\n", rc);
+		mutex_unlock(&chip->sram_rw_lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	u8 status;
+	int rc;
+
+	rc = fg_read(chip, BATT_INFO_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_INT_RT_STS(chip), rc);
+		return IRQ_HANDLED;
+	}
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered sts:%d\n", irq, status);
+	chip->battery_missing = (status & BT_MISS_BIT);
+
+	if (chip->battery_missing) {
+		chip->profile_available = false;
+		chip->profile_loaded = false;
+		chip->soc_reporting_ready = false;
+		return IRQ_HANDLED;
+	}
+
+	rc = fg_get_batt_id(chip);
+	if (rc < 0) {
+		chip->soc_reporting_ready = true;
+		pr_err("Error in getting battery id, rc:%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = fg_get_batt_profile(chip);
+	if (rc < 0) {
+		chip->soc_reporting_ready = true;
+		pr_err("Error in getting battery profile, rc:%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	clear_battery_profile(chip);
+	schedule_delayed_work(&chip->profile_load_work, 0);
+
+	if (chip->fg_psy)
+		power_supply_changed(chip->fg_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	union power_supply_propval prop = {0, };
+	int rc, batt_temp;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (rc < 0) {
+		pr_err("Error in getting batt_temp\n");
+		return IRQ_HANDLED;
+	}
+
+	rc = fg_esr_filter_config(chip, batt_temp);
+	if (rc < 0)
+		pr_err("Error in configuring ESR filter rc:%d\n", rc);
+
+	rc = fg_slope_limit_config(chip, batt_temp);
+	if (rc < 0)
+		pr_err("Error in configuring slope limiter rc:%d\n", rc);
+
+	if (!batt_psy_initialized(chip)) {
+		chip->last_batt_temp = batt_temp;
+		return IRQ_HANDLED;
+	}
+
+	power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+		&prop);
+	chip->health = prop.intval;
+
+	if (abs(chip->last_batt_temp - batt_temp) > 30)
+		pr_warn("Battery temperature last:%d current: %d\n",
+			chip->last_batt_temp, batt_temp);
+
+	if (chip->last_batt_temp != batt_temp) {
+		chip->last_batt_temp = batt_temp;
+		power_supply_changed(chip->batt_psy);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_first_est_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	complete_all(&chip->soc_ready);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_update_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	complete_all(&chip->soc_update);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_bsoc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	int rc;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	int rc;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	if (chip->cyc_ctr.en)
+		schedule_work(&chip->cycle_count_work);
+
+	if (chip->cl.active)
+		fg_cap_learning_update(chip);
+
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	rc = fg_adjust_ki_coeff_dischg(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+	rc = fg_update_maint_soc(chip);
+	if (rc < 0)
+		pr_err("Error in updating maint_soc, rc=%d\n", rc);
+
+	rc = fg_esr_validate(chip);
+	if (rc < 0)
+		pr_err("Error in validating ESR, rc=%d\n", rc);
+
+	if (batt_psy_initialized(chip))
+		power_supply_changed(chip->batt_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_empty_soc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	if (batt_psy_initialized(chip))
+		power_supply_changed(chip->batt_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
+{
+	pr_debug("irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
+	/* BATT_SOC irqs */
+	[MSOC_FULL_IRQ] = {
+		.name		= "msoc-full",
+		.handler	= fg_soc_irq_handler,
+	},
+	[MSOC_HIGH_IRQ] = {
+		.name		= "msoc-high",
+		.handler	= fg_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_EMPTY_IRQ] = {
+		.name		= "msoc-empty",
+		.handler	= fg_empty_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_LOW_IRQ] = {
+		.name		= "msoc-low",
+		.handler	= fg_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_DELTA_IRQ] = {
+		.name		= "msoc-delta",
+		.handler	= fg_delta_msoc_irq_handler,
+		.wakeable	= true,
+	},
+	[BSOC_DELTA_IRQ] = {
+		.name		= "bsoc-delta",
+		.handler	= fg_delta_bsoc_irq_handler,
+		.wakeable	= true,
+	},
+	[SOC_READY_IRQ] = {
+		.name		= "soc-ready",
+		.handler	= fg_first_est_irq_handler,
+		.wakeable	= true,
+	},
+	[SOC_UPDATE_IRQ] = {
+		.name		= "soc-update",
+		.handler	= fg_soc_update_irq_handler,
+	},
+	/* BATT_INFO irqs */
+	[BATT_TEMP_DELTA_IRQ] = {
+		.name		= "batt-temp-delta",
+		.handler	= fg_delta_batt_temp_irq_handler,
+		.wakeable	= true,
+	},
+	[BATT_MISSING_IRQ] = {
+		.name		= "batt-missing",
+		.handler	= fg_batt_missing_irq_handler,
+		.wakeable	= true,
+	},
+	[ESR_DELTA_IRQ] = {
+		.name		= "esr-delta",
+		.handler	= fg_dummy_irq_handler,
+	},
+	[VBATT_LOW_IRQ] = {
+		.name		= "vbatt-low",
+		.handler	= fg_vbatt_low_irq_handler,
+		.wakeable	= true,
+	},
+	[VBATT_PRED_DELTA_IRQ] = {
+		.name		= "vbatt-pred-delta",
+		.handler	= fg_dummy_irq_handler,
+	},
+	/* MEM_IF irqs */
+	[DMA_GRANT_IRQ] = {
+		.name		= "dma-grant",
+		.handler	= fg_dummy_irq_handler,
+	},
+	[MEM_XCP_IRQ] = {
+		.name		= "mem-xcp",
+		.handler	= fg_mem_xcp_irq_handler,
+	},
+	[IMA_RDY_IRQ] = {
+		.name		= "ima-rdy",
+		.handler	= fg_dummy_irq_handler,
+	},
+};
+
+static int fg_get_irq_index_byname(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fg_irqs); i++) {
+		if (strcmp(fg_irqs[i].name, name) == 0)
+			return i;
+	}
+
+	pr_err("%s is not in irq list\n", name);
+	return -ENOENT;
+}
+
+static int fg_register_interrupts(struct fg_chip *chip)
+{
+	struct device_node *child, *node = chip->dev->of_node;
+	struct property *prop;
+	const char *name;
+	int rc, irq, irq_index;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names", prop,
+						name) {
+			irq = of_irq_get_byname(child, name);
+			if (irq < 0) {
+				dev_err(chip->dev, "failed to get irq %s irq:%d\n",
+					name, irq);
+				return irq;
+			}
+
+			irq_index = fg_get_irq_index_byname(name);
+			if (irq_index < 0)
+				return irq_index;
+
+			rc = devm_request_threaded_irq(chip->dev, irq, NULL,
+					fg_irqs[irq_index].handler,
+					IRQF_ONESHOT, name, chip);
+			if (rc < 0) {
+				dev_err(chip->dev, "failed to register irq handler for %s rc:%d\n",
+					name, rc);
+				return rc;
+			}
+
+			fg_irqs[irq_index].irq = irq;
+			if (fg_irqs[irq_index].wakeable)
+				enable_irq_wake(fg_irqs[irq_index].irq);
+		}
+	}
+
+	return 0;
+}
+
+static int fg_parse_slope_limit_coefficients(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc, i;
+
+	rc = of_property_read_u32(node, "qcom,slope-limit-temp-threshold",
+			&chip->dt.slope_limit_temp);
+	if (rc < 0)
+		return 0;
+
+	rc = of_property_count_elems_of_size(node, "qcom,slope-limit-coeffs",
+			sizeof(u32));
+	if (rc != SLOPE_LIMIT_NUM_COEFFS)
+		return -EINVAL;
+
+	rc = of_property_read_u32_array(node, "qcom,slope-limit-coeffs",
+			chip->dt.slope_limit_coeffs, SLOPE_LIMIT_NUM_COEFFS);
+	if (rc < 0) {
+		pr_err("Error in reading qcom,slope-limit-coeffs, rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 0; i < SLOPE_LIMIT_NUM_COEFFS; i++) {
+		if (chip->dt.slope_limit_coeffs[i] > SLOPE_LIMIT_COEFF_MAX ||
+			chip->dt.slope_limit_coeffs[i] < 0) {
+			pr_err("Incorrect slope limit coefficient\n");
+			return -EINVAL;
+		}
+	}
+
+	chip->slope_limit_en = true;
+	return 0;
+}
+
+static int fg_parse_ki_coefficients(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc, i;
+
+	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-soc-dischg",
+		sizeof(u32));
+	if (rc != KI_COEFF_SOC_LEVELS)
+		return 0;
+
+	rc = of_property_read_u32_array(node, "qcom,ki-coeff-soc-dischg",
+			chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+	if (rc < 0) {
+		pr_err("Error in reading ki-coeff-soc-dischg, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-med-dischg",
+		sizeof(u32));
+	if (rc != KI_COEFF_SOC_LEVELS)
+		return 0;
+
+	rc = of_property_read_u32_array(node, "qcom,ki-coeff-med-dischg",
+			chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0) {
+		pr_err("Error in reading ki-coeff-med-dischg, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-hi-dischg",
+		sizeof(u32));
+	if (rc != KI_COEFF_SOC_LEVELS)
+		return 0;
+
+	rc = of_property_read_u32_array(node, "qcom,ki-coeff-hi-dischg",
+			chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0) {
+		pr_err("Error in reading ki-coeff-hi-dischg, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
+		if (chip->dt.ki_coeff_soc[i] < 0 ||
+			chip->dt.ki_coeff_soc[i] > FULL_CAPACITY) {
+			pr_err("Error in ki_coeff_soc_dischg values\n");
+			return -EINVAL;
+		}
+
+		if (chip->dt.ki_coeff_med_dischg[i] < 0 ||
+			chip->dt.ki_coeff_med_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_med_dischg values\n");
+			return -EINVAL;
+		}
+
+		if (chip->dt.ki_coeff_hi_dischg[i] < 0 ||
+			chip->dt.ki_coeff_hi_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_hi_dischg values\n");
+			return -EINVAL;
+		}
+	}
+	chip->ki_coeff_dischg_en = true;
+	return 0;
+}
+
+#define DEFAULT_CUTOFF_VOLT_MV		3200
+#define DEFAULT_EMPTY_VOLT_MV		2800
+#define DEFAULT_RECHARGE_VOLT_MV	4250
+#define DEFAULT_CHG_TERM_CURR_MA	100
+#define DEFAULT_SYS_TERM_CURR_MA	-125
+#define DEFAULT_DELTA_SOC_THR		1
+#define DEFAULT_RECHARGE_SOC_THR	95
+#define DEFAULT_BATT_TEMP_COLD		0
+#define DEFAULT_BATT_TEMP_COOL		5
+#define DEFAULT_BATT_TEMP_WARM		45
+#define DEFAULT_BATT_TEMP_HOT		50
+#define DEFAULT_CL_START_SOC		15
+#define DEFAULT_CL_MIN_TEMP_DECIDEGC	150
+#define DEFAULT_CL_MAX_TEMP_DECIDEGC	450
+#define DEFAULT_CL_MAX_INC_DECIPERC	5
+#define DEFAULT_CL_MAX_DEC_DECIPERC	100
+#define DEFAULT_CL_MIN_LIM_DECIPERC	0
+#define DEFAULT_CL_MAX_LIM_DECIPERC	0
+#define BTEMP_DELTA_LOW			2
+#define BTEMP_DELTA_HIGH		10
+#define DEFAULT_ESR_FLT_TEMP_DECIDEGC	100
+#define DEFAULT_ESR_TIGHT_FLT_UPCT	3907
+#define DEFAULT_ESR_BROAD_FLT_UPCT	99610
+#define DEFAULT_ESR_TIGHT_LT_FLT_UPCT	48829
+#define DEFAULT_ESR_BROAD_LT_FLT_UPCT	148438
+#define DEFAULT_ESR_CLAMP_MOHMS		20
+static int fg_parse_dt(struct fg_chip *chip)
+{
+	struct device_node *child, *revid_node, *node = chip->dev->of_node;
+	u32 base, temp;
+	u8 subtype;
+	int rc;
+
+	if (!node)  {
+		dev_err(chip->dev, "device tree node missing\n");
+		return -ENXIO;
+	}
+
+	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!revid_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	chip->pmic_rev_id = get_revid_data(revid_node);
+	if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
+		pr_err("Unable to get pmic_revid rc=%ld\n",
+			PTR_ERR(chip->pmic_rev_id));
+		/*
+		 * The revid peripheral must be registered; any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	pr_debug("PMIC subtype %d Digital major %d\n",
+		chip->pmic_rev_id->pmic_subtype, chip->pmic_rev_id->rev4);
+
+	switch (chip->pmic_rev_id->pmic_subtype) {
+	case PMI8998_SUBTYPE:
+		if (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4) {
+			chip->sp = pmi8998_v1_sram_params;
+			chip->alg_flags = pmi8998_v1_alg_flags;
+			chip->wa_flags |= PMI8998_V1_REV_WA;
+		} else if (chip->pmic_rev_id->rev4 == PMI8998_V2P0_REV4) {
+			chip->sp = pmi8998_v2_sram_params;
+			chip->alg_flags = pmi8998_v2_alg_flags;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case PM660_SUBTYPE:
+		chip->sp = pmi8998_v2_sram_params;
+		chip->alg_flags = pmi8998_v2_alg_flags;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (of_get_available_child_count(node) == 0) {
+		dev_err(chip->dev, "No child nodes specified!\n");
+		return -ENXIO;
+	}
+
+	for_each_available_child_of_node(node, child) {
+		rc = of_property_read_u32(child, "reg", &base);
+		if (rc < 0) {
+			dev_err(chip->dev, "reg not specified in node %s, rc=%d\n",
+				child->full_name, rc);
+			return rc;
+		}
+
+		rc = fg_read(chip, base + PERPH_SUBTYPE_REG, &subtype, 1);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't read subtype for base %d, rc=%d\n",
+				base, rc);
+			return rc;
+		}
+
+		switch (subtype) {
+		case FG_BATT_SOC_PMI8998:
+			chip->batt_soc_base = base;
+			break;
+		case FG_BATT_INFO_PMI8998:
+			chip->batt_info_base = base;
+			break;
+		case FG_MEM_INFO_PMI8998:
+			chip->mem_if_base = base;
+			break;
+		default:
+			dev_err(chip->dev, "Invalid peripheral subtype 0x%x\n",
+				subtype);
+			return -ENXIO;
+		}
+	}
+
+	rc = of_property_read_u32(node, "qcom,rradc-base", &base);
+	if (rc < 0) {
+		dev_err(chip->dev, "rradc-base not specified, rc=%d\n", rc);
+		return rc;
+	}
+	chip->rradc_base = base;
+
+	/* Read all the optional properties below */
+	rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
+	if (rc < 0)
+		chip->dt.cutoff_volt_mv = DEFAULT_CUTOFF_VOLT_MV;
+	else
+		chip->dt.cutoff_volt_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-empty-voltage", &temp);
+	if (rc < 0)
+		chip->dt.empty_volt_mv = DEFAULT_EMPTY_VOLT_MV;
+	else
+		chip->dt.empty_volt_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-vbatt-low-thr", &temp);
+	if (rc < 0)
+		chip->dt.vbatt_low_thr_mv = -EINVAL;
+	else
+		chip->dt.vbatt_low_thr_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-chg-term-current", &temp);
+	if (rc < 0)
+		chip->dt.chg_term_curr_ma = DEFAULT_CHG_TERM_CURR_MA;
+	else
+		chip->dt.chg_term_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-sys-term-current", &temp);
+	if (rc < 0)
+		chip->dt.sys_term_curr_ma = DEFAULT_SYS_TERM_CURR_MA;
+	else
+		chip->dt.sys_term_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-delta-soc-thr", &temp);
+	if (rc < 0)
+		chip->dt.delta_soc_thr = DEFAULT_DELTA_SOC_THR;
+	else
+		chip->dt.delta_soc_thr = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-recharge-soc-thr", &temp);
+	if (rc < 0)
+		chip->dt.recharge_soc_thr = DEFAULT_RECHARGE_SOC_THR;
+	else
+		chip->dt.recharge_soc_thr = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-recharge-voltage", &temp);
+	if (rc < 0)
+		chip->dt.recharge_volt_thr_mv = DEFAULT_RECHARGE_VOLT_MV;
+	else
+		chip->dt.recharge_volt_thr_mv = temp;
+
+	chip->dt.auto_recharge_soc = of_property_read_bool(node,
+					"qcom,fg-auto-recharge-soc");
+
+	rc = of_property_read_u32(node, "qcom,fg-rsense-sel", &temp);
+	if (rc < 0)
+		chip->dt.rsense_sel = SRC_SEL_BATFET_SMB;
+	else
+		chip->dt.rsense_sel = (u8)temp & SOURCE_SELECT_MASK;
+
+	chip->dt.jeita_thresholds[JEITA_COLD] = DEFAULT_BATT_TEMP_COLD;
+	chip->dt.jeita_thresholds[JEITA_COOL] = DEFAULT_BATT_TEMP_COOL;
+	chip->dt.jeita_thresholds[JEITA_WARM] = DEFAULT_BATT_TEMP_WARM;
+	chip->dt.jeita_thresholds[JEITA_HOT] = DEFAULT_BATT_TEMP_HOT;
+	if (of_property_count_elems_of_size(node, "qcom,fg-jeita-thresholds",
+		sizeof(u32)) == NUM_JEITA_LEVELS) {
+		rc = of_property_read_u32_array(node,
+				"qcom,fg-jeita-thresholds",
+				chip->dt.jeita_thresholds, NUM_JEITA_LEVELS);
+		if (rc < 0)
+			pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
+				rc);
+	}
+
+	if (of_property_count_elems_of_size(node,
+		"qcom,battery-thermal-coefficients",
+		sizeof(u8)) == BATT_THERM_NUM_COEFFS) {
+		rc = of_property_read_u8_array(node,
+				"qcom,battery-thermal-coefficients",
+				chip->dt.batt_therm_coeffs,
+				BATT_THERM_NUM_COEFFS);
+		if (rc < 0)
+			pr_warn("Error reading battery thermal coefficients, rc:%d\n",
+				rc);
+	}
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
+	if (rc < 0)
+		chip->dt.esr_timer_charging = -EINVAL;
+	else
+		chip->dt.esr_timer_charging = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-timer-awake", &temp);
+	if (rc < 0)
+		chip->dt.esr_timer_awake = -EINVAL;
+	else
+		chip->dt.esr_timer_awake = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-timer-asleep", &temp);
+	if (rc < 0)
+		chip->dt.esr_timer_asleep = -EINVAL;
+	else
+		chip->dt.esr_timer_asleep = temp;
+
+	chip->cyc_ctr.en = of_property_read_bool(node, "qcom,cycle-counter-en");
+	if (chip->cyc_ctr.en)
+		chip->cyc_ctr.id = 1;
+
+	chip->dt.force_load_profile = of_property_read_bool(node,
+					"qcom,fg-force-load-profile");
+
+	rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp);
+	if (rc < 0)
+		chip->dt.cl_start_soc = DEFAULT_CL_START_SOC;
+	else
+		chip->dt.cl_start_soc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+	if (rc < 0)
+		chip->dt.cl_min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+	else
+		chip->dt.cl_min_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+	else
+		chip->dt.cl_max_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+	else
+		chip->dt.cl_max_cap_inc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+	else
+		chip->dt.cl_max_cap_dec = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+	if (rc < 0)
+		chip->dt.cl_min_cap_limit = DEFAULT_CL_MIN_LIM_DECIPERC;
+	else
+		chip->dt.cl_min_cap_limit = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_limit = DEFAULT_CL_MAX_LIM_DECIPERC;
+	else
+		chip->dt.cl_max_cap_limit = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-jeita-hyst-temp", &temp);
+	if (rc < 0)
+		chip->dt.jeita_hyst_temp = -EINVAL;
+	else
+		chip->dt.jeita_hyst_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-batt-temp-delta", &temp);
+	if (rc < 0)
+		chip->dt.batt_temp_delta = -EINVAL;
+	else if (temp > BTEMP_DELTA_LOW && temp <= BTEMP_DELTA_HIGH)
+		chip->dt.batt_temp_delta = temp;
+
+	chip->dt.hold_soc_while_full = of_property_read_bool(node,
+					"qcom,hold-soc-while-full");
+
+	rc = fg_parse_ki_coefficients(chip);
+	if (rc < 0)
+		pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
+
+	rc = of_property_read_u32(node, "qcom,fg-rconn-mohms", &temp);
+	if (rc < 0)
+		chip->dt.rconn_mohms = -EINVAL;
+	else
+		chip->dt.rconn_mohms = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-filter-switch-temp",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_flt_switch_temp = DEFAULT_ESR_FLT_TEMP_DECIDEGC;
+	else
+		chip->dt.esr_flt_switch_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-tight-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_tight_flt_upct = DEFAULT_ESR_TIGHT_FLT_UPCT;
+	else
+		chip->dt.esr_tight_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-broad-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_broad_flt_upct = DEFAULT_ESR_BROAD_FLT_UPCT;
+	else
+		chip->dt.esr_broad_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-tight-lt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_tight_lt_flt_upct = DEFAULT_ESR_TIGHT_LT_FLT_UPCT;
+	else
+		chip->dt.esr_tight_lt_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-broad-lt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_broad_lt_flt_upct = DEFAULT_ESR_BROAD_LT_FLT_UPCT;
+	else
+		chip->dt.esr_broad_lt_flt_upct = temp;
+
+	rc = fg_parse_slope_limit_coefficients(chip);
+	if (rc < 0)
+		pr_err("Error in parsing slope limit coeffs, rc=%d\n", rc);
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-clamp-mohms", &temp);
+	if (rc < 0)
+		chip->dt.esr_clamp_mohms = DEFAULT_ESR_CLAMP_MOHMS;
+	else
+		chip->dt.esr_clamp_mohms = temp;
+
+	return 0;
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+	power_supply_unreg_notifier(&chip->nb);
+	debugfs_remove_recursive(chip->dfs_root);
+	if (chip->awake_votable)
+		destroy_votable(chip->awake_votable);
+
+	if (chip->batt_id_chan)
+		iio_channel_release(chip->batt_id_chan);
+
+	dev_set_drvdata(chip->dev, NULL);
+}
+
+static int fg_gen3_probe(struct platform_device *pdev)
+{
+	struct fg_chip *chip;
+	struct power_supply_config fg_psy_cfg;
+	int rc, msoc, volt_uv, batt_temp;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->debug_mask = &fg_gen3_debug_mask;
+	chip->irqs = fg_irqs;
+	chip->charge_status = -EINVAL;
+	chip->prev_charge_status = -EINVAL;
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		dev_err(chip->dev, "Parent regmap is unavailable\n");
+		return -ENXIO;
+	}
+
+	chip->batt_id_chan = iio_channel_get(chip->dev, "rradc_batt_id");
+	if (IS_ERR(chip->batt_id_chan)) {
+		if (PTR_ERR(chip->batt_id_chan) != -EPROBE_DEFER)
+			pr_err("batt_id_chan unavailable %ld\n",
+				PTR_ERR(chip->batt_id_chan));
+		rc = PTR_ERR(chip->batt_id_chan);
+		chip->batt_id_chan = NULL;
+		return rc;
+	}
+
+	chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb,
+					chip);
+	if (IS_ERR(chip->awake_votable)) {
+		rc = PTR_ERR(chip->awake_votable);
+		return rc;
+	}
+
+	rc = fg_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in reading DT parameters, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	mutex_init(&chip->bus_lock);
+	mutex_init(&chip->sram_rw_lock);
+	mutex_init(&chip->cyc_ctr.lock);
+	mutex_init(&chip->cl.lock);
+	mutex_init(&chip->batt_avg_lock);
+	mutex_init(&chip->charge_full_lock);
+	init_completion(&chip->soc_update);
+	init_completion(&chip->soc_ready);
+	INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	INIT_WORK(&chip->cycle_count_work, cycle_count_work);
+	INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work);
+	INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
+
+	rc = fg_get_batt_id(chip);
+	if (rc < 0) {
+		pr_err("Error in getting battery id, rc:%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_batt_profile(chip);
+	if (rc < 0) {
+		chip->soc_reporting_ready = true;
+		pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
+			chip->batt_id_ohms / 1000, rc);
+	}
+
+	rc = fg_memif_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing FG_MEMIF, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	rc = fg_hw_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	/* Register the power supply */
+	fg_psy_cfg.drv_data = chip;
+	fg_psy_cfg.of_node = NULL;
+	fg_psy_cfg.supplied_to = NULL;
+	fg_psy_cfg.num_supplicants = 0;
+	chip->fg_psy = devm_power_supply_register(chip->dev, &fg_psy_desc,
+			&fg_psy_cfg);
+	if (IS_ERR(chip->fg_psy)) {
+		pr_err("failed to register fg_psy rc = %ld\n",
+				PTR_ERR(chip->fg_psy));
+		rc = PTR_ERR(chip->fg_psy);
+		goto exit;
+	}
+
+	chip->nb.notifier_call = fg_notifier_cb;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto exit;
+	}
+
+	rc = fg_register_interrupts(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	/* Keep SOC_UPDATE irq disabled until we require it */
+	if (fg_irqs[SOC_UPDATE_IRQ].irq)
+		disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+	/* Keep BSOC_DELTA_IRQ irq disabled until we require it */
+	if (fg_irqs[BSOC_DELTA_IRQ].irq) {
+		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
+		chip->bsoc_delta_irq_en = false;
+	}
+
+	rc = fg_debugfs_create(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	rc = fg_get_battery_voltage(chip, &volt_uv);
+	if (!rc)
+		rc = fg_get_prop_capacity(chip, &msoc);
+
+	if (!rc)
+		rc = fg_get_battery_temp(chip, &batt_temp);
+
+	if (!rc) {
+		pr_info("battery SOC:%d voltage: %duV temp: %d id: %dKOhms\n",
+			msoc, volt_uv, batt_temp, chip->batt_id_ohms / 1000);
+		rc = fg_esr_filter_config(chip, batt_temp);
+		if (rc < 0)
+			pr_err("Error in configuring ESR filter rc:%d\n", rc);
+	}
+
+	device_init_wakeup(chip->dev, true);
+	if (chip->profile_available)
+		schedule_delayed_work(&chip->profile_load_work, 0);
+
+	pr_debug("FG GEN3 driver probed successfully\n");
+	return 0;
+exit:
+	fg_cleanup(chip);
+	return rc;
+}
+
+static int fg_gen3_suspend(struct device *dev)
+{
+	struct fg_chip *chip = dev_get_drvdata(dev);
+	int rc;
+
+	if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_asleep, false,
+				      FG_IMA_NO_WLOCK);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer during suspend, rc=%d\n",
+			       rc);
+			return rc;
+		}
+	}
+
+	cancel_delayed_work_sync(&chip->batt_avg_work);
+	if (fg_sram_dump)
+		cancel_delayed_work_sync(&chip->sram_dump_work);
+	return 0;
+}
+
+static int fg_gen3_resume(struct device *dev)
+{
+	struct fg_chip *chip = dev_get_drvdata(dev);
+	int rc;
+
+	if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
+				      FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer during resume, rc=%d\n",
+			       rc);
+			return rc;
+		}
+	}
+
+	fg_circ_buf_clr(&chip->ibatt_circ_buf);
+	fg_circ_buf_clr(&chip->vbatt_circ_buf);
+	schedule_delayed_work(&chip->batt_avg_work, 0);
+	if (fg_sram_dump)
+		schedule_delayed_work(&chip->sram_dump_work,
+				msecs_to_jiffies(fg_sram_dump_period_ms));
+	return 0;
+}
+
+static const struct dev_pm_ops fg_gen3_pm_ops = {
+	.suspend	= fg_gen3_suspend,
+	.resume		= fg_gen3_resume,
+};
+
+static int fg_gen3_remove(struct platform_device *pdev)
+{
+	struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	fg_cleanup(chip);
+	return 0;
+}
+
+static const struct of_device_id fg_gen3_match_table[] = {
+	{.compatible = FG_GEN3_DEV_NAME},
+	{},
+};
+
+static struct platform_driver fg_gen3_driver = {
+	.driver = {
+		.name = FG_GEN3_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = fg_gen3_match_table,
+		.pm		= &fg_gen3_pm_ops,
+	},
+	.probe		= fg_gen3_probe,
+	.remove		= fg_gen3_remove,
+};
+
+static int __init fg_gen3_init(void)
+{
+	return platform_driver_register(&fg_gen3_driver);
+}
+
+static void __exit fg_gen3_exit(void)
+{
+	platform_driver_unregister(&fg_gen3_driver);
+}
+
+module_init(fg_gen3_init);
+module_exit(fg_gen3_exit);
+
+MODULE_DESCRIPTION("QPNP Fuel gauge GEN3 driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" FG_GEN3_DEV_NAME);
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
new file mode 100644
index 0000000..cbfab30
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -0,0 +1,1456 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "pmic-voter.h"
+
+#define QNOVO_REVISION1		0x00
+#define QNOVO_REVISION2		0x01
+#define QNOVO_PERPH_TYPE	0x04
+#define QNOVO_PERPH_SUBTYPE	0x05
+#define QNOVO_PTTIME_STS	0x07
+#define QNOVO_PTRAIN_STS	0x08
+#define QNOVO_ERROR_STS		0x09
+#define QNOVO_ERROR_BIT		BIT(0)
+#define QNOVO_INT_RT_STS	0x10
+#define QNOVO_INT_SET_TYPE	0x11
+#define QNOVO_INT_POLARITY_HIGH	0x12
+#define QNOVO_INT_POLARITY_LOW	0x13
+#define QNOVO_INT_LATCHED_CLR	0x14
+#define QNOVO_INT_EN_SET	0x15
+#define QNOVO_INT_EN_CLR	0x16
+#define QNOVO_INT_LATCHED_STS	0x18
+#define QNOVO_INT_PENDING_STS	0x19
+#define QNOVO_INT_MID_SEL	0x1A
+#define QNOVO_INT_PRIORITY	0x1B
+#define QNOVO_PE_CTRL		0x40
+#define QNOVO_PREST1_CTRL	0x41
+#define QNOVO_PPULS1_LSB_CTRL	0x42
+#define QNOVO_PPULS1_MSB_CTRL	0x43
+#define QNOVO_NREST1_CTRL	0x44
+#define QNOVO_NPULS1_CTRL	0x45
+#define QNOVO_PPCNT_CTRL	0x46
+#define QNOVO_VLIM1_LSB_CTRL	0x47
+#define QNOVO_VLIM1_MSB_CTRL	0x48
+#define QNOVO_PTRAIN_EN		0x49
+#define QNOVO_PTRAIN_EN_BIT	BIT(0)
+#define QNOVO_PE_CTRL2		0x4A
+#define QNOVO_PREST2_LSB_CTRL	0x50
+#define QNOVO_PREST2_MSB_CTRL	0x51
+#define QNOVO_PPULS2_LSB_CTRL	0x52
+#define QNOVO_PPULS2_MSB_CTRL	0x53
+#define QNOVO_NREST2_CTRL	0x54
+#define QNOVO_NPULS2_CTRL	0x55
+#define QNOVO_VLIM2_LSB_CTRL	0x56
+#define QNOVO_VLIM2_MSB_CTRL	0x57
+#define QNOVO_PVOLT1_LSB	0x60
+#define QNOVO_PVOLT1_MSB	0x61
+#define QNOVO_PCUR1_LSB		0x62
+#define QNOVO_PCUR1_MSB		0x63
+#define QNOVO_PVOLT2_LSB	0x70
+#define QNOVO_PVOLT2_MSB	0x71
+#define QNOVO_RVOLT2_LSB	0x72
+#define QNOVO_RVOLT2_MSB	0x73
+#define QNOVO_PCUR2_LSB		0x74
+#define QNOVO_PCUR2_MSB		0x75
+#define QNOVO_SCNT		0x80
+#define QNOVO_VMAX_LSB		0x90
+#define QNOVO_VMAX_MSB		0x91
+#define QNOVO_SNUM		0x92
+
+/* Registers ending in 0 imply external rsense */
+#define QNOVO_IADC_OFFSET_0	0xA0
+#define QNOVO_IADC_OFFSET_1	0xA1
+#define QNOVO_IADC_GAIN_0	0xA2
+#define QNOVO_IADC_GAIN_1	0xA3
+#define QNOVO_VADC_OFFSET	0xA4
+#define QNOVO_VADC_GAIN		0xA5
+#define QNOVO_IADC_GAIN_2	0xA6
+#define QNOVO_SPARE		0xA7
+#define QNOVO_STRM_CTRL		0xA8
+#define QNOVO_IADC_OFFSET_OVR_VAL	0xA9
+#define QNOVO_IADC_OFFSET_OVR		0xAA
+#define QNOVO_DISABLE_CHARGING		0xAB
+
+#define QNOVO_TR_IADC_OFFSET_0	0xF1
+#define QNOVO_TR_IADC_OFFSET_1	0xF2
+
+#define DRV_MAJOR_VERSION	1
+#define DRV_MINOR_VERSION	0
+
+#define IADC_LSB_NA	2441400
+#define VADC_LSB_NA	1220700
+#define GAIN_LSB_FACTOR	976560
+
+#define USER_VOTER		"user_voter"
+#define OK_TO_QNOVO_VOTER	"ok_to_qnovo_voter"
+
+#define QNOVO_VOTER		"qnovo_voter"
+
+struct qnovo_dt_props {
+	bool			external_rsense;
+	struct device_node	*revid_dev_node;
+};
+
+enum {
+	QNOVO_NO_ERR_STS_BIT		= BIT(0),
+};
+
+struct chg_props {
+	bool		charging;
+	bool		usb_online;
+	bool		dc_online;
+};
+
+struct chg_status {
+	bool		ok_to_qnovo;
+};
+
+struct qnovo {
+	int			base;
+	struct mutex		write_lock;
+	struct regmap		*regmap;
+	struct qnovo_dt_props	dt;
+	struct device		*dev;
+	struct votable		*disable_votable;
+	struct class		qnovo_class;
+	struct pmic_revid_data	*pmic_rev_id;
+	u32			wa_flags;
+	s64			external_offset_nA;
+	s64			internal_offset_nA;
+	s64			offset_nV;
+	s64			external_i_gain_mega;
+	s64			internal_i_gain_mega;
+	s64			v_gain_mega;
+	struct notifier_block	nb;
+	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
+	struct power_supply	*dc_psy;
+	struct chg_props	cp;
+	struct chg_status	cs;
+	struct work_struct	status_change_work;
+	int			fv_uV_request;
+	int			fcc_uA_request;
+};
+
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, 0600);
+
+#define qnovo_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (debug_mask & (reason))				\
+			dev_info(chip->dev, fmt, ##__VA_ARGS__);	\
+		else							\
+			dev_dbg(chip->dev, fmt, ##__VA_ARGS__);		\
+	} while (0)
+
+static bool is_secure(struct qnovo *chip, int addr)
+{
+	/* assume everything above 0x40 is secure */
+	return (bool)(addr >= 0x40);
+}
+
+static int qnovo_read(struct qnovo *chip, u16 addr, u8 *buf, int len)
+{
+	return regmap_bulk_read(chip->regmap, chip->base + addr, buf, len);
+}
+
+static int qnovo_masked_write(struct qnovo *chip, u16 addr, u8 mask, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->write_lock);
+	if (is_secure(chip, addr)) {
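+		/*
+		 * Unlock the secure register by writing the 0xA5 key to the
+		 * peripheral's SEC_ACCESS register (offset 0xD0) before the
+		 * actual write.
+		 */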
+		rc = regmap_write(chip->regmap,
+				((chip->base + addr) & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chip->regmap, chip->base + addr, mask, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static int qnovo_write(struct qnovo *chip, u16 addr, u8 *buf, int len)
+{
+	int i, rc = 0;
+	bool is_start_secure, is_end_secure;
+
+	is_start_secure = is_secure(chip, addr);
+	is_end_secure = is_secure(chip, addr + len);
+
+	if (!is_start_secure && !is_end_secure) {
+		mutex_lock(&chip->write_lock);
+		rc = regmap_bulk_write(chip->regmap, chip->base + addr,
+					buf, len);
+		goto unlock;
+	}
+
+	mutex_lock(&chip->write_lock);
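+	/*
+	 * At least one register in the range is secure: unlock and write one
+	 * byte at a time, since each secure write needs its own unlock.
+	 */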
+	for (i = addr; i < addr + len; i++) {
+		if (is_secure(chip, i)) {
+			rc = regmap_write(chip->regmap,
+				((chip->base + i) & ~(0xFF)) | 0xD0, 0xA5);
+			if (rc < 0)
+				goto unlock;
+		}
+		rc = regmap_write(chip->regmap, chip->base + i, buf[i - addr]);
+		if (rc < 0)
+			goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static bool is_batt_available(struct qnovo *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static int qnovo_batt_psy_update(struct qnovo *chip, bool disable)
+{
+	union power_supply_propval pval = {0};
+	int rc = 0;
+
+	if (!is_batt_available(chip))
+		return -EINVAL;
+
+	if (chip->fv_uV_request != -EINVAL) {
+		pval.intval = disable ? -EINVAL : chip->fv_uV_request;
+		rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't set prop qnovo_fv rc = %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	if (chip->fcc_uA_request != -EINVAL) {
+		pval.intval = disable ? -EINVAL : chip->fcc_uA_request;
+		rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CURRENT_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't set prop qnovo_fcc rc = %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
+					const char *client)
+{
+	struct qnovo *chip = data;
+	int rc = 0;
+
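+	/*
+	 * When disabling, drop the Qnovo FV/FCC requests from the battery
+	 * psy before stopping the pulse train; when re-enabling, start the
+	 * pulse train first and then reapply the requests.
+	 */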
+	if (disable) {
+		rc = qnovo_batt_psy_update(chip, true);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+				 disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+			disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	if (!disable) {
+		rc = qnovo_batt_psy_update(chip, false);
+		if (rc < 0)
+			return rc;
+	}
+
+	return rc;
+}
+
+static int qnovo_parse_dt(struct qnovo *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "reg", &chip->base);
+	if (rc < 0) {
+		pr_err("Couldn't read base rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->dt.external_rsense = of_property_read_bool(node,
+			"qcom,external-rsense");
+
+	chip->dt.revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!chip->dt.revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qnovo_check_chg_version(struct qnovo *chip)
+{
+	int rc;
+
+	chip->pmic_rev_id = get_revid_data(chip->dt.revid_dev_node);
+	if (IS_ERR(chip->pmic_rev_id)) {
+		rc = PTR_ERR(chip->pmic_rev_id);
+		if (rc != -EPROBE_DEFER)
+			pr_err("Unable to get pmic_revid rc=%d\n", rc);
+		return rc;
+	}
+
+	if ((chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE)
+		   && (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4)) {
+		chip->wa_flags |= QNOVO_NO_ERR_STS_BIT;
+	}
+
+	return 0;
+}
+
+enum {
+	VER = 0,
+	OK_TO_QNOVO,
+	ENABLE,
+	FV_REQUEST,
+	FCC_REQUEST,
+	PE_CTRL_REG,
+	PE_CTRL2_REG,
+	PTRAIN_STS_REG,
+	INT_RT_STS_REG,
+	PREST1,
+	PPULS1,
+	NREST1,
+	NPULS1,
+	PPCNT,
+	VLIM1,
+	PVOLT1,
+	PCUR1,
+	PTTIME,
+	PREST2,
+	PPULS2,
+	NREST2,
+	NPULS2,
+	VLIM2,
+	PVOLT2,
+	RVOLT2,
+	PCUR2,
+	SCNT,
+	VMAX,
+	SNUM,
+	VBATT,
+	IBATT,
+	BATTTEMP,
+	BATTSOC,
+};
+
+struct param_info {
+	char	*name;
+	int	start_addr;
+	int	num_regs;
+	int	reg_to_unit_multiplier;
+	int	reg_to_unit_divider;
+	int	reg_to_unit_offset;
+	int	min_val;
+	int	max_val;
+	char	*units_str;
+};
+
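+/*
+ * Register map and raw-to-unit conversion factors for each parameter exposed
+ * through the qnovo sysfs class attributes below.
+ */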
+static struct param_info params[] = {
+	[FV_REQUEST] = {
+		.units_str		= "uV",
+	},
+	[FCC_REQUEST] = {
+		.units_str		= "uA",
+	},
+	[PE_CTRL_REG] = {
+		.name			= "CTRL_REG",
+		.start_addr		= QNOVO_PE_CTRL,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PE_CTRL2_REG] = {
+		.name			= "PE_CTRL2_REG",
+		.start_addr		= QNOVO_PE_CTRL2,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PTRAIN_STS_REG] = {
+		.name			= "PTRAIN_STS",
+		.start_addr		= QNOVO_PTRAIN_STS,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[INT_RT_STS_REG] = {
+		.name			= "INT_RT_STS",
+		.start_addr		= QNOVO_INT_RT_STS,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PREST1] = {
+		.name			= "PREST1",
+		.start_addr		= QNOVO_PREST1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[PPULS1] = {
+		.name			= "PPULS1",
+		.start_addr		= QNOVO_PPULS1_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1600, /* converts to uC */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 104856000,
+		.units_str		= "uC",
+	},
+	[NREST1] = {
+		.name			= "NREST1",
+		.start_addr		= QNOVO_NREST1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[NPULS1] = {
+		.name			= "NPULS1",
+		.start_addr		= QNOVO_NPULS1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[PPCNT] = {
+		.name			= "PPCNT",
+		.start_addr		= QNOVO_PPCNT_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 255,
+		.units_str		= "pulses",
+	},
+	[VLIM1] = {
+		.name			= "VLIM1",
+		.start_addr		= QNOVO_VLIM1_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 5000000,
+		.units_str		= "uV",
+	},
+	[PVOLT1] = {
+		.name			= "PVOLT1",
+		.start_addr		= QNOVO_PVOLT1_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[PCUR1] = {
+		.name			= "PCUR1",
+		.start_addr		= QNOVO_PCUR1_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1220700, /* converts to nA */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uA",
+	},
+	[PTTIME] = {
+		.name			= "PTTIME",
+		.start_addr		= QNOVO_PTTIME_STS,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 2,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "S",
+	},
+	[PREST2] = {
+		.name			= "PREST2",
+		.start_addr		= QNOVO_PREST2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 327675,
+		.units_str		= "mS",
+	},
+	[PPULS2] = {
+		.name			= "PPULS2",
+		.start_addr		= QNOVO_PPULS2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1600, /* converts to uC */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 104856000,
+		.units_str		= "uC",
+	},
+	[NREST2] = {
+		.name			= "NREST2",
+		.start_addr		= QNOVO_NREST2_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.reg_to_unit_offset	= -5,
+		.min_val		= 5,
+		.max_val		= 1280,
+		.units_str		= "mS",
+	},
+	[NPULS2] = {
+		.name			= "NPULS2",
+		.start_addr		= QNOVO_NPULS2_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[VLIM2] = {
+		.name			= "VLIM2",
+		.start_addr		= QNOVO_VLIM2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 5000000,
+		.units_str		= "uV",
+	},
+	[PVOLT2] = {
+		.name			= "PVOLT2",
+		.start_addr		= QNOVO_PVOLT2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[RVOLT2] = {
+		.name			= "RVOLT2",
+		.start_addr		= QNOVO_RVOLT2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[PCUR2] = {
+		.name			= "PCUR2",
+		.start_addr		= QNOVO_PCUR2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1220700, /* converts to nA */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uA",
+	},
+	[SCNT] = {
+		.name			= "SCNT",
+		.start_addr		= QNOVO_SCNT,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "pulses",
+	},
+	[VMAX] = {
+		.name			= "VMAX",
+		.start_addr		= QNOVO_VMAX_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 814000, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[SNUM] = {
+		.name			= "SNUM",
+		.start_addr		= QNOVO_SNUM,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "pulses",
+	},
+	[VBATT]	= {
+		.name			= "POWER_SUPPLY_PROP_VOLTAGE_NOW",
+		.start_addr		= POWER_SUPPLY_PROP_VOLTAGE_NOW,
+		.units_str		= "uV",
+	},
+	[IBATT]	= {
+		.name			= "POWER_SUPPLY_PROP_CURRENT_NOW",
+		.start_addr		= POWER_SUPPLY_PROP_CURRENT_NOW,
+		.units_str		= "uA",
+	},
+	[BATTTEMP] = {
+		.name			= "POWER_SUPPLY_PROP_TEMP",
+		.start_addr		= POWER_SUPPLY_PROP_TEMP,
+		.units_str		= "deciDegC",
+	},
+	[BATTSOC] = {
+		.name			= "POWER_SUPPLY_PROP_CAPACITY",
+		.start_addr		= POWER_SUPPLY_PROP_CAPACITY,
+		.units_str		= "%",
+	},
+};
+
+static struct class_attribute qnovo_attributes[];
+
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+			DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+
+static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->cs.ok_to_qnovo);
+}
+
+static ssize_t enable_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int val;
+
+	val = get_client_vote(chip->disable_votable, USER_VOTER);
+	val = !val;
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t enable_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	unsigned long val;
+	bool disable;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	disable = !val;
+
+	vote(chip->disable_votable, USER_VOTER, disable, 0);
+	return count;
+}
+
+static ssize_t val_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int i = attr - qnovo_attributes;
+	int val = 0;
+
+	if (i == FV_REQUEST)
+		val = chip->fv_uV_request;
+
+	if (i == FCC_REQUEST)
+		val = chip->fcc_uA_request;
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+}
+
+static ssize_t val_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int i = attr - qnovo_attributes;
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	if (i == FV_REQUEST)
+		chip->fv_uV_request = val;
+
+	if (i == FCC_REQUEST)
+		chip->fcc_uA_request = val;
+
+	return count;
+}
+
+static ssize_t reg_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval = buf[1] << 8 | buf[0];
+
+	return snprintf(ubuf, PAGE_SIZE, "0x%04x%s\n",
+			regval, params[i].units_str);
+}
+
+static ssize_t reg_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	unsigned long val;
+	int rc;
+
+	if (kstrtoul(ubuf, 16, &val))
+		return -EINVAL;
+
+	buf[0] = val & 0xFF;
+	buf[1] = (val >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	return count;
+}
+
+static ssize_t time_show(struct class *c, struct class_attribute *attr,
+		char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int val;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval = buf[1] << 8 | buf[0];
+
+	val = ((regval * params[i].reg_to_unit_multiplier)
+			/ params[i].reg_to_unit_divider)
+		- params[i].reg_to_unit_offset;
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+}
+
+static ssize_t time_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	unsigned long val;
+	int rc;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	if (val < params[i].min_val || val > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	regval = (((int)val + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider)
+		/ params[i].reg_to_unit_multiplier;
+	buf[0] = regval & 0xFF;
+	buf[1] = (regval >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t current_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uA;
+	s64 regval_nA;
+	s64 gain, offset_nA, comp_val_nA;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_nA = buf[1] << 8 | buf[0];
+	regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
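+	/*
+	 * Apply the IADC trim for the rsense in use: scale the raw nA value
+	 * by the gain (a multiplier scaled by 10^6) and add the offset, then
+	 * report the result in uA.
+	 */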
+	if (chip->dt.external_rsense) {
+		offset_nA = chip->external_offset_nA;
+		gain = chip->external_i_gain_mega;
+	} else {
+		offset_nA = chip->internal_offset_nA;
+		gain = chip->internal_i_gain_mega;
+	}
+
+	comp_val_nA = div_s64(regval_nA * gain, 1000000) + offset_nA;
+	comp_val_uA = div_s64(comp_val_nA, 1000);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+			comp_val_uA, params[i].units_str);
+}
+
+static ssize_t voltage_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uV;
+	s64 regval_nV;
+	s64 gain, offset_nV, comp_val_nV;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_nV = buf[1] << 8 | buf[0];
+	regval_nV = div_s64(regval_nV * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	offset_nV = chip->offset_nV;
+	gain = chip->v_gain_mega;
+
+	comp_val_nV = div_s64(regval_nV * gain, 1000000) + offset_nV;
+	comp_val_uV = div_s64(comp_val_nV, 1000);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+				comp_val_uV, params[i].units_str);
+}
+
+static ssize_t voltage_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	unsigned long val_uV;
+	s64 regval_nV;
+	s64 gain, offset_nV;
+
+	if (kstrtoul(ubuf, 10, &val_uV))
+		return -EINVAL;
+
+	if (val_uV < params[i].min_val || val_uV > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val_uV,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	offset_nV = chip->offset_nV;
+	gain = chip->v_gain_mega;
+
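+	/*
+	 * Undo the VADC trim: remove the offset, divide out the gain
+	 * (a multiplier scaled by 10^6), then convert from nV to register
+	 * LSBs.
+	 */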
+	regval_nV = (s64)val_uV * 1000 - offset_nV;
+	regval_nV = div_s64(regval_nV * 1000000, gain);
+
+	regval_nV = div_s64((regval_nV + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider,
+			params[i].reg_to_unit_multiplier);
+	buf[0] = regval_nV & 0xFF;
+	buf[1] = ((u64)regval_nV >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t coulomb_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uC;
+	s64 regval_uC, gain;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_uC = buf[1] << 8 | buf[0];
+	regval_uC = div_s64(regval_uC * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	if (chip->dt.external_rsense)
+		gain = chip->external_i_gain_mega;
+	else
+		gain = chip->internal_i_gain_mega;
+
+	comp_val_uC = div_s64(regval_uC * gain, 1000000);
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+			comp_val_uC, params[i].units_str);
+}
+
+static ssize_t coulomb_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	unsigned long val_uC;
+	s64 regval;
+	s64 gain;
+
+	if (kstrtoul(ubuf, 10, &val_uC))
+		return -EINVAL;
+
+	if (val_uC < params[i].min_val || val_uC > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val_uC,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	if (chip->dt.external_rsense)
+		gain = chip->external_i_gain_mega;
+	else
+		gain = chip->internal_i_gain_mega;
+
+	regval = div_s64((s64)val_uC * 1000000, gain);
+
+	regval = div_s64((regval + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider,
+			params[i].reg_to_unit_multiplier);
+
+	buf[0] = regval & 0xFF;
+	buf[1] = ((u64)regval >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t batt_prop_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int rc = -EINVAL;
+	int prop = params[i].start_addr;
+	union power_supply_propval pval = {0};
+
+	if (!is_batt_available(chip))
+		return -EINVAL;
+
+	rc = power_supply_get_property(chip->batt_psy, prop, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read battery prop %s rc = %d\n",
+				params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+			pval.intval, params[i].units_str);
+}
+
+static struct class_attribute qnovo_attributes[] = {
+	[VER]			= __ATTR_RO(version),
+	[OK_TO_QNOVO]		= __ATTR_RO(ok_to_qnovo),
+	[ENABLE]		= __ATTR(enable, 0644,
+					enable_show, enable_store),
+	[FV_REQUEST]		= __ATTR(fv_uV_request, 0644,
+					val_show, val_store),
+	[FCC_REQUEST]		= __ATTR(fcc_uA_request, 0644,
+					val_show, val_store),
+	[PE_CTRL_REG]		= __ATTR(PE_CTRL_REG, 0644,
+					reg_show, reg_store),
+	[PE_CTRL2_REG]		= __ATTR(PE_CTRL2_REG, 0644,
+					reg_show, reg_store),
+	[PTRAIN_STS_REG]	= __ATTR(PTRAIN_STS_REG, 0644,
+					reg_show, reg_store),
+	[INT_RT_STS_REG]	= __ATTR(INT_RT_STS_REG, 0644,
+					reg_show, reg_store),
+	[PREST1]		= __ATTR(PREST1_mS, 0644,
+					time_show, time_store),
+	[PPULS1]		= __ATTR(PPULS1_uC, 0644,
+					coulomb_show, coulomb_store),
+	[NREST1]		= __ATTR(NREST1_mS, 0644,
+					time_show, time_store),
+	[NPULS1]		= __ATTR(NPULS1_mS, 0644,
+					time_show, time_store),
+	[PPCNT]			= __ATTR(PPCNT, 0644,
+					time_show, time_store),
+	[VLIM1]			= __ATTR(VLIM1_uV, 0644,
+					voltage_show, voltage_store),
+	[PVOLT1]		= __ATTR(PVOLT1_uV, 0444,
+					voltage_show, NULL),
+	[PCUR1]			= __ATTR(PCUR1_uA, 0444,
+					current_show, NULL),
+	[PTTIME]		= __ATTR(PTTIME_S, 0444,
+					time_show, NULL),
+	[PREST2]		= __ATTR(PREST2_mS, 0644,
+					time_show, time_store),
+	[PPULS2]		= __ATTR(PPULS2_uC, 0644,
+					coulomb_show, coulomb_store),
+	[NREST2]		= __ATTR(NREST2_mS, 0644,
+					time_show, time_store),
+	[NPULS2]		= __ATTR(NPULS2_mS, 0644,
+					time_show, time_store),
+	[VLIM2]			= __ATTR(VLIM2_uV, 0644,
+					voltage_show, voltage_store),
+	[PVOLT2]		= __ATTR(PVOLT2_uV, 0444,
+					voltage_show, NULL),
+	[RVOLT2]		= __ATTR(RVOLT2_uV, 0444,
+					voltage_show, NULL),
+	[PCUR2]			= __ATTR(PCUR2_uA, 0444,
+					current_show, NULL),
+	[SCNT]			= __ATTR(SCNT, 0644,
+					time_show, time_store),
+	[VMAX]			= __ATTR(VMAX_uV, 0444,
+					voltage_show, NULL),
+	[SNUM]			= __ATTR(SNUM, 0644,
+					time_show, time_store),
+	[VBATT]			= __ATTR(VBATT_uV, 0444,
+					batt_prop_show, NULL),
+	[IBATT]			= __ATTR(IBATT_uA, 0444,
+					batt_prop_show, NULL),
+	[BATTTEMP]		= __ATTR(BATTTEMP_deciDegC, 0444,
+					batt_prop_show, NULL),
+	[BATTSOC]		= __ATTR(BATTSOC, 0444,
+					batt_prop_show, NULL),
+	__ATTR_NULL,
+};
+
+static void get_chg_props(struct qnovo *chip, struct chg_props *cp)
+{
+	union power_supply_propval pval;
+	u8 val = 0;
+	int rc;
+
+	cp->charging = true;
+	rc = qnovo_read(chip, QNOVO_ERROR_STS, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read error sts rc = %d\n", rc);
+		cp->charging = false;
+	} else {
+		cp->charging = (!(val & QNOVO_ERROR_BIT));
+	}
+
+	if (chip->wa_flags & QNOVO_NO_ERR_STS_BIT) {
+		/*
+		 * On v1.0 and v1.1 PMICs force charging to true; if
+		 * conditions are not good to charge, s/w gets a PTRAIN_DONE
+		 * interrupt.
+		 */
+		cp->charging = true;
+	}
+
+	cp->usb_online = false;
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+	if (chip->usb_psy) {
+		rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_ONLINE, &pval);
+		if (rc < 0)
+			pr_err("Couldn't read usb online rc = %d\n", rc);
+		else
+			cp->usb_online = (bool)pval.intval;
+	}
+
+	cp->dc_online = false;
+	if (!chip->dc_psy)
+		chip->dc_psy = power_supply_get_by_name("dc");
+	if (chip->dc_psy) {
+		rc = power_supply_get_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_ONLINE, &pval);
+		if (rc < 0)
+			pr_err("Couldn't read dc online rc = %d\n", rc);
+		else
+			cp->dc_online = (bool)pval.intval;
+	}
+}
+
+static void get_chg_status(struct qnovo *chip, const struct chg_props *cp,
+				struct chg_status *cs)
+{
+	cs->ok_to_qnovo = false;
+
+	if (cp->charging &&
+		(cp->usb_online || cp->dc_online))
+		cs->ok_to_qnovo = true;
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct qnovo *chip = container_of(work,
+			struct qnovo, status_change_work);
+	bool notify_uevent = false;
+	struct chg_props cp;
+	struct chg_status cs;
+
+	get_chg_props(chip, &cp);
+	get_chg_status(chip, &cp, &cs);
+
+	if (cs.ok_to_qnovo ^ chip->cs.ok_to_qnovo) {
+		/*
+		 * When it is not okay to Qnovo charge, disable both voters,
+		 * so that when it becomes okay to Qnovo charge again the user
+		 * voter has to explicitly enable its vote to begin Qnovo
+		 * charging.
+		 */
+		if (!cs.ok_to_qnovo) {
+			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 1, 0);
+			vote(chip->disable_votable, USER_VOTER, 1, 0);
+		} else {
+			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 0, 0);
+		}
+		notify_uevent = true;
+	}
+
+	memcpy(&chip->cp, &cp, sizeof(struct chg_props));
+	memcpy(&chip->cs, &cs, sizeof(struct chg_status));
+
+	if (notify_uevent)
+		kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+}
+
+static int qnovo_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct qnovo *chip = container_of(nb, struct qnovo, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+	if ((strcmp(psy->desc->name, "battery") == 0)
+		|| (strcmp(psy->desc->name, "usb") == 0))
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static irqreturn_t handle_ptrain_done(int irq, void *data)
+{
+	struct qnovo *chip = data;
+
+	/* disable user voter here */
+	vote(chip->disable_votable, USER_VOTER, 0, 0);
+	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+	return IRQ_HANDLED;
+}
+
+static int qnovo_hw_init(struct qnovo *chip)
+{
+	int rc;
+	u8 iadc_offset_external, iadc_offset_internal;
+	u8 iadc_gain_external, iadc_gain_internal;
+	u8 vadc_offset, vadc_gain;
+	u8 val;
+
+	vote(chip->disable_votable, USER_VOTER, 1, 0);
+
+	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_0, &iadc_offset_external, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc exernal offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_1, &iadc_offset_internal, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc internal offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_GAIN_0, &iadc_gain_external, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc external gain rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_GAIN_1, &iadc_gain_internal, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc internal gain rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_VADC_OFFSET, &vadc_offset, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read vadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_VADC_GAIN, &vadc_gain, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read vadc external gain rc = %d\n", rc);
+		return rc;
+	}
+
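+	/*
+	 * Convert the trim registers into usable terms: offsets are scaled
+	 * to nA/nV using the ADC LSB sizes, and gains are stored as
+	 * multipliers scaled by 10^6 (hence "_mega") so that
+	 * raw * gain / 1000000 applies the correction.
+	 */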
+	chip->external_offset_nA = (s64)iadc_offset_external * IADC_LSB_NA;
+	chip->internal_offset_nA = (s64)iadc_offset_internal * IADC_LSB_NA;
+	chip->offset_nV = (s64)vadc_offset * VADC_LSB_NA;
+	chip->external_i_gain_mega
+		= 1000000000 + (s64)iadc_gain_external * GAIN_LSB_FACTOR;
+	chip->external_i_gain_mega
+		= div_s64(chip->external_i_gain_mega, 1000);
+	chip->internal_i_gain_mega
+		= 1000000000 + (s64)iadc_gain_internal * GAIN_LSB_FACTOR;
+	chip->internal_i_gain_mega
+		= div_s64(chip->internal_i_gain_mega, 1000);
+	chip->v_gain_mega = 1000000000 + (s64)vadc_gain * GAIN_LSB_FACTOR;
+	chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000);
+
+	val = 0;
+	rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc bitsteam control rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	val *= -1;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	val *= -1;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qnovo_register_notifier(struct qnovo *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = qnovo_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qnovo_determine_initial_status(struct qnovo *chip)
+{
+	status_change_work(&chip->status_change_work);
+	return 0;
+}
+
+static int qnovo_request_interrupts(struct qnovo *chip)
+{
+	int rc = 0;
+	int irq_ptrain_done = of_irq_get_byname(chip->dev->of_node,
+						"ptrain-done");
+
+	rc = devm_request_threaded_irq(chip->dev, irq_ptrain_done, NULL,
+					handle_ptrain_done,
+					IRQF_ONESHOT, "ptrain-done", chip);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d rc = %d\n",
+					irq_ptrain_done, rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int qnovo_probe(struct platform_device *pdev)
+{
+	struct qnovo *chip;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->fv_uV_request = -EINVAL;
+	chip->fcc_uA_request = -EINVAL;
+	chip->dev = &pdev->dev;
+	mutex_init(&chip->write_lock);
+
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	rc = qnovo_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_check_chg_version(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't check version rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set driver data before resources request it */
+	platform_set_drvdata(pdev, chip);
+
+	chip->disable_votable = create_votable("QNOVO_DISABLE", VOTE_SET_ANY,
+					qnovo_disable_cb, chip);
+	if (IS_ERR(chip->disable_votable)) {
+		rc = PTR_ERR(chip->disable_votable);
+		goto cleanup;
+	}
+
+	INIT_WORK(&chip->status_change_work, status_change_work);
+
+	rc = qnovo_hw_init(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto destroy_votable;
+	}
+
+	rc = qnovo_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = qnovo_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = qnovo_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	chip->qnovo_class.name = "qnovo";
+	chip->qnovo_class.owner = THIS_MODULE;
+	chip->qnovo_class.class_attrs = qnovo_attributes;
+
+	rc = class_register(&chip->qnovo_class);
+	if (rc < 0) {
+		pr_err("couldn't register qnovo sysfs class rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	return rc;
+
+unreg_notifier:
+	power_supply_unreg_notifier(&chip->nb);
+destroy_votable:
+	destroy_votable(chip->disable_votable);
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int qnovo_remove(struct platform_device *pdev)
+{
+	struct qnovo *chip = platform_get_drvdata(pdev);
+
+	class_unregister(&chip->qnovo_class);
+	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->disable_votable);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,qpnp-qnovo", },
+	{ },
+};
+
+static struct platform_driver qnovo_driver = {
+	.driver		= {
+		.name		= "qcom,qnovo-driver",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe		= qnovo_probe,
+	.remove		= qnovo_remove,
+};
+module_platform_driver(qnovo_driver);
+
+MODULE_DESCRIPTION("QPNP Qnovo Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
new file mode 100644
index 0000000..dab7888
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -0,0 +1,2240 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include "smb-reg.h"
+#include "smb-lib.h"
+#include "storm-watch.h"
+#include "pmic-voter.h"
+
+#define SMB2_DEFAULT_WPWR_UW	8000000
+
+static struct smb_params v1_params = {
+	.fcc			= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 4500000,
+		.step_u	= 25000,
+	},
+	.fv			= {
+		.name	= "float voltage",
+		.reg	= FLOAT_VOLTAGE_CFG_REG,
+		.min_u	= 3487500,
+		.max_u	= 4920000,
+		.step_u	= 7500,
+	},
+	.usb_icl		= {
+		.name	= "usb input current limit",
+		.reg	= USBIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 4800000,
+		.step_u	= 25000,
+	},
+	.icl_stat		= {
+		.name	= "input current limit status",
+		.reg	= ICL_STATUS_REG,
+		.min_u	= 0,
+		.max_u	= 4800000,
+		.step_u	= 25000,
+	},
+	.otg_cl			= {
+		.name	= "usb otg current limit",
+		.reg	= OTG_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 250000,
+		.max_u	= 2000000,
+		.step_u	= 250000,
+	},
+	.dc_icl			= {
+		.name	= "dc input current limit",
+		.reg	= DCIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_pt_lv		= {
+		.name	= "dc icl PT <8V",
+		.reg	= ZIN_ICL_PT_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_pt_hv		= {
+		.name	= "dc icl PT >8V",
+		.reg	= ZIN_ICL_PT_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_lv		= {
+		.name	= "dc icl div2 <5.5V",
+		.reg	= ZIN_ICL_LV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_mid_lv	= {
+		.name	= "dc icl div2 5.5-6.5V",
+		.reg	= ZIN_ICL_MID_LV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_mid_hv	= {
+		.name	= "dc icl div2 6.5-8.0V",
+		.reg	= ZIN_ICL_MID_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_hv		= {
+		.name	= "dc icl div2 >8.0V",
+		.reg	= ZIN_ICL_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.jeita_cc_comp		= {
+		.name	= "jeita fcc reduction",
+		.reg	= JEITA_CCCOMP_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 1575000,
+		.step_u	= 25000,
+	},
+	.step_soc_threshold[0]		= {
+		.name	= "step charge soc threshold 1",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH1_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc_threshold[1]		= {
+		.name	= "step charge soc threshold 2",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH2_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc_threshold[2]         = {
+		.name	= "step charge soc threshold 3",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH3_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc_threshold[3]         = {
+		.name	= "step charge soc threshold 4",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH4_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc			= {
+		.name	= "step charge soc",
+		.reg	= STEP_CHG_SOC_VBATT_V_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+		.set_proc	= smblib_mapping_soc_from_field_value,
+	},
+	.step_cc_delta[0]	= {
+		.name	= "step charge current delta 1",
+		.reg	= STEP_CHG_CURRENT_DELTA1_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[1]	= {
+		.name	= "step charge current delta 2",
+		.reg	= STEP_CHG_CURRENT_DELTA2_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[2]	= {
+		.name	= "step charge current delta 3",
+		.reg	= STEP_CHG_CURRENT_DELTA3_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[3]	= {
+		.name	= "step charge current delta 4",
+		.reg	= STEP_CHG_CURRENT_DELTA4_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[4]	= {
+		.name	= "step charge current delta 5",
+		.reg	= STEP_CHG_CURRENT_DELTA5_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.freq_buck		= {
+		.name	= "buck switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+		.min_u	= 600,
+		.max_u	= 2000,
+		.step_u	= 200,
+	},
+	.freq_boost		= {
+		.name	= "boost switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG,
+		.min_u	= 600,
+		.max_u	= 2000,
+		.step_u	= 200,
+	},
+};
+
+static struct smb_params pm660_params = {
+	.freq_buck		= {
+		.name	= "buck switching frequency",
+		.reg	= FREQ_CLK_DIV_REG,
+		.min_u	= 600,
+		.max_u	= 1600,
+		.set_proc = smblib_set_chg_freq,
+	},
+	.freq_boost		= {
+		.name	= "boost switching frequency",
+		.reg	= FREQ_CLK_DIV_REG,
+		.min_u	= 600,
+		.max_u	= 1600,
+		.set_proc = smblib_set_chg_freq,
+	},
+};
+
+#define STEP_CHARGING_MAX_STEPS	5
+struct smb_dt_props {
+	int	fcc_ua;
+	int	usb_icl_ua;
+	int	otg_cl_ua;
+	int	dc_icl_ua;
+	int	boost_threshold_ua;
+	int	fv_uv;
+	int	wipower_max_uw;
+	u32	step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
+	s32	step_cc_delta[STEP_CHARGING_MAX_STEPS];
+	struct	device_node *revid_dev_node;
+	int	float_option;
+	int	chg_inhibit_thr_mv;
+	bool	no_battery;
+	bool	hvdcp_disable;
+	bool	auto_recharge_soc;
+};
+
+struct smb2 {
+	struct smb_charger	chg;
+	struct dentry		*dfs_root;
+	struct smb_dt_props	dt;
+	bool			bad_part;
+};
+
+static int __debug_mask;
+module_param_named(
+	debug_mask, __debug_mask, int, 0600
+);
+
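+/* defaults used when the corresponding DT properties are absent */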
+#define MICRO_1P5A	1500000
+#define MICRO_P1A	100000
+static int smb2_parse_dt(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	int rc, byte_len;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
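+	/*
+	 * Assume step charging is available; it is disabled again below if
+	 * either the soc-threshold or current-delta array is missing or has
+	 * the wrong number of entries.
+	 */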
+	chg->step_chg_enabled = true;
+
+	if (of_property_count_u32_elems(node, "qcom,step-soc-thresholds")
+			!= STEP_CHARGING_MAX_STEPS - 1)
+		chg->step_chg_enabled = false;
+
+	rc = of_property_read_u32_array(node, "qcom,step-soc-thresholds",
+			chip->dt.step_soc_threshold,
+			STEP_CHARGING_MAX_STEPS - 1);
+	if (rc < 0)
+		chg->step_chg_enabled = false;
+
+	if (of_property_count_u32_elems(node, "qcom,step-current-deltas")
+			!= STEP_CHARGING_MAX_STEPS)
+		chg->step_chg_enabled = false;
+
+	rc = of_property_read_u32_array(node, "qcom,step-current-deltas",
+			chip->dt.step_cc_delta,
+			STEP_CHARGING_MAX_STEPS);
+	if (rc < 0)
+		chg->step_chg_enabled = false;
+
+	chip->dt.no_battery = of_property_read_bool(node,
+						"qcom,batteryless-platform");
+
+	chg->external_vconn = of_property_read_bool(node,
+						"qcom,external-vconn");
+
+	rc = of_property_read_u32(node,
+				"qcom,fcc-max-ua", &chip->dt.fcc_ua);
+	if (rc < 0)
+		chip->dt.fcc_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,fv-max-uv", &chip->dt.fv_uv);
+	if (rc < 0)
+		chip->dt.fv_uv = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+	if (rc < 0)
+		chip->dt.usb_icl_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+	if (rc < 0)
+		chip->dt.otg_cl_ua = MICRO_1P5A;
+
+	rc = of_property_read_u32(node,
+				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
+	if (rc < 0)
+		chip->dt.dc_icl_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,boost-threshold-ua",
+				&chip->dt.boost_threshold_ua);
+	if (rc < 0)
+		chip->dt.boost_threshold_ua = MICRO_P1A;
+
+	rc = of_property_read_u32(node, "qcom,wipower-max-uw",
+				&chip->dt.wipower_max_uw);
+	if (rc < 0)
+		chip->dt.wipower_max_uw = -EINVAL;
+
+	if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
+		chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
+			GFP_KERNEL);
+
+		if (chg->thermal_mitigation == NULL)
+			return -ENOMEM;
+
+		chg->thermal_levels = byte_len / sizeof(u32);
+		rc = of_property_read_u32_array(node,
+				"qcom,thermal-mitigation",
+				chg->thermal_mitigation,
+				chg->thermal_levels);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't read thermal limits rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	of_property_read_u32(node, "qcom,float-option", &chip->dt.float_option);
+	if (chip->dt.float_option < 0 || chip->dt.float_option > 4) {
+		pr_err("qcom,float-option is out of range [0, 4]\n");
+		return -EINVAL;
+	}
+
+	chip->dt.hvdcp_disable = of_property_read_bool(node,
+						"qcom,hvdcp-disable");
+
+	of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
+				&chip->dt.chg_inhibit_thr_mv);
+	if ((chip->dt.chg_inhibit_thr_mv < 0 ||
+		chip->dt.chg_inhibit_thr_mv > 300)) {
+		pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
+		return -EINVAL;
+	}
+
+	chip->dt.auto_recharge_soc = of_property_read_bool(node,
+						"qcom,auto-recharge-soc");
+
+	chg->micro_usb_mode = of_property_read_bool(node, "qcom,micro-usb");
+
+	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
+					"qcom,suspend-input-on-debug-batt");
+	return 0;
+}
+
+/************************
+ * USB PSY REGISTRATION *
+ ************************/
+
+static enum power_supply_property smb2_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+	POWER_SUPPLY_PROP_PD_ALLOWED,
+	POWER_SUPPLY_PROP_PD_ACTIVE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_PE_START,
+	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+};
+
+static int smb2_usb_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		if (chip->bad_part)
+			val->intval = 1;
+		else
+			rc = smblib_get_prop_usb_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		val->intval = chg->voltage_min_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chg->voltage_max_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_usb_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+		rc = smblib_get_prop_pd_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		if (chip->bad_part)
+			val->intval = POWER_SUPPLY_TYPE_USB;
+		else
+			val->intval = chg->usb_psy_desc.type;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		if (chg->micro_usb_mode)
+			val->intval = POWER_SUPPLY_TYPEC_NONE;
+		else if (chip->bad_part)
+			val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
+		else
+			rc = smblib_get_prop_typec_mode(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		if (chg->micro_usb_mode)
+			val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		else
+			rc = smblib_get_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
+		if (chg->micro_usb_mode)
+			val->intval = 0;
+		else
+			rc = smblib_get_prop_typec_cc_orientation(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ALLOWED:
+		rc = smblib_get_prop_pd_allowed(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ACTIVE:
+		val->intval = chg->pd_active;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+		rc = smblib_get_prop_usb_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_BOOST_CURRENT:
+		val->intval = chg->boost_current_ua;
+		break;
+	case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+		rc = smblib_get_prop_pd_in_hard_reset(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+		val->intval = chg->system_suspend_supported;
+		break;
+	case POWER_SUPPLY_PROP_PE_START:
+		rc = smblib_get_pe_start(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER);
+		break;
+	default:
+		pr_err("get prop %d is not supported in usb\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_usb_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		rc = smblib_set_prop_usb_voltage_min(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_prop_usb_voltage_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+		rc = smblib_set_prop_pd_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_set_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ACTIVE:
+		rc = smblib_set_prop_pd_active(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+		rc = smblib_set_prop_pd_in_hard_reset(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+		chg->system_suspend_supported = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_BOOST_CURRENT:
+		rc = smblib_set_prop_boost_current(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		rc = vote(chg->usb_icl_votable, CTM_VOTER,
+						val->intval >= 0, val->intval);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int smb2_usb_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int smb2_init_usb_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	chg->usb_psy_desc.name			= "usb";
+	chg->usb_psy_desc.type			= POWER_SUPPLY_TYPE_UNKNOWN;
+	chg->usb_psy_desc.properties		= smb2_usb_props;
+	chg->usb_psy_desc.num_properties	= ARRAY_SIZE(smb2_usb_props);
+	chg->usb_psy_desc.get_property		= smb2_usb_get_prop;
+	chg->usb_psy_desc.set_property		= smb2_usb_set_prop;
+	chg->usb_psy_desc.property_is_writeable	= smb2_usb_prop_is_writeable;
+
+	usb_cfg.drv_data = chip;
+	usb_cfg.of_node = chg->dev->of_node;
+	chg->usb_psy = devm_power_supply_register(chg->dev,
+						  &chg->usb_psy_desc,
+						  &usb_cfg);
+	if (IS_ERR(chg->usb_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->usb_psy);
+	}
+
+	return 0;
+}
+
+/*****************************
+ * USB MAIN PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb2_usb_main_props[] = {
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_ICL_REDUCTION,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	/*
+	 * TODO move the TEMP and TEMP_MAX properties here,
+	 * and update the thermal balancer to look here
+	 */
+};
+
+static int smb2_usb_main_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_ICL_REDUCTION:
+		val->intval = chg->icl_reduction_ua;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+							&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_MAIN;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED:
+		rc = smblib_get_prop_input_voltage_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_FCC_DELTA:
+		rc = smblib_get_prop_fcc_delta(chg, val);
+		break;
+	default:
+		pr_debug("get prop %d is not supported in usb-main\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_usb_main_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_ICL_REDUCTION:
+		rc = smblib_set_icl_reduction(chg, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc usb_main_psy_desc = {
+	.name		= "main",
+	.type		= POWER_SUPPLY_TYPE_MAIN,
+	.properties	= smb2_usb_main_props,
+	.num_properties	= ARRAY_SIZE(smb2_usb_main_props),
+	.get_property	= smb2_usb_main_get_prop,
+	.set_property	= smb2_usb_main_set_prop,
+};
+
+static int smb2_init_usb_main_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_main_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	usb_main_cfg.drv_data = chip;
+	usb_main_cfg.of_node = chg->dev->of_node;
+	chg->usb_main_psy = devm_power_supply_register(chg->dev,
+						  &usb_main_psy_desc,
+						  &usb_main_cfg);
+	if (IS_ERR(chg->usb_main_psy)) {
+		pr_err("Couldn't register USB main power supply\n");
+		return PTR_ERR(chg->usb_main_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * DC PSY REGISTRATION   *
+ *************************/
+
+static enum power_supply_property smb2_dc_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smb2_dc_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_dc_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_dc_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_dc_current_max(chg, val);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_dc_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_prop_dc_current_max(chg, val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb2_dc_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	int rc;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc dc_psy_desc = {
+	.name = "dc",
+	.type = POWER_SUPPLY_TYPE_WIPOWER,
+	.properties = smb2_dc_props,
+	.num_properties = ARRAY_SIZE(smb2_dc_props),
+	.get_property = smb2_dc_get_prop,
+	.set_property = smb2_dc_set_prop,
+	.property_is_writeable = smb2_dc_prop_is_writeable,
+};
+
+static int smb2_init_dc_psy(struct smb2 *chip)
+{
+	struct power_supply_config dc_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	dc_cfg.drv_data = chip;
+	dc_cfg.of_node = chg->dev->of_node;
+	chg->dc_psy = devm_power_supply_register(chg->dev,
+						  &dc_psy_desc,
+						  &dc_cfg);
+	if (IS_ERR(chg->dc_psy)) {
+		pr_err("Couldn't register DC power supply\n");
+		return PTR_ERR(chg->dc_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * BATT PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb2_batt_props[] = {
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+	POWER_SUPPLY_PROP_CHARGE_DONE,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
+	POWER_SUPPLY_PROP_RERUN_AICL,
+	POWER_SUPPLY_PROP_DP_DM,
+};
+
+static int smb2_batt_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb_charger *chg = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		rc = smblib_get_prop_batt_status(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		rc = smblib_get_prop_batt_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_batt_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_get_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = smblib_get_prop_system_temp_level(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smblib_get_prop_charger_temp(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		rc = smblib_get_prop_input_current_limited(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+		val->intval = chg->step_chg_enabled;
+		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_STEP:
+		rc = smblib_get_prop_step_chg_step(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_batt_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
+		val->intval = chg->qnovo_fv_uv;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_batt_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
+		val->intval = chg->qnovo_fcc_ua;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		val->intval = get_client_vote(chg->fcc_votable,
+					      DEFAULT_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = smblib_get_prop_batt_temp(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_DONE:
+		rc = smblib_get_prop_batt_charge_done(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		val->intval = get_client_vote(chg->pl_disable_votable,
+					      USER_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+		rc = smblib_get_prop_die_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_DP_DM:
+		val->intval = chg->pulse_cnt;
+		break;
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		val->intval = 0;
+		break;
+	default:
+		pr_err("batt power supply prop %d not supported\n", psp);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+static int smb2_batt_set_prop(struct power_supply *psy,
+		enum power_supply_property prop,
+		const union power_supply_propval *val)
+{
+	int rc = 0;
+	struct smb_charger *chg = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_set_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = smblib_set_prop_system_temp_level(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_set_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
+		chg->qnovo_fv_uv = val->intval;
+		rc = rerun_election(chg->fv_votable);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
+		chg->qnovo_fcc_ua = val->intval;
+		rc = rerun_election(chg->fcc_votable);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		vote(chg->fcc_votable, DEFAULT_VOTER, true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		if (chg->pl.psy)
+			power_supply_set_property(chg->pl.psy,
+				POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		rc = smblib_rerun_aicl(chg);
+		break;
+	case POWER_SUPPLY_PROP_DP_DM:
+		rc = smblib_dp_dm(chg, val->intval);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb2_batt_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CAPACITY:
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+	case POWER_SUPPLY_PROP_DP_DM:
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct power_supply_desc batt_psy_desc = {
+	.name = "battery",
+	.type = POWER_SUPPLY_TYPE_BATTERY,
+	.properties = smb2_batt_props,
+	.num_properties = ARRAY_SIZE(smb2_batt_props),
+	.get_property = smb2_batt_get_prop,
+	.set_property = smb2_batt_set_prop,
+	.property_is_writeable = smb2_batt_prop_is_writeable,
+};
+
+static int smb2_init_batt_psy(struct smb2 *chip)
+{
+	struct power_supply_config batt_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	batt_cfg.drv_data = chg;
+	batt_cfg.of_node = chg->dev->of_node;
+	chg->batt_psy = devm_power_supply_register(chg->dev,
+						   &batt_psy_desc,
+						   &batt_cfg);
+	if (IS_ERR(chg->batt_psy)) {
+		pr_err("Couldn't register battery power supply\n");
+		return PTR_ERR(chg->batt_psy);
+	}
+
+	return rc;
+}
+
+/******************************
+ * VBUS REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb2_vbus_reg_ops = {
+	.enable = smblib_vbus_regulator_enable,
+	.disable = smblib_vbus_regulator_disable,
+	.is_enabled = smblib_vbus_regulator_is_enabled,
+};
+
+static int smb2_init_vbus_regulator(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vbus_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vbus_vreg),
+				      GFP_KERNEL);
+	if (!chg->vbus_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vbus_vreg->rdesc.owner = THIS_MODULE;
+	chg->vbus_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vbus_vreg->rdesc.ops = &smb2_vbus_reg_ops;
+	chg->vbus_vreg->rdesc.of_match = "qcom,smb2-vbus";
+	chg->vbus_vreg->rdesc.name = "qcom,smb2-vbus";
+
+	chg->vbus_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vbus_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vbus_vreg->rdev)) {
+		rc = PTR_ERR(chg->vbus_vreg->rdev);
+		chg->vbus_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VBUS regulator rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/******************************
+ * VCONN REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb2_vconn_reg_ops = {
+	.enable = smblib_vconn_regulator_enable,
+	.disable = smblib_vconn_regulator_disable,
+	.is_enabled = smblib_vconn_regulator_is_enabled,
+};
+
+static int smb2_init_vconn_regulator(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
+				      GFP_KERNEL);
+	if (!chg->vconn_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vconn_vreg->rdesc.owner = THIS_MODULE;
+	chg->vconn_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vconn_vreg->rdesc.ops = &smb2_vconn_reg_ops;
+	chg->vconn_vreg->rdesc.of_match = "qcom,smb2-vconn";
+	chg->vconn_vreg->rdesc.name = "qcom,smb2-vconn";
+
+	chg->vconn_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vconn_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vconn_vreg->rdev)) {
+		rc = PTR_ERR(chg->vconn_vreg->rdev);
+		chg->vconn_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VCONN regulator rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+static int smb2_config_step_charging(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+	int i;
+
+	if (!chg->step_chg_enabled)
+		return rc;
+
+	for (i = 0; i < STEP_CHARGING_MAX_STEPS - 1; i++) {
+		rc = smblib_set_charge_param(chg,
+					     &chg->param.step_soc_threshold[i],
+					     chip->dt.step_soc_threshold[i]);
+		if (rc < 0) {
+			pr_err("Couldn't configure soc thresholds rc = %d\n",
+				rc);
+			goto err_out;
+		}
+	}
+
+	for (i = 0; i < STEP_CHARGING_MAX_STEPS; i++) {
+		rc = smblib_set_charge_param(chg, &chg->param.step_cc_delta[i],
+					     chip->dt.step_cc_delta[i]);
+		if (rc < 0) {
+			pr_err("Couldn't configure cc delta rc = %d\n",
+				rc);
+			goto err_out;
+		}
+	}
+
+	rc = smblib_write(chg, STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG,
+			  STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure soc request timeout reg rc=%d\n",
+			 rc);
+		goto err_out;
+	}
+
+	rc = smblib_write(chg, STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG,
+			  STEP_CHG_UPDATE_FAIL_TIMEOUT_120S);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure soc fail timeout reg rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	/*
+	 *  enable step charging, source soc, standard mode, go to final
+	 *  state in case of failure.
+	 */
+	rc = smblib_write(chg, CHGR_STEP_CHG_MODE_CFG_REG,
+			       STEP_CHARGING_ENABLE_BIT |
+			       STEP_CHARGING_SOURCE_SELECT_BIT |
+			       STEP_CHARGING_SOC_FAIL_OPTION_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
+		goto err_out;
+	}
+
+	return 0;
+err_out:
+	chg->step_chg_enabled = false;
+	return rc;
+}
+
+static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
+{
+	int rc;
+	int ua;
+	struct smb_charger *chg = &chip->chg;
+	s64 nw = (s64)uw * 1000;
+
+	if (uw < 0)
+		return 0;
+
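+	/*
+	 * Convert the power budget into a current limit per input-voltage
+	 * zone: nW / mV = uA, so each zone's ICL is the wattage divided by
+	 * that zone's maximum voltage.
+	 */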
+	ua = div_s64(nw, ZIN_ICL_PT_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_pt_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_PT_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_pt_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_LV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_MID_LV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_mid_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_MID_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_mid_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb2_configure_typec(struct smb_charger *chg)
+{
+	int rc;
+
+	/*
+	 * trigger the usb-typec-change interrupt only when the CC state
+	 * changes
+	 */
+	rc = smblib_write(chg, TYPE_C_INTRPT_ENB_REG,
+			  TYPEC_CCSTATE_CHANGE_INT_EN_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure power role for dual-role */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure power role for DRP rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * disable Type-C factory mode and stay in Attached.SRC state when VCONN
+	 * over-current happens
+	 */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			FACTORY_MODE_DETECTION_EN_BIT | VCONN_OC_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure Type-C rc=%d\n", rc);
+		return rc;
+	}
+
+	/* increase VCONN softstart */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_2_REG,
+			VCONN_SOFTSTART_CFG_MASK, VCONN_SOFTSTART_CFG_MASK);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't increase VCONN softstart rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* disable try.SINK mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb2_disable_typec(struct smb_charger *chg)
+{
+	int rc;
+
+	/* configure FSM in idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure micro USB mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			TYPE_C_OR_U_USB_BIT, TYPE_C_OR_U_USB_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* release FSM from idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't release FSM rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb2_init_hw(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+	u8 stat;
+
+	if (chip->dt.no_battery)
+		chg->fake_capacity = 50;
+
+	if (chip->dt.fcc_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.fcc, &chip->dt.fcc_ua);
+
+	if (chip->dt.fv_uv < 0)
+		smblib_get_charge_param(chg, &chg->param.fv, &chip->dt.fv_uv);
+
+	smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&chg->default_icl_ua);
+	if (chip->dt.usb_icl_ua < 0)
+		chip->dt.usb_icl_ua = chg->default_icl_ua;
+
+	if (chip->dt.dc_icl_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.dc_icl,
+					&chip->dt.dc_icl_ua);
+
+	/* set a slower soft start setting for OTG */
+	rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG2_REG,
+				ENG_SSUPPLY_IVREF_OTG_SS_MASK, OTG_SS_SLOW);
+	if (rc < 0) {
+		pr_err("Couldn't set otg soft start rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set OTG current limit */
+	rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+							chip->dt.otg_cl_ua);
+	if (rc < 0) {
+		pr_err("Couldn't set otg current limit rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->boost_threshold_ua = chip->dt.boost_threshold_ua;
+
+	rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+	if (rc < 0) {
+		pr_err("Couldn't read APSD_RESULT_STATUS rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_rerun_apsd_if_required(chg);
+
+	/* clear the ICL override if it is set */
+	rc = smblib_icl_override(chg, false);
+	if (rc < 0) {
+		pr_err("Couldn't disable ICL override rc=%d\n", rc);
+		return rc;
+	}
+
+	/* votes must be cast before configuring software control */
+	/* vote 0mA on usb_icl for non battery platforms */
+	vote(chg->usb_icl_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->dc_suspend_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->fcc_votable,
+		DEFAULT_VOTER, true, chip->dt.fcc_ua);
+	vote(chg->fv_votable,
+		DEFAULT_VOTER, true, chip->dt.fv_uv);
+	vote(chg->dc_icl_votable,
+		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
+	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
+		chip->dt.hvdcp_disable, 0);
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
+			chg->micro_usb_mode, 0);
+	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
+			chg->micro_usb_mode, 0);
+
+	/*
+	 * AICL configuration:
+	 * start from the minimum setting and disable the AICL ADC
+	 */
+	rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+			USBIN_AICL_START_AT_MAX_BIT
+				| USBIN_AICL_ADC_EN_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure AICL rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT |
+				 CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chg->micro_usb_mode)
+		rc = smb2_disable_typec(chg);
+	else
+		rc = smb2_configure_typec(chg);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VCONN for software control */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure VCONN for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VBUS for software control */
+	rc = smblib_masked_write(chg, OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure VBUS for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
+			QNOVO_PT_ENABLE_CMD_BIT, QNOVO_PT_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure step charging */
+	rc = smb2_config_step_charging(chip);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure step charging rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* configure wipower watts */
+	rc = smb2_config_wipower_input_power(chip, chip->dt.wipower_max_uw);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure wipower rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable SW STAT override */
+	rc = smblib_masked_write(chg, STAT_CFG_REG,
+				 STAT_SW_OVERRIDE_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't disable SW STAT override rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* configure float charger options */
+	switch (chip->dt.float_option) {
+	case 1:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, 0);
+		break;
+	case 2:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
+		break;
+	case 3:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
+		break;
+	case 4:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	switch (chip->dt.chg_inhibit_thr_mv) {
+	case 50:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_50MV);
+		break;
+	case 100:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_100MV);
+		break;
+	case 200:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_200MV);
+		break;
+	case 300:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_300MV);
+		break;
+	case 0:
+		rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				CHARGER_INHIBIT_BIT, 0);
+		break;
+	default:
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (chip->dt.auto_recharge_soc) {
+		rc = smblib_masked_write(chg, FG_UPDATE_CFG_2_SEL_REG,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT |
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT,
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure FG_UPDATE_CFG2_SEL_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		rc = smblib_masked_write(chg, FG_UPDATE_CFG_2_SEL_REG,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT |
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure FG_UPDATE_CFG2_SEL_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb2_chg_config_init(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct pmic_revid_data *pmic_rev_id;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+					  "qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
+		/*
+		 * the revid peripheral must be registered, any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
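+	/* select workaround flags and switching-frequency tables per PMIC */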
+	switch (pmic_rev_id->pmic_subtype) {
+	case PMI8998_SUBTYPE:
+		chip->chg.smb_version = PMI8998_SUBTYPE;
+		chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT;
+		if (pmic_rev_id->rev4 == PMI8998_V1P1_REV4) /* PMI rev 1.1 */
+			chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
+		if (pmic_rev_id->rev4 == PMI8998_V2P0_REV4) /* PMI rev 2.0 */
+			chg->wa_flags |= TYPEC_CC2_REMOVAL_WA_BIT;
+		chg->chg_freq.freq_5V		= 600;
+		chg->chg_freq.freq_6V_8V	= 800;
+		chg->chg_freq.freq_9V		= 1000;
+		chg->chg_freq.freq_12V		= 1200;
+		chg->chg_freq.freq_removal	= 1000;
+		chg->chg_freq.freq_below_otg_threshold = 2000;
+		chg->chg_freq.freq_above_otg_threshold = 800;
+		break;
+	case PM660_SUBTYPE:
+		chip->chg.smb_version = PM660_SUBTYPE;
+		chip->chg.wa_flags |= BOOST_BACK_WA;
+		chg->param.freq_buck = pm660_params.freq_buck;
+		chg->param.freq_boost = pm660_params.freq_boost;
+		chg->chg_freq.freq_5V		= 600;
+		chg->chg_freq.freq_6V_8V	= 800;
+		chg->chg_freq.freq_9V		= 1050;
+		chg->chg_freq.freq_12V		= 1200;
+		chg->chg_freq.freq_removal	= 1050;
+		chg->chg_freq.freq_below_otg_threshold = 1600;
+		chg->chg_freq.freq_above_otg_threshold = 800;
+		break;
+	default:
+		pr_err("PMIC subtype %d not supported\n",
+				pmic_rev_id->pmic_subtype);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/****************************
+ * DETERMINE INITIAL STATUS *
+ ****************************/
+
+static int smb2_determine_initial_status(struct smb2 *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+	struct smb_charger *chg = &chip->chg;
+
+	if (chg->bms_psy)
+		smblib_suspend_on_debug_battery(chg);
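+	/*
+	 * Call the handlers directly so the current hardware state is
+	 * processed before real interrupts start firing.
+	 */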
+	smblib_handle_usb_plugin(0, &irq_data);
+	smblib_handle_usb_typec_change(0, &irq_data);
+	smblib_handle_usb_source_change(0, &irq_data);
+	smblib_handle_chg_state_change(0, &irq_data);
+	smblib_handle_icl_change(0, &irq_data);
+	smblib_handle_step_chg_state_change(0, &irq_data);
+	smblib_handle_step_chg_soc_update_request(0, &irq_data);
+
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+
+static struct smb_irq_info smb2_irqs[] = {
+/* CHARGER IRQs */
+	[CHG_ERROR_IRQ] = {
+		.name		= "chg-error",
+		.handler	= smblib_handle_debug,
+	},
+	[CHG_STATE_CHANGE_IRQ] = {
+		.name		= "chg-state-change",
+		.handler	= smblib_handle_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_STATE_CHANGE_IRQ] = {
+		.name		= "step-chg-state-change",
+		.handler	= smblib_handle_step_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
+		.name		= "step-chg-soc-update-fail",
+		.handler	= smblib_handle_step_chg_soc_update_fail,
+		.wake		= true,
+	},
+	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
+		.name		= "step-chg-soc-update-request",
+		.handler	= smblib_handle_step_chg_soc_update_request,
+		.wake		= true,
+	},
+/* OTG IRQs */
+	[OTG_FAIL_IRQ] = {
+		.name		= "otg-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OVERCURRENT_IRQ] = {
+		.name		= "otg-overcurrent",
+		.handler	= smblib_handle_otg_overcurrent,
+	},
+	[OTG_OC_DIS_SW_STS_IRQ] = {
+		.name		= "otg-oc-dis-sw-sts",
+		.handler	= smblib_handle_debug,
+	},
+	[TESTMODE_CHANGE_DET_IRQ] = {
+		.name		= "testmode-change-detect",
+		.handler	= smblib_handle_debug,
+	},
+/* BATTERY IRQs */
+	[BATT_TEMP_IRQ] = {
+		.name		= "bat-temp",
+		.handler	= smblib_handle_batt_temp_changed,
+	},
+	[BATT_OCP_IRQ] = {
+		.name		= "bat-ocp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OV_IRQ] = {
+		.name		= "bat-ov",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_LOW_IRQ] = {
+		.name		= "bat-low",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_THERM_ID_MISS_IRQ] = {
+		.name		= "bat-therm-or-id-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_TERM_MISS_IRQ] = {
+		.name		= "bat-terminal-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+/* USB INPUT IRQs */
+	[USBIN_COLLAPSE_IRQ] = {
+		.name		= "usbin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_LT_3P6V_IRQ] = {
+		.name		= "usbin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_UV_IRQ] = {
+		.name		= "usbin-uv",
+		.handler	= smblib_handle_usbin_uv,
+	},
+	[USBIN_OV_IRQ] = {
+		.name		= "usbin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_PLUGIN_IRQ] = {
+		.name		= "usbin-plugin",
+		.handler	= smblib_handle_usb_plugin,
+		.wake		= true,
+	},
+	[USBIN_SRC_CHANGE_IRQ] = {
+		.name		= "usbin-src-change",
+		.handler	= smblib_handle_usb_source_change,
+		.wake		= true,
+	},
+	[USBIN_ICL_CHANGE_IRQ] = {
+		.name		= "usbin-icl-change",
+		.handler	= smblib_handle_icl_change,
+		.wake		= true,
+	},
+	[TYPE_C_CHANGE_IRQ] = {
+		.name		= "type-c-change",
+		.handler	= smblib_handle_usb_typec_change,
+		.wake		= true,
+	},
+/* DC INPUT IRQs */
+	[DCIN_COLLAPSE_IRQ] = {
+		.name		= "dcin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_LT_3P6V_IRQ] = {
+		.name		= "dcin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_UV_IRQ] = {
+		.name		= "dcin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_OV_IRQ] = {
+		.name		= "dcin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_PLUGIN_IRQ] = {
+		.name		= "dcin-plugin",
+		.handler	= smblib_handle_dc_plugin,
+		.wake		= true,
+	},
+	[DIV2_EN_DG_IRQ] = {
+		.name		= "div2-en-dg",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_ICL_CHANGE_IRQ] = {
+		.name		= "dcin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+/* MISCELLANEOUS IRQs */
+	[WDOG_SNARL_IRQ] = {
+		.name		= "wdog-snarl",
+		.handler	= NULL,
+	},
+	[WDOG_BARK_IRQ] = {
+		.name		= "wdog-bark",
+		.handler	= NULL,
+	},
+	[AICL_FAIL_IRQ] = {
+		.name		= "aicl-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[AICL_DONE_IRQ] = {
+		.name		= "aicl-done",
+		.handler	= smblib_handle_debug,
+	},
+	[HIGH_DUTY_CYCLE_IRQ] = {
+		.name		= "high-duty-cycle",
+		.handler	= smblib_handle_high_duty_cycle,
+		.wake		= true,
+	},
+	[INPUT_CURRENT_LIMIT_IRQ] = {
+		.name		= "input-current-limiting",
+		.handler	= smblib_handle_debug,
+	},
+	[TEMPERATURE_CHANGE_IRQ] = {
+		.name		= "temperature-change",
+		.handler	= smblib_handle_debug,
+	},
+	[SWITCH_POWER_OK_IRQ] = {
+		.name		= "switcher-power-ok",
+		.handler	= smblib_handle_switcher_power_ok,
+		.storm_data	= {true, 1000, 3},
+	},
+};
+
+static int smb2_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+		if (strcmp(smb2_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb2_request_interrupt(struct smb2 *chip,
+				struct device_node *node, const char *irq_name)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc, irq, irq_index;
+	struct smb_irq_data *irq_data;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb2_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
+	if (!smb2_irqs[irq_index].handler)
+		return 0;
+
+	irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	irq_data->parent_data = chip;
+	irq_data->name = irq_name;
+	irq_data->storm_data = smb2_irqs[irq_index].storm_data;
+	mutex_init(&irq_data->storm_data.storm_lock);
+
+	rc = devm_request_threaded_irq(chg->dev, irq, NULL,
+					smb2_irqs[irq_index].handler,
+					IRQF_ONESHOT, irq_name, irq_data);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d\n", irq);
+		return rc;
+	}
+
+	smb2_irqs[irq_index].irq = irq;
+	smb2_irqs[irq_index].irq_data = irq_data;
+	if (smb2_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb2_request_interrupts(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
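+	/* request every interrupt named in each child peripheral's DT node */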
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					    prop, name) {
+			rc = smb2_request_interrupt(chip, child, name);
+			if (rc < 0)
+				return rc;
+		}
+	}
+
+	return rc;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
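+/* debugfs hooks to force a power_supply_changed() notification */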
+static int force_batt_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->batt_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_batt_psy_update_ops, NULL,
+			force_batt_psy_update_write, "0x%02llx\n");
+
+static int force_usb_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->usb_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_usb_psy_update_ops, NULL,
+			force_usb_psy_update_write, "0x%02llx\n");
+
+static int force_dc_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->dc_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dc_psy_update_ops, NULL,
+			force_dc_psy_update_write, "0x%02llx\n");
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{
+	struct dentry *file;
+
+	chip->dfs_root = debugfs_create_dir("charger", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		pr_err("Couldn't create charger debugfs rc=%ld\n",
+			(long)chip->dfs_root);
+		return;
+	}
+
+	file = debugfs_create_file("force_batt_psy_update", 0600,
+			    chip->dfs_root, chip, &force_batt_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_batt_psy_update file rc=%ld\n",
+			(long)file);
+
+	file = debugfs_create_file("force_usb_psy_update", 0600,
+			    chip->dfs_root, chip, &force_usb_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_usb_psy_update file rc=%ld\n",
+			(long)file);
+
+	file = debugfs_create_file("force_dc_psy_update", 0600,
+			    chip->dfs_root, chip, &force_dc_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_dc_psy_update file rc=%ld\n",
+			(long)file);
+}
+
+#else
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{}
+
+#endif
+
+static int smb2_probe(struct platform_device *pdev)
+{
+	struct smb2 *chip;
+	struct smb_charger *chg;
+	int rc = 0;
+	union power_supply_propval val;
+	int usb_present, batt_present, batt_health, batt_charge_type;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chg = &chip->chg;
+	chg->dev = &pdev->dev;
+	chg->param = v1_params;
+	chg->debug_mask = &__debug_mask;
+	chg->mode = PARALLEL_MASTER;
+	chg->irq_info = smb2_irqs;
+	chg->name = "PMI";
+
+	chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
+	if (!chg->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	rc = smb2_chg_config_init(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't setup chg_config rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("smblib_init failed rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	/* set driver data before requesting resources that may use it */
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb2_init_vbus_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vbus regulator rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_vconn_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vconn regulator rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	/* extcon registration */
+	chg->extcon = devm_extcon_dev_allocate(chg->dev, smblib_extcon_cable);
+	if (IS_ERR(chg->extcon)) {
+		rc = PTR_ERR(chg->extcon);
+		dev_err(chg->dev, "failed to allocate extcon device rc=%d\n",
+				rc);
+		goto cleanup;
+	}
+
+	rc = devm_extcon_dev_register(chg->dev, chg->extcon);
+	if (rc < 0) {
+		dev_err(chg->dev, "failed to register extcon device rc=%d\n",
+				rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_dc_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize dc psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_main_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb main psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_batt_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize batt psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	rc = smb2_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	smb2_create_debugfs(chip);
+
+	rc = smblib_get_prop_usb_present(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get usb present rc=%d\n", rc);
+		goto cleanup;
+	}
+	usb_present = val.intval;
+
+	rc = smblib_get_prop_batt_present(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt present rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_present = val.intval;
+
+	rc = smblib_get_prop_batt_health(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt health rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_health = val.intval;
+
+	rc = smblib_get_prop_batt_charge_type(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_charge_type = val.intval;
+
+	pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n",
+		usb_present, chg->usb_psy_desc.type,
+		batt_present, batt_health, batt_charge_type);
+	return rc;
+
+cleanup:
+	smblib_deinit(chg);
+	if (chg->usb_psy)
+		power_supply_unregister(chg->usb_psy);
+	if (chg->batt_psy)
+		power_supply_unregister(chg->batt_psy);
+	if (chg->vconn_vreg && chg->vconn_vreg->rdev)
+		regulator_unregister(chg->vconn_vreg->rdev);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		regulator_unregister(chg->vbus_vreg->rdev);
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb2_remove(struct platform_device *pdev)
+{
+	struct smb2 *chip = platform_get_drvdata(pdev);
+	struct smb_charger *chg = &chip->chg;
+
+	power_supply_unregister(chg->batt_psy);
+	power_supply_unregister(chg->usb_psy);
+	regulator_unregister(chg->vconn_vreg->rdev);
+	regulator_unregister(chg->vbus_vreg->rdev);
+
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static void smb2_shutdown(struct platform_device *pdev)
+{
+	struct smb2 *chip = platform_get_drvdata(pdev);
+	struct smb_charger *chg = &chip->chg;
+
+	/* configure power role for UFP */
+	smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				TYPEC_POWER_ROLE_CMD_MASK, UFP_EN_CMD_BIT);
+
+	/* force HVDCP to 5V */
+	smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, 0);
+	smblib_write(chg, CMD_HVDCP_2_REG, FORCE_5V_BIT);
+
+	/* force enable APSD */
+	smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				 AUTO_SRC_DETECT_BIT, AUTO_SRC_DETECT_BIT);
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,qpnp-smb2", },
+	{ },
+};
+
+static struct platform_driver smb2_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-smb2",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe		= smb2_probe,
+	.remove		= smb2_remove,
+	.shutdown	= smb2_shutdown,
+};
+module_platform_driver(smb2_driver);
+
+MODULE_DESCRIPTION("QPNP SMB2 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
new file mode 100644
index 0000000..eb6727b
--- /dev/null
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -0,0 +1,4290 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/iio/consumer.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/irq.h>
+#include "smb-lib.h"
+#include "smb-reg.h"
+#include "storm-watch.h"
+#include "pmic-voter.h"
+
+#define smblib_err(chg, fmt, ...)		\
+	pr_err("%s: %s: " fmt, chg->name,	\
+		__func__, ##__VA_ARGS__)
+
+#define smblib_dbg(chg, reason, fmt, ...)			\
+	do {							\
+		if (*chg->debug_mask & (reason))		\
+			pr_info("%s: %s: " fmt, chg->name,	\
+				__func__, ##__VA_ARGS__);	\
+		else						\
+			pr_debug("%s: %s: " fmt, chg->name,	\
+				__func__, ##__VA_ARGS__);	\
+	} while (0)
+
+static bool is_secure(struct smb_charger *chg, int addr)
+{
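+	/* SHIP_MODE and FREQ_CLK_DIV sit below 0xA0 but still need the secure unlock */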
+	if (addr == SHIP_MODE_REG || addr == FREQ_CLK_DIV_REG)
+		return true;
+	/* assume everything at or above 0xA0 is secure */
+	return (bool)((addr & 0xFF) >= 0xA0);
+}
+
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
+{
+	unsigned int temp;
+	int rc = 0;
+
+	rc = regmap_read(chg->regmap, addr, &temp);
+	if (rc >= 0)
+		*val = (u8)temp;
+
+	return rc;
+}
+
+int smblib_multibyte_read(struct smb_charger *chg, u16 addr, u8 *val,
+				int count)
+{
+	return regmap_bulk_read(chg->regmap, addr, val, count);
+}
+
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chg->write_lock);
+	if (is_secure(chg, addr)) {
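+		/* unlock secure access: write 0xA5 to the peripheral's SEC_ACCESS register (offset 0xD0) first */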
+		rc = regmap_write(chg->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chg->regmap, addr, mask, val);
+
+unlock:
+	mutex_unlock(&chg->write_lock);
+	return rc;
+}
+
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chg->write_lock);
+
+	if (is_secure(chg, addr)) {
+		rc = regmap_write(chg->regmap, (addr & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_write(chg->regmap, addr, val);
+
+unlock:
+	mutex_unlock(&chg->write_lock);
+	return rc;
+}
+
+static int smblib_get_step_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
+{
+	int rc, step_state;
+	u8 stat;
+
+	if (!chg->step_chg_enabled) {
+		*cc_delta_ua = 0;
+		return 0;
+	}
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
+				STEP_CHARGING_STATUS_SHIFT;
+	rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
+				     cc_delta_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
+{
+	int rc, cc_minus_ua;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (!(stat & BAT_TEMP_STATUS_SOFT_LIMIT_MASK)) {
+		*cc_delta_ua = 0;
+		return 0;
+	}
+
+	rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
+				     &cc_minus_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
+		return rc;
+	}
+
+	*cc_delta_ua = -cc_minus_ua;
+	return 0;
+}
+
+int smblib_icl_override(struct smb_charger *chg, bool override)
+{
+	int rc;
+	bool override_status;
+	u8 stat;
+	u16 reg;
+
+	switch (chg->smb_version) {
+	case PMI8998_SUBTYPE:
+		reg = APSD_RESULT_STATUS_REG;
+		break;
+	case PM660_SUBTYPE:
+		reg = AICL_STATUS_REG;
+		break;
+	default:
+		smblib_dbg(chg, PR_MISC, "Unknown chip version=%x\n",
+				chg->smb_version);
+		return -EINVAL;
+	}
+
+	rc = smblib_read(chg, reg, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read reg=%x rc=%d\n", reg, rc);
+		return rc;
+	}
+	override_status = (bool)(stat & ICL_OVERRIDE_LATCH_BIT);
+
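+	/*
+	 * ICL_OVERRIDE_BIT appears to toggle the latched state, so only
+	 * write it when the latch differs from the requested state.
+	 */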
+	if (override != override_status) {
+		rc = smblib_masked_write(chg, CMD_APSD_REG,
+				ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/********************
+ * REGISTER GETTERS *
+ ********************/
+
+int smblib_get_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int *val_u)
+{
+	int rc = 0;
+	u8 val_raw;
+
+	rc = smblib_read(chg, param->reg, &val_raw);
+	if (rc < 0) {
+		smblib_err(chg, "%s: Couldn't read from 0x%04x rc=%d\n",
+			param->name, param->reg, rc);
+		return rc;
+	}
+
+	if (param->get_proc)
+		*val_u = param->get_proc(param, val_raw);
+	else
+		*val_u = val_raw * param->step_u + param->min_u;
+	smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+		   param->name, *val_u, val_raw);
+
+	return rc;
+}
+
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend)
+{
+	int rc = 0;
+	u8 temp;
+
+	rc = smblib_read(chg, USBIN_CMD_IL_REG, &temp);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USBIN_CMD_IL rc=%d\n", rc);
+		return rc;
+	}
+	*suspend = temp & USBIN_SUSPEND_BIT;
+
+	return rc;
+}
+
+struct apsd_result {
+	const char * const name;
+	const u8 bit;
+	const enum power_supply_type pst;
+};
+
+enum {
+	UNKNOWN,
+	SDP,
+	CDP,
+	DCP,
+	OCP,
+	FLOAT,
+	HVDCP2,
+	HVDCP3,
+	MAX_TYPES
+};
+
+static const struct apsd_result smblib_apsd_results[] = {
+	[UNKNOWN] = {
+		.name	= "UNKNOWN",
+		.bit	= 0,
+		.pst	= POWER_SUPPLY_TYPE_UNKNOWN
+	},
+	[SDP] = {
+		.name	= "SDP",
+		.bit	= SDP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB
+	},
+	[CDP] = {
+		.name	= "CDP",
+		.bit	= CDP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_CDP
+	},
+	[DCP] = {
+		.name	= "DCP",
+		.bit	= DCP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[OCP] = {
+		.name	= "OCP",
+		.bit	= OCP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[FLOAT] = {
+		.name	= "FLOAT",
+		.bit	= FLOAT_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[HVDCP2] = {
+		.name	= "HVDCP2",
+		.bit	= DCP_CHARGER_BIT | QC_2P0_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_HVDCP
+	},
+	[HVDCP3] = {
+		.name	= "HVDCP3",
+		.bit	= DCP_CHARGER_BIT | QC_3P0_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_HVDCP_3,
+	},
+};
+
+static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg)
+{
+	int rc, i;
+	u8 apsd_stat, stat;
+	const struct apsd_result *result = &smblib_apsd_results[UNKNOWN];
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &apsd_stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return result;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", apsd_stat);
+
+	if (!(apsd_stat & APSD_DTC_STATUS_DONE_BIT))
+		return result;
+
+	rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_RESULT_STATUS rc=%d\n",
+			rc);
+		return result;
+	}
+	stat &= APSD_RESULT_STATUS_MASK;
+
+	for (i = 0; i < ARRAY_SIZE(smblib_apsd_results); i++) {
+		if (smblib_apsd_results[i].bit == stat)
+			result = &smblib_apsd_results[i];
+	}
+
+	if (apsd_stat & QC_CHARGER_BIT) {
+		/* since it's a QC charger, return either HVDCP3 or HVDCP2 */
+		if (result != &smblib_apsd_results[HVDCP3])
+			result = &smblib_apsd_results[HVDCP2];
+	}
+
+	return result;
+}
+
+/********************
+ * REGISTER SETTERS *
+ ********************/
+
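+/*
+ * Raw register value i selects a switching frequency of chg_freq_list[i] / 2;
+ * the table entries appear to be in kHz (see smblib_set_chg_freq() below).
+ */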
+static int chg_freq_list[] = {
+	9600, 9600, 6400, 4800, 3800, 3200, 2700, 2400, 2100, 1900, 1700,
+	1600, 1500, 1400, 1300, 1200,
+};
+
+int smblib_set_chg_freq(struct smb_chg_param *param,
+				int val_u, u8 *val_raw)
+{
+	u8 i;
+
+	if (val_u > param->max_u || val_u < param->min_u)
+		return -EINVAL;
+
+	/* Charger FSW is the configured frequency / 2 */
+	val_u *= 2;
+	for (i = 0; i < ARRAY_SIZE(chg_freq_list); i++) {
+		if (chg_freq_list[i] == val_u)
+			break;
+	}
+	if (i == ARRAY_SIZE(chg_freq_list)) {
+		pr_err("Invalid frequency %d Hz\n", val_u / 2);
+		return -EINVAL;
+	}
+
+	*val_raw = i;
+
+	return 0;
+}
+
+static int smblib_set_opt_freq_buck(struct smb_charger *chg, int fsw_khz)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, fsw_khz);
+	if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_buck rc=%d\n", rc);
+
+	if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
+		pval.intval = fsw_khz;
+		/*
+		 * Some parallel charging implementations may not have
+		 * PROP_BUCK_FREQ property - they could be running
+		 * with a fixed frequency
+		 */
+		power_supply_set_property(chg->pl.psy,
+				POWER_SUPPLY_PROP_BUCK_FREQ, &pval);
+	}
+
+	return rc;
+}
+
+int smblib_set_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int val_u)
+{
+	int rc = 0;
+	u8 val_raw;
+
+	if (param->set_proc) {
+		rc = param->set_proc(param, val_u, &val_raw);
+		if (rc < 0)
+			return -EINVAL;
+	} else {
+		if (val_u > param->max_u || val_u < param->min_u) {
+			smblib_err(chg, "%s: %d is out of range [%d, %d]\n",
+				param->name, val_u, param->min_u, param->max_u);
+			return -EINVAL;
+		}
+
+		val_raw = (val_u - param->min_u) / param->step_u;
+	}
+
+	rc = smblib_write(chg, param->reg, val_raw);
+	if (rc < 0) {
+		smblib_err(chg, "%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
+			param->name, val_raw, param->reg, rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+		   param->name, val_u, val_raw);
+
+	return rc;
+}
+
+static int step_charge_soc_update(struct smb_charger *chg, int capacity)
+{
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
+	if (rc < 0) {
+		smblib_err(chg, "Error in updating soc, rc=%d\n", rc);
+		return rc;
+	}
+
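+	/* the V_UPDATE bit presumably latches the freshly written SOC into the step-charging logic */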
+	rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
+			STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
+	if (rc < 0) {
+		smblib_err(chg,
+			"Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+				 suspend ? USBIN_SUSPEND_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+
+	return rc;
+}
+
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, DCIN_CMD_IL_REG, DCIN_SUSPEND_BIT,
+				 suspend ? DCIN_SUSPEND_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write %s to DCIN_SUSPEND_BIT rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+
+	return rc;
+}
+
+static int smblib_set_adapter_allowance(struct smb_charger *chg,
+					u8 allowed_voltage)
+{
+	int rc = 0;
+
+	switch (allowed_voltage) {
+	case USBIN_ADAPTER_ALLOW_12V:
+	case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+	case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+	case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
+	case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+		/* PM660 only supports a maximum of 9V */
+		if (chg->smb_version == PM660_SUBTYPE) {
+			smblib_dbg(chg, PR_MISC, "voltage not supported=%d\n",
+					allowed_voltage);
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+		}
+		break;
+	}
+
+	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
+			allowed_voltage, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+#define MICRO_5V	5000000
+#define MICRO_9V	9000000
+#define MICRO_12V	12000000
+static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
+					int min_allowed_uv, int max_allowed_uv)
+{
+	int rc;
+	u8 allowed_voltage;
+
+	if (min_allowed_uv == MICRO_5V && max_allowed_uv == MICRO_5V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_5V);
+	} else if (min_allowed_uv == MICRO_9V && max_allowed_uv == MICRO_9V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_9V);
+	} else if (min_allowed_uv == MICRO_12V && max_allowed_uv == MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_12V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_12V);
+	} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_9V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+	} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_12V;
+	} else if (min_allowed_uv < MICRO_12V && max_allowed_uv <= MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_9V_TO_12V;
+	} else {
+		smblib_err(chg, "invalid allowed voltage [%d, %d]\n",
+			min_allowed_uv, max_allowed_uv);
+		return -EINVAL;
+	}
+
+	rc = smblib_set_adapter_allowance(chg, allowed_voltage);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't configure adapter allowance rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/********************
+ * HELPER FUNCTIONS *
+ ********************/
+
+static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
+{
+	const struct apsd_result *apsd_result;
+
+	/*
+	 * PD_INACTIVE_VOTER on hvdcp_disable_votable indicates whether
+	 * apsd rerun was tried earlier
+	 */
+	if (get_client_vote(chg->hvdcp_disable_votable_indirect,
+						PD_INACTIVE_VOTER)) {
+		vote(chg->hvdcp_disable_votable_indirect,
+				PD_INACTIVE_VOTER, false, 0);
+		/* ensure hvdcp is enabled */
+		if (!get_effective_result(
+				chg->hvdcp_disable_votable_indirect)) {
+			apsd_result = smblib_get_apsd_result(chg);
+			if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
+				/* rerun APSD */
+				smblib_dbg(chg, PR_MISC, "rerun APSD\n");
+				smblib_masked_write(chg, CMD_APSD_REG,
+						APSD_RERUN_BIT,
+						APSD_RERUN_BIT);
+			}
+		}
+	}
+	return 0;
+}
+
+static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
+{
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+
+	/* if PD is active, APSD is disabled so won't have a valid result */
+	if (chg->pd_active) {
+		chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
+		return apsd_result;
+	}
+
+	chg->usb_psy_desc.type = apsd_result->pst;
+	return apsd_result;
+}
+
+static int smblib_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct smb_charger *chg = container_of(nb, struct smb_charger, nb);
+
+	if (!strcmp(psy->desc->name, "bms")) {
+		if (!chg->bms_psy)
+			chg->bms_psy = psy;
+		if (ev == PSY_EVENT_PROP_CHANGED)
+			schedule_work(&chg->bms_update_work);
+	}
+
+	if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel"))
+		chg->pl.psy = psy;
+
+	return NOTIFY_OK;
+}
+
+static int smblib_register_notifier(struct smb_charger *chg)
+{
+	int rc;
+
+	chg->nb.notifier_call = smblib_notifier_call;
+	rc = power_supply_reg_notifier(&chg->nb);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw)
+{
+	if (val_u > param->max_u || val_u < param->min_u)
+		return -EINVAL;
+
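+	/* the register presumably encodes SOC in 0.5% steps, hence the doubling */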
+	*val_raw = val_u << 1;
+
+	return 0;
+}
+
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+					   u8 val_raw)
+{
+	int val_u  = val_raw * param->step_u + param->min_u;
+
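+	/* raw values above max_u encode negative deltas (two's-complement-style wrap) */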
+	if (val_u > param->max_u)
+		val_u -= param->max_u * 2;
+
+	return val_u;
+}
+
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw)
+{
+	if (val_u > param->max_u || val_u < param->min_u - param->max_u)
+		return -EINVAL;
+
+	val_u += param->max_u * 2 - param->min_u;
+	val_u %= param->max_u * 2;
+	*val_raw = val_u / param->step_u;
+
+	return 0;
+}
+
+static void smblib_uusb_removal(struct smb_charger *chg)
+{
+	int rc;
+
+	/* reset both usbin current and voltage votes */
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/* re-enable AUTH_IRQ_EN_CFG_BIT */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	/* reconfigure allowed voltage for HVDCP */
+	rc = smblib_set_adapter_allowance(chg,
+			USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+			rc);
+
+	chg->voltage_min_uv = MICRO_5V;
+	chg->voltage_max_uv = MICRO_5V;
+	chg->usb_icl_delta_ua = 0;
+	chg->pulse_cnt = 0;
+
+	/* clear USB ICL vote for USB_PSY_VOTER */
+	rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't un-vote for USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for DCP_VOTER */
+	rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
+	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
+			rc);
+}
+
+static bool smblib_sysok_reason_usbin(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, SYSOK_REASON_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get SYSOK_REASON_STATUS rc=%d\n", rc);
+		/* assuming 'not usbin' in case of read failure */
+		return false;
+	}
+
+	return stat & SYSOK_REASON_USBIN_BIT;
+}
+
+void smblib_suspend_on_debug_battery(struct smb_charger *chg)
+{
+	int rc;
+	union power_supply_propval val;
+
+	if (!chg->suspend_input_on_debug_batt)
+		return;
+
+	rc = power_supply_get_property(chg->bms_psy,
+			POWER_SUPPLY_PROP_DEBUG_BATTERY, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get debug battery prop rc=%d\n", rc);
+		return;
+	}
+
+	vote(chg->usb_icl_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+	vote(chg->dc_suspend_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+	if (val.intval)
+		pr_info("Input suspended: Fake battery\n");
+}
+
+int smblib_rerun_apsd_if_required(struct smb_charger *chg)
+{
+	const struct apsd_result *apsd_result;
+	union power_supply_propval val;
+	int rc;
+
+	rc = smblib_get_prop_usb_present(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get usb present rc = %d\n", rc);
+		return rc;
+	}
+
+	if (!val.intval)
+		return 0;
+
+	apsd_result = smblib_get_apsd_result(chg);
+	if ((apsd_result->pst == POWER_SUPPLY_TYPE_UNKNOWN)
+		|| (apsd_result->pst == POWER_SUPPLY_TYPE_USB)) {
+		/* rerun APSD */
+		pr_info("Rerunning APSD type = %s at bootup\n",
+				apsd_result->name);
+		rc = smblib_masked_write(chg, CMD_APSD_REG,
+					APSD_RERUN_BIT,
+					APSD_RERUN_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't rerun APSD rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+{
+	int rc;
+	u8 val[2];
+
+	switch (chg->smb_version) {
+	case PMI8998_SUBTYPE:
+		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, val);
+		if (rc) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+		*count = val[0] & QC_PULSE_COUNT_MASK;
+		break;
+	case PM660_SUBTYPE:
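+		/* PM660 splits the pulse count across two consecutive registers (little-endian) */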
+		rc = smblib_multibyte_read(chg,
+				QC_PULSE_COUNT_STATUS_1_REG, val, 2);
+		if (rc) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_1_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+		*count = (val[1] << 8) | val[0];
+		break;
+	default:
+		smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+				chg->smb_version);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*********************
+ * VOTABLE CALLBACKS *
+ *********************/
+
+static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
+			int suspend, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	/* resume input if suspend is invalid */
+	if (suspend < 0)
+		suspend = 0;
+
+	return smblib_set_dc_suspend(chg, (bool)suspend);
+}
+
+#define USBIN_25MA	25000
+#define USBIN_100MA	100000
+#define USBIN_150MA	150000
+#define USBIN_500MA	500000
+#define USBIN_900MA	900000
+
+static int set_sdp_current(struct smb_charger *chg, int icl_ua)
+{
+	int rc;
+	u8 icl_options;
+
+	/* power source is SDP */
+	switch (icl_ua) {
+	case USBIN_100MA:
+		/* USB 2.0 100mA */
+		icl_options = 0;
+		break;
+	case USBIN_150MA:
+		/* USB 3.0 150mA */
+		icl_options = CFG_USB3P0_SEL_BIT;
+		break;
+	case USBIN_500MA:
+		/* USB 2.0 500mA */
+		icl_options = USB51_MODE_BIT;
+		break;
+	case USBIN_900MA:
+		/* USB 3.0 900mA */
+		icl_options = CFG_USB3P0_SEL_BIT | USB51_MODE_BIT;
+		break;
+	default:
+		smblib_err(chg, "ICL %duA isn't supported for SDP\n", icl_ua);
+		return -EINVAL;
+	}
+
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+		CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL options rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smblib_usb_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc = 0;
+	bool override;
+	union power_supply_propval pval;
+
+	/* suspend and return if less than 25mA is requested */
+	if (client && (icl_ua < USBIN_25MA))
+		return smblib_set_usb_suspend(chg, true);
+
+	disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
+	if (!client)
+		goto override_suspend_config;
+
+	rc = smblib_get_prop_typec_mode(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+	/* configure current */
+	if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+		&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
+		rc = set_sdp_current(chg, icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
+			goto enable_icl_changed_interrupt;
+		}
+	} else {
+		rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
+				icl_ua - chg->icl_reduction_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
+			goto enable_icl_changed_interrupt;
+		}
+	}
+
+override_suspend_config:
+	/* determine if override needs to be enforced */
+	override = true;
+	if (client == NULL) {
+		/* remove override if there are no voters - hw defaults are desired */
+		override = false;
+	} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+		if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)
+			/* For std cable with type = SDP never override */
+			override = false;
+		else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP
+			&& icl_ua - chg->icl_reduction_ua == 1500000)
+			/*
+			 * For std cable with type = CDP override only if
+			 * current is not 1500mA
+			 */
+			override = false;
+	}
+
+	/* enforce override */
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+		USBIN_MODE_CHG_BIT, override ? USBIN_MODE_CHG_BIT : 0);
+
+	rc = smblib_icl_override(chg, override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+	/* unsuspend after configuring current and override */
+	rc = smblib_set_usb_suspend(chg, false);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't resume input rc=%d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+enable_icl_changed_interrupt:
+	enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
+	return rc;
+}
+
+static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc = 0;
+	bool suspend;
+
+	if (icl_ua < 0) {
+		smblib_dbg(chg, PR_MISC, "No Voter hence suspending\n");
+		icl_ua = 0;
+	}
+
+	suspend = (icl_ua < USBIN_25MA);
+	if (suspend)
+		goto suspend;
+
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, icl_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set DC input current limit rc=%d\n",
+			rc);
+		return rc;
+	}
+
+suspend:
+	rc = vote(chg->dc_suspend_votable, USER_VOTER, suspend, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int smblib_pd_disallowed_votable_indirect_callback(
+	struct votable *votable, void *data, int disallowed, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	rc = vote(chg->pd_allowed_votable, PD_DISALLOWED_INDIRECT_VOTER,
+		!disallowed, 0);
+
+	return rc;
+}
+
+static int smblib_awake_vote_callback(struct votable *votable, void *data,
+			int awake, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	if (awake)
+		pm_stay_awake(chg->dev);
+	else
+		pm_relax(chg->dev);
+
+	return 0;
+}
+
+static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
+			int chg_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+				 CHARGING_ENABLE_CMD_BIT,
+				 chg_disable ? 0 : CHARGING_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s charging rc=%d\n",
+			chg_disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
+			void *data, int chg_enable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
+
+	return 0;
+}
+
+static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
+			void *data,
+			int hvdcp_enable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+	u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
+
+	/* vote to enable/disable HW autonomous INOV */
+	vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0);
+
+	/*
+	 * When disabling HVDCP, clear the autonomous and auth bits so that
+	 * only QC 2.0 detection runs and no VBUS negotiation happens.
+	 */
+	if (!hvdcp_enable)
+		val = HVDCP_EN_BIT;
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				 HVDCP_EN_BIT | HVDCP_AUTH_ALG_EN_CFG_BIT,
+				 val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+			hvdcp_enable ? "enable" : "disable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smblib_hvdcp_disable_indirect_vote_callback(struct votable *votable,
+			void *data, int hvdcp_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	vote(chg->hvdcp_enable_votable, HVDCP_INDIRECT_VOTER,
+			!hvdcp_disable, 0);
+
+	return 0;
+}
+
+static int smblib_apsd_disable_vote_callback(struct votable *votable,
+			void *data,
+			int apsd_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	if (apsd_disable) {
+		/* Don't run APSD on CC debounce when APSD is disabled */
+		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+							APSD_START_ON_CC_BIT,
+							0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+							AUTO_SRC_DETECT_BIT,
+							0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable APSD rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+							AUTO_SRC_DETECT_BIT,
+							AUTO_SRC_DETECT_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable APSD rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+							APSD_START_ON_CC_BIT,
+							APSD_START_ON_CC_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int smblib_hvdcp_hw_inov_dis_vote_callback(struct votable *votable,
+				void *data, int disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	if (disable) {
+		/*
+		 * the pulse count register gets zeroed when autonomous mode is
+		 * disabled, so capture it in a variable before disabling
+		 */
+		rc = smblib_get_pulse_cnt(chg, &chg->pulse_cnt);
+		if (rc < 0) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+			HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT,
+			disable ? 0 : HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+				disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*******************
+ * VCONN REGULATOR *
+ *******************/
+
+#define MAX_OTG_SS_TRIES 2
+static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	u8 otg_stat, stat4;
+	int rc = 0, i;
+
+	if (!chg->external_vconn) {
+		/*
+		 * Hardware based OTG soft start should complete within 1ms, so
+		 * wait for 2ms in the worst case.
+		 */
+		for (i = 0; i < MAX_OTG_SS_TRIES; ++i) {
+			usleep_range(1000, 1100);
+			rc = smblib_read(chg, OTG_STATUS_REG, &otg_stat);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+									rc);
+				return rc;
+			}
+
+			if (otg_stat & BOOST_SOFTSTART_DONE_BIT)
+				break;
+		}
+
+		if (!(otg_stat & BOOST_SOFTSTART_DONE_BIT)) {
+			smblib_err(chg, "Couldn't enable VCONN; OTG soft start failed\n");
+			return -EAGAIN;
+		}
+	}
+
+	/*
+	 * VCONN_EN_ORIENTATION overrides which CC pin is used for VCONN and
+	 * must be set with the reverse polarity of CC_OUT.
+	 */
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
+	stat4 = stat4 & CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
+				 VCONN_EN_VALUE_BIT | stat4);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable vconn setting rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (chg->vconn_en)
+		goto unlock;
+
+	rc = _smblib_vconn_regulator_enable(rdev);
+	if (rc >= 0)
+		chg->vconn_en = true;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+static int _smblib_vconn_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	smblib_dbg(chg, PR_OTG, "disabling VCONN\n");
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable vconn regulator rc=%d\n", rc);
+
+	return rc;
+}
+
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->vconn_en)
+		goto unlock;
+
+	rc = _smblib_vconn_regulator_disable(rdev);
+	if (rc >= 0)
+		chg->vconn_en = false;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int ret;
+
+	mutex_lock(&chg->otg_oc_lock);
+	ret = chg->vconn_en;
+	mutex_unlock(&chg->otg_oc_lock);
+	return ret;
+}
+
+/*****************
+ * OTG REGULATOR *
+ *****************/
+
+static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc;
+
+	smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
+	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling OTG\n");
+	rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (chg->otg_en)
+		goto unlock;
+
+	rc = _smblib_vbus_regulator_enable(rdev);
+	if (rc >= 0)
+		chg->otg_en = true;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+static int _smblib_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (!chg->external_vconn && chg->vconn_en) {
+		smblib_dbg(chg, PR_OTG, "Killing VCONN before disabling OTG\n");
+		rc = _smblib_vconn_regulator_disable(rdev);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
+	}
+
+	smblib_dbg(chg, PR_OTG, "disabling OTG\n");
+	rc = smblib_write(chg, CMD_OTG_REG, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable OTG regulator rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "start 1 in 8 mode\n");
+	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->otg_en)
+		goto unlock;
+
+	rc = _smblib_vbus_regulator_disable(rdev);
+	if (rc >= 0)
+		chg->otg_en = false;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int ret;
+
+	mutex_lock(&chg->otg_oc_lock);
+	ret = chg->otg_en;
+	mutex_unlock(&chg->otg_oc_lock);
+	return ret;
+}
+
+/********************
+ * BATT PSY GETTERS *
+ ********************/
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	val->intval
+		= (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0)
+		 && get_client_vote(chg->dc_suspend_votable, USER_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATIF_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATIF_INT_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = !(stat & (BAT_THERM_OR_ID_MISSING_RT_STS_BIT
+					| BAT_TERMINAL_MISSING_RT_STS_BIT));
+
+	return rc;
+}
+
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	int rc = -EINVAL;
+
+	if (chg->fake_capacity >= 0) {
+		val->intval = chg->fake_capacity;
+		return 0;
+	}
+
+	if (chg->bms_psy)
+		rc = power_supply_get_property(chg->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	union power_supply_propval pval = {0, };
+	bool usb_online, dc_online;
+	u8 stat;
+	int rc;
+
+	rc = smblib_get_prop_usb_online(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get usb online property rc=%d\n",
+			rc);
+		return rc;
+	}
+	usb_online = (bool)pval.intval;
+
+	rc = smblib_get_prop_dc_online(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get dc online property rc=%d\n",
+			rc);
+		return rc;
+	}
+	dc_online = (bool)pval.intval;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+
+	if (!usb_online && !dc_online) {
+		switch (stat) {
+		case TERMINATE_CHARGE:
+		case INHIBIT_CHARGE:
+			val->intval = POWER_SUPPLY_STATUS_FULL;
+			break;
+		default:
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+			break;
+		}
+		return rc;
+	}
+
+	switch (stat) {
+	case TRICKLE_CHARGE:
+	case PRE_CHARGE:
+	case FAST_CHARGE:
+	case FULLON_CHARGE:
+	case TAPER_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_CHARGING;
+		break;
+	case TERMINATE_CHARGE:
+	case INHIBIT_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_FULL;
+		break;
+	case DISABLE_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	switch (stat & BATTERY_CHARGER_STATUS_MASK) {
+	case TRICKLE_CHARGE:
+	case PRE_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+		break;
+	case FAST_CHARGE:
+	case FULLON_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+		break;
+	case TAPER_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_TAPER;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+	}
+
+	return rc;
+}
+
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	union power_supply_propval pval;
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_2 = 0x%02x\n",
+		   stat);
+
+	if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
+		rc = smblib_get_prop_batt_voltage_now(chg, &pval);
+		if (!rc) {
+			/*
+			 * If Vbatt is within 40mV above Vfloat, then don't
+			 * treat it as overvoltage.
+			 */
+			if (pval.intval >=
+				get_effective_result(chg->fv_votable) + 40000) {
+				val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+				smblib_err(chg, "battery over-voltage\n");
+				goto done;
+			}
+		}
+	}
+
+	if (stat & BAT_TEMP_STATUS_TOO_COLD_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_COLD;
+	else if (stat & BAT_TEMP_STATUS_TOO_HOT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (stat & BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_COOL;
+	else if (stat & BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_WARM;
+	else
+		val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+done:
+	return rc;
+}
+
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->system_temp_level;
+	return 0;
+}
+
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	u8 stat;
+	int rc;
+
+	rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n", rc);
+		return rc;
+	}
+	val->intval = (stat & SOFT_ILIMIT_BIT) || chg->is_hdc;
+	return 0;
+}
+
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_VOLTAGE_NOW, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_CURRENT_NOW, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+			      union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_TEMP, val);
+	return rc;
+}
+
+int smblib_get_prop_step_chg_step(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	if (!chg->step_chg_enabled) {
+		val->intval = -1;
+		return 0;
+	}
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	val->intval = (stat & STEP_CHARGING_STATUS_MASK) >>
+				STEP_CHARGING_STATUS_SHIFT;
+
+	return rc;
+}
+
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+	val->intval = (stat == TERMINATE_CHARGE);
+	return 0;
+}
+
+/***********************
+ * BATTERY PSY SETTERS *
+ ***********************/
+
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	int rc;
+
+	/* vote 0mA when suspended */
+	rc = vote(chg->usb_icl_votable, USER_VOTER, (bool)val->intval, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s USB rc=%d\n",
+			(bool)val->intval ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	rc = vote(chg->dc_suspend_votable, USER_VOTER, (bool)val->intval, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
+			(bool)val->intval ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	power_supply_changed(chg->batt_psy);
+	return rc;
+}
+
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	chg->fake_capacity = val->intval;
+
+	power_supply_changed(chg->batt_psy);
+
+	return 0;
+}
+
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	if (val->intval < 0)
+		return -EINVAL;
+
+	if (chg->thermal_levels <= 0)
+		return -EINVAL;
+
+	if (val->intval > chg->thermal_levels)
+		return -EINVAL;
+
+	chg->system_temp_level = val->intval;
+	if (chg->system_temp_level == chg->thermal_levels)
+		return vote(chg->chg_disable_votable,
+			THERMAL_DAEMON_VOTER, true, 0);
+
+	vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, false, 0);
+	if (chg->system_temp_level == 0)
+		return vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0);
+
+	vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, true,
+			chg->thermal_mitigation[chg->system_temp_level]);
+	return 0;
+}
+
+int smblib_rerun_aicl(struct smb_charger *chg)
+{
+	int rc, settled_icl_ua;
+	u8 stat;
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+								rc);
+		return rc;
+	}
+
+	/* USB is suspended so skip re-running AICL */
+	if (stat & USBIN_SUSPEND_STS_BIT)
+		return rc;
+
+	smblib_dbg(chg, PR_MISC, "re-running AICL\n");
+	switch (chg->smb_version) {
+	case PMI8998_SUBTYPE:
+		rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+							&settled_icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+			return rc;
+		}
+
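+		/*
+		 * Briefly lower the ICL vote by one step and then remove it;
+		 * the resulting ICL change nudges the hardware to re-run AICL.
+		 */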
+		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+				max(settled_icl_ua - chg->param.usb_icl.step_u,
+				chg->param.usb_icl.step_u));
+		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+		break;
+	case PM660_SUBTYPE:
+		/*
+		 * Use restart_AICL instead of trigger_AICL as it runs the
+		 * complete AICL instead of starting from the last settled
+		 * value.
+		 */
+		rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+					RESTART_AICL_BIT, RESTART_AICL_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+									rc);
+		break;
+	default:
+		smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+				chg->smb_version);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smblib_dp_pulse(struct smb_charger *chg)
+{
+	int rc;
+
+	/* QC 3.0 increment */
+	rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_INCREMENT_BIT,
+			SINGLE_INCREMENT_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
+static int smblib_dm_pulse(struct smb_charger *chg)
+{
+	int rc;
+
+	/* QC 3.0 decrement */
+	rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_DECREMENT_BIT,
+			SINGLE_DECREMENT_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
+int smblib_dp_dm(struct smb_charger *chg, int val)
+{
+	int target_icl_ua, rc = 0;
+
+	switch (val) {
+	case POWER_SUPPLY_DP_DM_DP_PULSE:
+		rc = smblib_dp_pulse(chg);
+		if (!rc)
+			chg->pulse_cnt++;
+		smblib_dbg(chg, PR_PARALLEL, "DP_DM_DP_PULSE rc=%d cnt=%d\n",
+				rc, chg->pulse_cnt);
+		break;
+	case POWER_SUPPLY_DP_DM_DM_PULSE:
+		rc = smblib_dm_pulse(chg);
+		if (!rc && chg->pulse_cnt)
+			chg->pulse_cnt--;
+		smblib_dbg(chg, PR_PARALLEL, "DP_DM_DM_PULSE rc=%d cnt=%d\n",
+				rc, chg->pulse_cnt);
+		break;
+	case POWER_SUPPLY_DP_DM_ICL_DOWN:
+		chg->usb_icl_delta_ua -= 100000;
+		target_icl_ua = get_effective_result(chg->usb_icl_votable);
+		vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
+				target_icl_ua + chg->usb_icl_delta_ua);
+		break;
+	case POWER_SUPPLY_DP_DM_ICL_UP:
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/*******************
+ * DC PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, DCIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read DCIN_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & DCIN_PLUGIN_RT_STS_BIT);
+	return 0;
+}
+
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
+		val->intval = false;
+		return rc;
+	}
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+		   stat);
+
+	val->intval = (stat & USE_DCIN_BIT) &&
+		      (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+
+	return rc;
+}
+
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	val->intval = get_effective_result_locked(chg->dc_icl_votable);
+	return 0;
+}
+
+/*******************
+ * DC PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc;
+
+	rc = vote(chg->dc_icl_votable, USER_VOTER, true, val->intval);
+	return rc;
+}
+
+/*******************
+ * USB PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USBIN_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+	return 0;
+}
+
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	if (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0) {
+		val->intval = false;
+		return rc;
+	}
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+		   stat);
+
+	val->intval = (stat & USE_USBIN_BIT) &&
+		      (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+	return rc;
+}
+
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_get_prop_usb_present(chg, val);
+	if (rc < 0 || !val->intval)
+		return rc;
+
+	if (!chg->iio.usbin_v_chan ||
+		PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER)
+		chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v");
+
+	if (IS_ERR(chg->iio.usbin_v_chan))
+		return PTR_ERR(chg->iio.usbin_v_chan);
+
+	return iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval);
+}
+
+int smblib_get_prop_pd_current_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	val->intval = get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_usb_current_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	val->intval = get_client_vote_locked(chg->usb_icl_votable,
+			USB_PSY_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_get_prop_usb_present(chg, val);
+	if (rc < 0 || !val->intval)
+		return rc;
+
+	if (!chg->iio.usbin_i_chan ||
+		PTR_ERR(chg->iio.usbin_i_chan) == -EPROBE_DEFER)
+		chg->iio.usbin_i_chan = iio_channel_get(chg->dev, "usbin_i");
+
+	if (IS_ERR(chg->iio.usbin_i_chan))
+		return PTR_ERR(chg->iio.usbin_i_chan);
+
+	return iio_read_channel_processed(chg->iio.usbin_i_chan, &val->intval);
+}
+
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+				 union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->iio.temp_chan ||
+		PTR_ERR(chg->iio.temp_chan) == -EPROBE_DEFER)
+		chg->iio.temp_chan = iio_channel_get(chg->dev, "charger_temp");
+
+	if (IS_ERR(chg->iio.temp_chan))
+		return PTR_ERR(chg->iio.temp_chan);
+
+	rc = iio_read_channel_processed(chg->iio.temp_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->iio.temp_max_chan ||
+		PTR_ERR(chg->iio.temp_max_chan) == -EPROBE_DEFER)
+		chg->iio.temp_max_chan = iio_channel_get(chg->dev,
+							 "charger_temp_max");
+	if (IS_ERR(chg->iio.temp_max_chan))
+		return PTR_ERR(chg->iio.temp_max_chan);
+
+	rc = iio_read_channel_processed(chg->iio.temp_max_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+					 union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n",
+		   stat);
+
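+	/* report 1 or 2 for the attached orientation, 0 when nothing is attached */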
+	if (stat & CC_ATTACHED_BIT)
+		val->intval = (bool)(stat & CC_ORIENTATION_BIT) + 1;
+	else
+		val->intval = 0;
+
+	return rc;
+}
+
+static const char * const smblib_typec_mode_name[] = {
+	[POWER_SUPPLY_TYPEC_NONE]		  = "NONE",
+	[POWER_SUPPLY_TYPEC_SOURCE_DEFAULT]	  = "SOURCE_DEFAULT",
+	[POWER_SUPPLY_TYPEC_SOURCE_MEDIUM]	  = "SOURCE_MEDIUM",
+	[POWER_SUPPLY_TYPEC_SOURCE_HIGH]	  = "SOURCE_HIGH",
+	[POWER_SUPPLY_TYPEC_NON_COMPLIANT]	  = "NON_COMPLIANT",
+	[POWER_SUPPLY_TYPEC_SINK]		  = "SINK",
+	[POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE]   = "SINK_POWERED_CABLE",
+	[POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY] = "SINK_DEBUG_ACCESSORY",
+	[POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER]   = "SINK_AUDIO_ADAPTER",
+	[POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY]   = "POWERED_CABLE_ONLY",
+};
+
+static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc);
+		return POWER_SUPPLY_TYPEC_NONE;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_1 = 0x%02x\n", stat);
+
+	switch (stat) {
+	case 0:
+		return POWER_SUPPLY_TYPEC_NONE;
+	case UFP_TYPEC_RDSTD_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
+	case UFP_TYPEC_RD1P5_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_MEDIUM;
+	case UFP_TYPEC_RD3P0_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+	default:
+		break;
+	}
+
+	return POWER_SUPPLY_TYPEC_NON_COMPLIANT;
+}
+
+static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_2 rc=%d\n", rc);
+		return POWER_SUPPLY_TYPEC_NONE;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_2 = 0x%02x\n", stat);
+
+	switch (stat & DFP_TYPEC_MASK) {
+	case DFP_RA_RA_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
+	case DFP_RD_RD_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY;
+	case DFP_RD_RA_VCONN_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE;
+	case DFP_RD_OPEN_BIT:
+		return POWER_SUPPLY_TYPEC_SINK;
+	case DFP_RA_OPEN_BIT:
+		return POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY;
+	default:
+		break;
+	}
+
+	return POWER_SUPPLY_TYPEC_NONE;
+}
+
+int smblib_get_prop_typec_mode(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		val->intval = POWER_SUPPLY_TYPEC_NONE;
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat);
+
+	if (!(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
+		val->intval = POWER_SUPPLY_TYPEC_NONE;
+		return rc;
+	}
+
+	if (stat & UFP_DFP_MODE_STATUS_BIT)
+		val->intval = smblib_get_prop_dfp_mode(chg);
+	else
+		val->intval = smblib_get_prop_ufp_mode(chg);
+
+	return rc;
+}
+
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 ctrl;
+
+	rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_INTRPT_ENB_SOFTWARE_CTRL = 0x%02x\n",
+		   ctrl);
+
+	if (ctrl & TYPEC_DISABLE_CMD_BIT) {
+		val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		return rc;
+	}
+
+	switch (ctrl & (DFP_EN_CMD_BIT | UFP_EN_CMD_BIT)) {
+	case 0:
+		val->intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+		break;
+	case DFP_EN_CMD_BIT:
+		val->intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+		break;
+	case UFP_EN_CMD_BIT:
+		val->intval = POWER_SUPPLY_TYPEC_PR_SINK;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		smblib_err(chg, "unsupported power role 0x%02lx\n",
+			ctrl & (DFP_EN_CMD_BIT | UFP_EN_CMD_BIT));
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int smblib_get_prop_pd_allowed(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	val->intval = get_effective_result(chg->pd_allowed_votable);
+	return 0;
+}
+
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+					  union power_supply_propval *val)
+{
+	return smblib_get_charge_param(chg, &chg->param.icl_stat, &val->intval);
+}
+
+#define HVDCP3_STEP_UV	200000
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+						union power_supply_propval *val)
+{
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+	int rc, pulses;
+	u8 stat;
+
+	val->intval = MICRO_5V;
+	if (apsd_result == NULL) {
+		smblib_err(chg, "APSD result is NULL\n");
+		return 0;
+	}
+
+	switch (apsd_result->pst) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+			return 0;
+		}
+		pulses = (stat & QC_PULSE_COUNT_MASK);
+		val->intval = MICRO_5V + HVDCP3_STEP_UV * pulses;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc;
+	u8 ctrl;
+
+	rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+	val->intval = ctrl & EXIT_SNK_BASED_ON_CC_BIT;
+	return 0;
+}
+
+int smblib_get_pe_start(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	/*
+	 * The hvdcp timeout voter is the last one to allow pd. Use its vote
+	 * to indicate the start of the pe engine.
+	 */
+	val->intval
+		= !get_client_vote_locked(chg->pd_disallowed_votable_indirect,
+			HVDCP_TIMEOUT_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_die_health(struct smb_charger *chg,
+						union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TEMP_RANGE_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TEMP_RANGE_STATUS_REG rc=%d\n",
+									rc);
+		return rc;
+	}
+
+	/* TEMP_RANGE bits are mutually exclusive */
+	switch (stat & TEMP_RANGE_MASK) {
+	case TEMP_BELOW_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_COOL;
+		break;
+	case TEMP_WITHIN_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_WARM;
+		break;
+	case TEMP_ABOVE_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_HOT;
+		break;
+	case ALERT_LEVEL_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	return 0;
+}
+
+/*******************
+ * USB PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc;
+
+	if (chg->pd_active)
+		rc = vote(chg->usb_icl_votable, PD_VOTER, true, val->intval);
+	else
+		rc = -EPERM;
+
+	return rc;
+}
+
+int smblib_set_prop_usb_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	if (!chg->pd_active) {
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+				true, val->intval);
+	} else if (chg->system_suspend_supported) {
+		if (val->intval <= USBIN_25MA)
+			rc = vote(chg->usb_icl_votable,
+				PD_SUSPEND_SUPPORTED_VOTER, true, val->intval);
+		else
+			rc = vote(chg->usb_icl_votable,
+				PD_SUSPEND_SUPPORTED_VOTER, false, 0);
+	}
+	return rc;
+}
+
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.freq_boost,
+				val->intval <= chg->boost_threshold_ua ?
+				chg->chg_freq.freq_below_otg_threshold :
+				chg->chg_freq.freq_above_otg_threshold);
+	if (rc < 0) {
+		dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->boost_current_ua = val->intval;
+	return rc;
+}
+
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+				     const union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 power_role;
+
+	switch (val->intval) {
+	case POWER_SUPPLY_TYPEC_PR_NONE:
+		power_role = TYPEC_DISABLE_CMD_BIT;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_DUAL:
+		power_role = 0;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_SINK:
+		power_role = UFP_EN_CMD_BIT;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_SOURCE:
+		power_role = DFP_EN_CMD_BIT;
+		break;
+	default:
+		smblib_err(chg, "power role %d not supported\n", val->intval);
+		return -EINVAL;
+	}
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, power_role);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+			power_role, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc, min_uv;
+
+	min_uv = min(val->intval, chg->voltage_max_uv);
+	rc = smblib_set_usb_pd_allowed_voltage(chg, min_uv,
+					       chg->voltage_max_uv);
+	if (rc < 0) {
+		smblib_err(chg, "invalid max voltage %duV rc=%d\n",
+			val->intval, rc);
+		return rc;
+	}
+
+	if (chg->mode == PARALLEL_MASTER)
+		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER,
+		     min_uv > MICRO_5V, 0);
+
+	chg->voltage_min_uv = min_uv;
+	return rc;
+}
+
+int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc, max_uv;
+
+	max_uv = max(val->intval, chg->voltage_min_uv);
+	rc = smblib_set_usb_pd_allowed_voltage(chg, chg->voltage_min_uv,
+					       max_uv);
+	if (rc < 0) {
+		smblib_err(chg, "invalid min voltage %duV rc=%d\n",
+			val->intval, rc);
+		return rc;
+	}
+
+	chg->voltage_max_uv = max_uv;
+	rc = smblib_rerun_aicl(chg);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
+
+	return rc;
+}
+
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+			      const union power_supply_propval *val)
+{
+	int rc;
+	u8 stat = 0;
+	bool cc_debounced;
+	bool orientation;
+	bool pd_active = val->intval;
+
+	if (!get_effective_result(chg->pd_allowed_votable)) {
+		smblib_err(chg, "PD is not allowed\n");
+		return -EINVAL;
+	}
+
+	vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0);
+	vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0);
+
+	/*
+	 * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
+	 * when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) is set
+	 * or when VCONN_EN_VALUE_BIT is set.
+	 */
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return rc;
+	}
+
+	if (pd_active) {
+		orientation = stat & CC_ORIENTATION_BIT;
+		rc = smblib_masked_write(chg,
+				TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				VCONN_EN_ORIENTATION_BIT,
+				orientation ? 0 : VCONN_EN_ORIENTATION_BIT);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't enable vconn on CC line rc=%d\n", rc);
+			return rc;
+		}
+		/*
+		 * Enforce 500mA for PD until the real vote comes in later.
+		 * It is guaranteed that pd_active is set prior to
+		 * pd_current_max
+		 */
+		rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		/* clear USB ICL vote for DCP_VOTER */
+		rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't un-vote DCP from USB ICL rc=%d\n",
+				rc);
+
+		/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
+		rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+		if (rc < 0)
+			smblib_err(chg,
+					"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
+					rc);
+
+		/* remove USB_PSY_VOTER */
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
+			return rc;
+		}
+
+		/* pd active set, parallel charger can be enabled now */
+		rc = vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
+				false, 0);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't unvote PL_DELAY_HVDCP_VOTER rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	/* CC pin selection s/w override in PD session; h/w otherwise. */
+	rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+				 TYPEC_SPARE_CFG_BIT,
+				 pd_active ? TYPEC_SPARE_CFG_BIT : 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't change cc_out ctrl to %s rc=%d\n",
+			pd_active ? "SW" : "HW", rc);
+		return rc;
+	}
+
+	cc_debounced = (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+	if (!pd_active && cc_debounced)
+		try_rerun_apsd_for_hvdcp(chg);
+
+	chg->pd_active = pd_active;
+	smblib_update_usb_type(chg);
+	power_supply_changed(chg->usb_psy);
+
+	return rc;
+}
+
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	smblib_dbg(chg, PR_MISC, "Set ship mode: %d!!\n", !!val->intval);
+
+	rc = smblib_masked_write(chg, SHIP_MODE_REG, SHIP_MODE_EN_BIT,
+			!!val->intval ? SHIP_MODE_EN_BIT : 0);
+	if (rc < 0)
+		dev_err(chg->dev, "Couldn't %s ship mode, rc=%d\n",
+				!!val->intval ? "enable" : "disable", rc);
+
+	return rc;
+}
+
+int smblib_reg_block_update(struct smb_charger *chg,
+				struct reg_info *entry)
+{
+	int rc = 0;
+
+	while (entry && entry->reg) {
+		rc = smblib_read(chg, entry->reg, &entry->bak);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in reading %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
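+		/* keep only the masked bits so the restore won't touch other fields */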
+		entry->bak &= entry->mask;
+
+		rc = smblib_masked_write(chg, entry->reg,
+					 entry->mask, entry->val);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in writing %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry++;
+	}
+
+	return rc;
+}
+
+int smblib_reg_block_restore(struct smb_charger *chg,
+				struct reg_info *entry)
+{
+	int rc = 0;
+
+	while (entry && entry->reg) {
+		rc = smblib_masked_write(chg, entry->reg,
+					 entry->mask, entry->bak);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in writing %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry++;
+	}
+
+	return rc;
+}
+
+static struct reg_info cc2_detach_settings[] = {
+	{
+		.reg	= TYPE_C_CFG_REG,
+		.mask	= APSD_START_ON_CC_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_CFG_REG",
+	},
+	{
+		.reg	= TYPE_C_CFG_2_REG,
+		.mask	= TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
+		.val	= TYPE_C_UFP_MODE_BIT,
+		.desc	= "TYPE_C_CFG_2_REG",
+	},
+	{
+		.reg	= TYPE_C_CFG_3_REG,
+		.mask	= EN_TRYSINK_MODE_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_CFG_3_REG",
+	},
+	{
+		.reg	= TAPER_TIMER_SEL_CFG_REG,
+		.mask	= TYPEC_SPARE_CFG_BIT,
+		.val	= TYPEC_SPARE_CFG_BIT,
+		.desc	= "TAPER_TIMER_SEL_CFG_REG",
+	},
+	{
+		.reg	= TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+		.mask	= VCONN_EN_ORIENTATION_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG",
+	},
+	{
+		.reg	= MISC_CFG_REG,
+		.mask	= TCC_DEBOUNCE_20MS_BIT,
+		.val	= TCC_DEBOUNCE_20MS_BIT,
+		.desc	= "Tccdebounce time"
+	},
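+	/* empty entry (reg == 0) terminates the list */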
+	{
+	},
+};
+
+static int smblib_cc2_sink_removal_enter(struct smb_charger *chg)
+{
+	int rc = 0;
+	union power_supply_propval cc2_val = {0, };
+
+	if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+		return rc;
+
+	if (chg->cc2_sink_detach_flag != CC2_SINK_NONE)
+		return rc;
+
+	rc = smblib_get_prop_typec_cc_orientation(chg, &cc2_val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get cc orientation rc=%d\n", rc);
+		return rc;
+	}
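+	/* this WA targets CC2; skip when CC1 (orientation == 1) is active */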
+	if (cc2_val.intval == 1)
+		return rc;
+
+	rc = smblib_get_prop_typec_mode(chg, &cc2_val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
+		return rc;
+	}
+
+	switch (cc2_val.intval) {
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+		smblib_reg_block_update(chg, cc2_detach_settings);
+		chg->cc2_sink_detach_flag = CC2_SINK_STD;
+		schedule_work(&chg->rdstd_cc2_detach_work);
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		chg->cc2_sink_detach_flag = CC2_SINK_MEDIUM_HIGH;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int smblib_cc2_sink_removal_exit(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+		return rc;
+
+	if (chg->cc2_sink_detach_flag == CC2_SINK_STD) {
+		cancel_work_sync(&chg->rdstd_cc2_detach_work);
+		smblib_reg_block_restore(chg, cc2_detach_settings);
+	}
+
+	chg->cc2_sink_detach_flag = CC2_SINK_NONE;
+
+	return rc;
+}
+
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 EXIT_SNK_BASED_ON_CC_BIT,
+				 (val->intval) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+	if (rc < 0) {
+		smblib_err(chg, "Could not set EXIT_SNK_BASED_ON_CC rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, val->intval, 0);
+
+	if (val->intval)
+		rc = smblib_cc2_sink_removal_enter(chg);
+	else
+		rc = smblib_cc2_sink_removal_exit(chg);
+
+	if (rc < 0) {
+		smblib_err(chg, "Could not detect cc2 removal rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/************************
+ * USB MAIN PSY GETTERS *
+ ************************/
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc, jeita_cc_delta_ua, step_cc_delta_ua, hw_cc_delta_ua = 0;
+
+	rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
+		step_cc_delta_ua = 0;
+	} else {
+		hw_cc_delta_ua = step_cc_delta_ua;
+	}
+
+	rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
+		jeita_cc_delta_ua = 0;
+	} else if (jeita_cc_delta_ua < 0) {
+		/* HW will take the min between JEITA and step charge */
+		hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
+	}
+
+	val->intval = hw_cc_delta_ua;
+	return 0;
+}
+
+/************************
+ * USB MAIN PSY SETTERS *
+ ************************/
+
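+/* note: the *_CURRENT_MA values below are expressed in microamps */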
+#define SDP_CURRENT_MA			500000
+#define CDP_CURRENT_MA			1500000
+#define DCP_CURRENT_MA			1500000
+#define HVDCP_CURRENT_MA		3000000
+#define TYPEC_DEFAULT_CURRENT_MA	900000
+#define TYPEC_MEDIUM_CURRENT_MA		1500000
+#define TYPEC_HIGH_CURRENT_MA		3000000
+static int smblib_get_charge_current(struct smb_charger *chg,
+				int *total_current_ua)
+{
+	const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
+	union power_supply_propval val = {0, };
+	int rc, typec_source_rd, current_ua;
+	bool non_compliant;
+	u8 stat5;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+		return rc;
+	}
+	non_compliant = stat5 & TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT;
+
+	/* get settled ICL */
+	rc = smblib_get_prop_input_current_settled(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+		return rc;
+	}
+
+	typec_source_rd = smblib_get_prop_ufp_mode(chg);
+
+	/* QC 2.0/3.0 adapter */
+	if (apsd_result->bit & (QC_3P0_BIT | QC_2P0_BIT)) {
+		*total_current_ua = HVDCP_CURRENT_MA;
+		return 0;
+	}
+
+	if (non_compliant) {
+		switch (apsd_result->bit) {
+		case CDP_CHARGER_BIT:
+			current_ua = CDP_CURRENT_MA;
+			break;
+		case DCP_CHARGER_BIT:
+		case OCP_CHARGER_BIT:
+		case FLOAT_CHARGER_BIT:
+			current_ua = DCP_CURRENT_MA;
+			break;
+		default:
+			current_ua = 0;
+			break;
+		}
+
+		*total_current_ua = max(current_ua, val.intval);
+		return 0;
+	}
+
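+	/* compliant cable: base the limit on the Rp advertised by the source */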
+	switch (typec_source_rd) {
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+		switch (apsd_result->bit) {
+		case CDP_CHARGER_BIT:
+			current_ua = CDP_CURRENT_MA;
+			break;
+		case DCP_CHARGER_BIT:
+		case OCP_CHARGER_BIT:
+		case FLOAT_CHARGER_BIT:
+			current_ua = chg->default_icl_ua;
+			break;
+		default:
+			current_ua = 0;
+			break;
+		}
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+		current_ua = TYPEC_MEDIUM_CURRENT_MA;
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		current_ua = TYPEC_HIGH_CURRENT_MA;
+		break;
+	case POWER_SUPPLY_TYPEC_NON_COMPLIANT:
+	case POWER_SUPPLY_TYPEC_NONE:
+	default:
+		current_ua = 0;
+		break;
+	}
+
+	*total_current_ua = max(current_ua, val.intval);
+	return 0;
+}
+
+int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
+{
+	int current_ua, rc;
+
+	if (reduction_ua == 0) {
+		vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	} else {
+		/*
+		 * No usb_icl voter means we are defaulting to hw chosen
+		 * max limit. We need a vote from s/w to enforce the reduction.
+		 */
+		if (get_effective_result(chg->usb_icl_votable) == -EINVAL) {
+			rc = smblib_get_charge_current(chg, &current_ua);
+			if (rc < 0) {
+				pr_err("Failed to get ICL rc=%d\n", rc);
+				return rc;
+			}
+			vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, true,
+					current_ua);
+		}
+	}
+
+	chg->icl_reduction_ua = reduction_ua;
+
+	return rerun_election(chg->usb_icl_votable);
+}
+
+/************************
+ * PARALLEL PSY GETTERS *
+ ************************/
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+				      union power_supply_propval *pval)
+{
+	if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
+
+	if (IS_ERR(chg->iio.batt_i_chan))
+		return PTR_ERR(chg->iio.batt_i_chan);
+
+	return iio_read_channel_processed(chg->iio.batt_i_chan, &pval->intval);
+}
+
+/**********************
+ * INTERRUPT HANDLERS *
+ **********************/
+
+irqreturn_t smblib_handle_debug(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, OTG_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read OTG_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (stat & OTG_OVERCURRENT_RT_STS_BIT)
+		schedule_work(&chg->otg_oc_work);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	u8 stat;
+	int rc;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return IRQ_HANDLED;
+	}
+
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (chg->step_chg_enabled)
+		rerun_election(chg->fcc_votable);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (chg->step_chg_enabled)
+		rerun_election(chg->fcc_votable);
+
+	return IRQ_HANDLED;
+}
+
+#define STEP_SOC_REQ_MS	3000
+irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (!chg->bms_psy) {
+		schedule_delayed_work(&chg->step_soc_req_work,
+				      msecs_to_jiffies(STEP_SOC_REQ_MS));
+		return IRQ_HANDLED;
+	}
+
+	rc = smblib_get_prop_batt_capacity(chg, &pval);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
+	else
+		step_charge_soc_update(chg, pval.intval);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	rerun_election(chg->fcc_votable);
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	power_supply_changed(chg->usb_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usbin_uv(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	struct storm_watch *wdata;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	if (!chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data)
+		return IRQ_HANDLED;
+
+	wdata = &chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data->storm_data;
+	reset_storm_count(wdata);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat;
+	bool vbus_rising;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+	smblib_set_opt_freq_buck(chg,
+		vbus_rising ? chg->chg_freq.freq_5V :
+			chg->chg_freq.freq_removal);
+
+	/* fetch the DPDM regulator */
+	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+						"dpdm-supply", NULL)) {
+		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+		if (IS_ERR(chg->dpdm_reg)) {
+			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
+				PTR_ERR(chg->dpdm_reg));
+			chg->dpdm_reg = NULL;
+		}
+	}
+
+	if (vbus_rising) {
+		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+			rc = regulator_enable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
+					rc);
+		}
+	} else {
+		if (chg->wa_flags & BOOST_BACK_WA)
+			vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+
+		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+			rc = regulator_disable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
+					rc);
+		}
+
+		if (chg->micro_usb_mode) {
+			smblib_update_usb_type(chg);
+			extcon_set_cable_state_(chg->extcon, EXTCON_USB, false);
+			smblib_uusb_removal(chg);
+		}
+	}
+
+	power_supply_changed(chg->usb_psy);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n",
+		irq_data->name, vbus_rising ? "attached" : "detached");
+	return IRQ_HANDLED;
+}
+
+#define USB_WEAK_INPUT_UA	1400000
+#define ICL_CHANGE_DELAY_MS	1000
+irqreturn_t smblib_handle_icl_change(int irq, void *data)
+{
+	u8 stat;
+	int rc, settled_ua, delay = ICL_CHANGE_DELAY_MS;
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	if (chg->mode == PARALLEL_MASTER) {
+		rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
+					rc);
+			return IRQ_HANDLED;
+		}
+
+		rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+				&settled_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+			return IRQ_HANDLED;
+		}
+
+		/* If AICL settled then schedule work now */
+		if ((settled_ua == get_effective_result(chg->usb_icl_votable))
+				|| (stat & AICL_DONE_BIT))
+			delay = 0;
+
+		schedule_delayed_work(&chg->icl_change_work,
+						msecs_to_jiffies(delay));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smblib_handle_slow_plugin_timeout(struct smb_charger *chg,
+					      bool rising)
+{
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: slow-plugin-timeout %s\n",
+		   rising ? "rising" : "falling");
+}
+
+static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg,
+					       bool rising)
+{
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: sdp-enumeration-done %s\n",
+		   rising ? "rising" : "falling");
+}
+
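+/* at 200mV per pulse: 5 pulses ~ 6V, 20 pulses ~ 9V, 35 pulses ~ 12V */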
+#define QC3_PULSES_FOR_6V	5
+#define QC3_PULSES_FOR_9V	20
+#define QC3_PULSES_FOR_12V	35
+static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+	int pulses;
+
+	power_supply_changed(chg->usb_main_psy);
+	if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP) {
+		rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_CHANGE_STATUS rc=%d\n", rc);
+			return;
+		}
+
+		switch (stat & QC_2P0_STATUS_MASK) {
+		case QC_5V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_5V);
+			break;
+		case QC_9V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_9V);
+			break;
+		case QC_12V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_12V);
+			break;
+		default:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_removal);
+			break;
+		}
+	}
+
+	if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+			return;
+		}
+		pulses = (stat & QC_PULSE_COUNT_MASK);
+
+		if (pulses < QC3_PULSES_FOR_6V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_5V);
+		else if (pulses < QC3_PULSES_FOR_9V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_6V_8V);
+		else if (pulses < QC3_PULSES_FOR_12V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_9V);
+		else
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_12V);
+	}
+}
+
+/* triggers when HVDCP 3.0 authentication has finished */
+static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
+					      bool rising)
+{
+	const struct apsd_result *apsd_result;
+	int rc;
+
+	if (!rising)
+		return;
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/*
+		 * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
+		 * change interrupt.
+		 */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, 0);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	if (chg->mode == PARALLEL_MASTER)
+		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
+
+	/* QC authentication done, parallel charger can be enabled now */
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
+
+	/* the APSD done handler will set the USB supply type */
+	apsd_result = smblib_get_apsd_result(chg);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
+		   apsd_result->name);
+}
+
+static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
+					      bool rising, bool qc_charger)
+{
+	const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
+
+	/* Hold off PD only until hvdcp 2.0 detection timeout */
+	if (rising) {
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+								false, 0);
+		if (get_effective_result(chg->pd_disallowed_votable_indirect))
+			/* could be a legacy cable, try doing hvdcp */
+			try_rerun_apsd_for_hvdcp(chg);
+
+		/*
+		 * HVDCP detection timeout done
+		 * If adapter is not QC2.0/QC3.0 - it is a plain old DCP.
+		 */
+		if (!qc_charger && (apsd_result->bit & DCP_CHARGER_BIT))
+			/* enforce DCP ICL if specified */
+			vote(chg->usb_icl_votable, DCP_VOTER,
+				chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
+		/*
+		 * If adapter is not QC2.0/QC3.0 remove vote for parallel
+		 * disable.
+		 * Otherwise if adapter is QC2.0/QC3.0 wait for authentication
+		 * to complete.
+		 */
+		if (!qc_charger)
+			vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
+					false, 0);
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
+		   rising ? "rising" : "falling");
+}
+
+/* triggers when HVDCP is detected */
+static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
+					    bool rising)
+{
+	if (!rising)
+		return;
+
+	/* the APSD done handler will set the USB supply type */
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-detect-done %s\n",
+		   rising ? "rising" : "falling");
+}
+
+#define HVDCP_DET_MS 2500
+static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
+{
+	const struct apsd_result *apsd_result;
+
+	if (!rising)
+		return;
+
+	apsd_result = smblib_update_usb_type(chg);
+	switch (apsd_result->bit) {
+	case SDP_CHARGER_BIT:
+	case CDP_CHARGER_BIT:
+		if (chg->micro_usb_mode)
+			extcon_set_cable_state_(chg->extcon, EXTCON_USB,
+					true);
+		/* if not DCP then no hvdcp timeout happens. Enable pd here */
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+		break;
+	case OCP_CHARGER_BIT:
+	case FLOAT_CHARGER_BIT:
+		/*
+		 * if not DCP then no hvdcp timeout happens. Enable
+		 * pd/parallel here.
+		 */
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+		vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
+		break;
+	case DCP_CHARGER_BIT:
+		if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
+			schedule_delayed_work(&chg->hvdcp_detect_work,
+					      msecs_to_jiffies(HVDCP_DET_MS));
+		break;
+	default:
+		break;
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: apsd-done rising; %s detected\n",
+		   apsd_result->name);
+}
+
+irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc = 0;
+	u8 stat;
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+	smblib_handle_apsd_done(chg,
+		(bool)(stat & APSD_DTC_STATUS_DONE_BIT));
+
+	smblib_handle_hvdcp_detect_done(chg,
+		(bool)(stat & QC_CHARGER_BIT));
+
+	smblib_handle_hvdcp_check_timeout(chg,
+		(bool)(stat & HVDCP_CHECK_TIMEOUT_BIT),
+		(bool)(stat & QC_CHARGER_BIT));
+
+	smblib_handle_hvdcp_3p0_auth_done(chg,
+		(bool)(stat & QC_AUTH_DONE_STATUS_BIT));
+
+	smblib_handle_sdp_enumeration_done(chg,
+		(bool)(stat & ENUMERATION_DONE_BIT));
+
+	smblib_handle_slow_plugin_timeout(chg,
+		(bool)(stat & SLOW_PLUGIN_TIMEOUT_BIT));
+
+	smblib_hvdcp_adaptive_voltage_change(chg);
+
+	power_supply_changed(chg->usb_psy);
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+	return IRQ_HANDLED;
+}
+
+static void typec_source_removal(struct smb_charger *chg)
+{
+	int rc;
+
+	/* reset both usbin current and voltage votes */
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/* re-enable AUTH_IRQ_EN_CFG_BIT */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	/* reconfigure allowed voltage for HVDCP */
+	rc = smblib_set_adapter_allowance(chg,
+			USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+			rc);
+
+	chg->voltage_min_uv = MICRO_5V;
+	chg->voltage_max_uv = MICRO_5V;
+
+	/* clear USB ICL vote for PD_VOTER */
+	rc = vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't un-vote PD from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for USB_PSY_VOTER */
+	rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote USB_PSY from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for DCP_VOTER */
+	rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
+	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
+			rc);
+}
+
+static void typec_source_insertion(struct smb_charger *chg)
+{
+}
+
+static void typec_sink_insertion(struct smb_charger *chg)
+{
+	/*
+	 * When a sink is inserted we should not wait on the hvdcp timeout
+	 * to enable pd.
+	 */
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+			false, 0);
+}
+
+static void typec_sink_removal(struct smb_charger *chg)
+{
+	smblib_set_charge_param(chg, &chg->param.freq_boost,
+			chg->chg_freq.freq_above_otg_threshold);
+	chg->boost_current_ua = 0;
+}
+
+static void smblib_handle_typec_removal(struct smb_charger *chg)
+{
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
+	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER, true, 0);
+	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+
+	/* reset votes from vbus_cc_short */
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+			true, 0);
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+			true, 0);
+	/*
+	 * The cable could be removed during a hard reset; drop the hard
+	 * reset's vote that disables apsd.
+	 */
+	vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+
+	chg->vconn_attempts = 0;
+	chg->otg_attempts = 0;
+	chg->pulse_cnt = 0;
+	chg->usb_icl_delta_ua = 0;
+
+	chg->usb_ever_removed = true;
+
+	smblib_update_usb_type(chg);
+
+	typec_source_removal(chg);
+	typec_sink_removal(chg);
+}
+
+static void smblib_handle_typec_insertion(struct smb_charger *chg,
+		bool sink_attached, bool legacy_cable)
+{
+	int rp;
+	bool vbus_cc_short = false;
+	bool valid_legacy_cable;
+
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
+
+	if (sink_attached) {
+		typec_source_removal(chg);
+		typec_sink_insertion(chg);
+	} else {
+		typec_source_insertion(chg);
+		typec_sink_removal(chg);
+	}
+
+	valid_legacy_cable = legacy_cable &&
+		(chg->usb_ever_removed || !smblib_sysok_reason_usbin(chg));
+	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
+			valid_legacy_cable, 0);
+
+	if (valid_legacy_cable) {
+		rp = smblib_get_prop_ufp_mode(chg);
+		if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
+				|| rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
+			vbus_cc_short = true;
+			smblib_err(chg, "Disabling PD and HVDCP, VBUS-CC shorted, rp = %d found\n",
+					rp);
+		}
+	}
+
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+			vbus_cc_short, 0);
+	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER,
+			vbus_cc_short, 0);
+}
+
+static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
+			bool rising, bool sink_attached, bool legacy_cable)
+{
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	if (rising)
+		smblib_handle_typec_insertion(chg, sink_attached, legacy_cable);
+	else
+		smblib_handle_typec_removal(chg);
+
+	rc = smblib_get_prop_typec_mode(chg, &pval);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
+
+	/*
+	 * HW BUG - after the cable is removed, a medium or high rd reading
+	 * falls back to std. Use that as the signal for typec cc detachment
+	 * in the software WA.
+	 */
+	if (chg->cc2_sink_detach_flag == CC2_SINK_MEDIUM_HIGH
+		&& pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+
+		chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
+
+		rc = smblib_masked_write(chg,
+				TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				EXIT_SNK_BASED_ON_CC_BIT, 0);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't clear EXIT_SNK_BASED_ON_CC rc=%d\n",
+				rc);
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
+		   rising ? "rising" : "falling",
+		   smblib_typec_mode_name[pval.intval]);
+}
+
+irqreturn_t smblib_handle_usb_typec_change_for_uusb(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
+		stat, !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
+
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST,
+			!!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
+	power_supply_changed(chg->usb_psy);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat4, stat5;
+	bool debounce_done, sink_attached, legacy_cable;
+
+	if (chg->micro_usb_mode)
+		return smblib_handle_usb_typec_change_for_uusb(chg);
+
+	/* WA - not when PD hard_reset WIP on cc2 in sink mode */
+	if (chg->cc2_sink_detach_flag == CC2_SINK_STD)
+		return IRQ_HANDLED;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	debounce_done = (bool)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+	sink_attached = (bool)(stat4 & UFP_DFP_MODE_STATUS_BIT);
+	legacy_cable = (bool)(stat5 & TYPEC_LEGACY_CABLE_STATUS_BIT);
+
+	smblib_handle_typec_debounce_done(chg,
+			debounce_done, sink_attached, legacy_cable);
+
+	if (stat4 & TYPEC_VBUS_ERROR_STATUS_BIT)
+		smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n",
+			irq_data->name);
+
+	if (stat4 & TYPEC_VCONN_OVERCURR_STATUS_BIT)
+		schedule_work(&chg->vconn_oc_work);
+
+	power_supply_changed(chg->usb_psy);
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat4);
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_5 = 0x%02x\n", stat5);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	power_supply_changed(chg->dc_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	chg->is_hdc = true;
+	schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat;
+
+	if (!(chg->wa_flags & BOOST_BACK_WA))
+		return IRQ_HANDLED;
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
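+	/* nothing to do if the USB input is already effectively suspended */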
+	if ((stat & USE_USBIN_BIT) &&
+			get_effective_result(chg->usb_icl_votable) < USBIN_25MA)
+		return IRQ_HANDLED;
+
+	if (stat & USE_DCIN_BIT)
+		return IRQ_HANDLED;
+
+	if (is_storming(&irq_data->storm_data)) {
+		smblib_err(chg, "Reverse boost detected: voting 0mA to suspend input\n");
+		vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+
+	rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
+/***************
+ * Work Queues *
+ ***************/
+
+static void smblib_hvdcp_detect_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+					       hvdcp_detect_work.work);
+
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+	if (get_effective_result(chg->pd_disallowed_votable_indirect))
+		/* pd is still disabled, try hvdcp */
+		try_rerun_apsd_for_hvdcp(chg);
+	else
+		/* notify pd now that pd is allowed */
+		power_supply_changed(chg->usb_psy);
+}
+
+static void bms_update_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						bms_update_work);
+
+	smblib_suspend_on_debug_battery(chg);
+
+	if (chg->batt_psy)
+		power_supply_changed(chg->batt_psy);
+}
+
+static void step_soc_req_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						step_soc_req_work.work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	rc = smblib_get_prop_batt_capacity(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
+		return;
+	}
+
+	step_charge_soc_update(chg, pval.intval);
+}
+
+static void clear_hdc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						clear_hdc_work.work);
+
+	chg->is_hdc = 0;
+}
+
+static void rdstd_cc2_detach_work(struct work_struct *work)
+{
+	int rc;
+	u8 stat;
+	struct smb_irq_data irq_data = {NULL, "cc2-removal-workaround"};
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						rdstd_cc2_detach_work);
+
+	/*
+	 * WA steps -
+	 * 1. Enable both UFP and DFP, wait for 10ms.
+	 * 2. Disable DFP, wait for 30ms.
+	 * 3. Removal is detected if both the TYPEC_DEBOUNCE_DONE_STATUS
+	 *    and TIMER_STAGE bits are clear; otherwise repeat the sequence
+	 *    by rescheduling this work.
+	 * Note: the work is cancelled when pd_hard_reset is 0.
+	 */
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+		return;
+	}
+
+	usleep_range(10000, 11000);
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+				 UFP_EN_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+		return;
+	}
+
+	usleep_range(30000, 31000);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+			rc);
+		return;
+	}
+	if (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
+		goto rerun;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg,
+			"Couldn't read TYPE_C_STATUS_5_REG rc=%d\n", rc);
+		return;
+	}
+	if (stat & TIMER_STAGE_2_BIT)
+		goto rerun;
+
+	/* Bingo, cc2 removal detected */
+	smblib_reg_block_restore(chg, cc2_detach_settings);
+	chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
+	irq_data.parent_data = chg;
+	smblib_handle_usb_typec_change(0, &irq_data);
+
+	return;
+
+rerun:
+	schedule_work(&chg->rdstd_cc2_detach_work);
+}
+
+static void smblib_otg_oc_exit(struct smb_charger *chg, bool success)
+{
+	int rc;
+
+	chg->otg_attempts = 0;
+	if (!success) {
+		smblib_err(chg, "OTG soft start failed\n");
+		chg->otg_en = false;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling VBUS < 1V check\n");
+	rc = smblib_masked_write(chg, OTG_CFG_REG,
+					QUICKSTART_OTG_FASTROLESWAP_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable VBUS < 1V check rc=%d\n", rc);
+
+	if (!chg->external_vconn && chg->vconn_en) {
+		chg->vconn_attempts = 0;
+		if (success) {
+			rc = _smblib_vconn_regulator_enable(
+							chg->vconn_vreg->rdev);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't enable VCONN rc=%d\n",
+									rc);
+		} else {
+			chg->vconn_en = false;
+		}
+	}
+}
+
+#define MAX_OC_FALLING_TRIES 10
+static void smblib_otg_oc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+								otg_oc_work);
+	int rc, i;
+	u8 stat;
+
+	if (!chg->vbus_vreg || !chg->vbus_vreg->rdev)
+		return;
+
+	smblib_err(chg, "over-current detected on VBUS\n");
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->otg_en)
+		goto unlock;
+
+	smblib_dbg(chg, PR_OTG, "disabling VBUS < 1V check\n");
+	smblib_masked_write(chg, OTG_CFG_REG,
+					QUICKSTART_OTG_FASTROLESWAP_BIT,
+					QUICKSTART_OTG_FASTROLESWAP_BIT);
+
+	/*
+	 * If 500ms has passed and another over-current interrupt has not
+	 * triggered then it is likely that the software based soft start was
+	 * successful and the VBUS < 1V restriction should be re-enabled.
+	 */
+	schedule_delayed_work(&chg->otg_ss_done_work, msecs_to_jiffies(500));
+
+	rc = _smblib_vbus_regulator_disable(chg->vbus_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable VBUS rc=%d\n", rc);
+		goto unlock;
+	}
+
+	if (++chg->otg_attempts > OTG_MAX_ATTEMPTS) {
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		smblib_err(chg, "OTG failed to enable after %d attempts\n",
+			   chg->otg_attempts - 1);
+		smblib_otg_oc_exit(chg, false);
+		goto unlock;
+	}
+
+	/*
+	 * The real time status should go low within 10ms. Poll every 1-2ms to
+	 * minimize the delay when re-enabling OTG.
+	 */
+	for (i = 0; i < MAX_OC_FALLING_TRIES; ++i) {
+		usleep_range(1000, 2000);
+		rc = smblib_read(chg, OTG_BASE + INT_RT_STS_OFFSET, &stat);
+		if (rc >= 0 && !(stat & OTG_OVERCURRENT_RT_STS_BIT))
+			break;
+	}
+
+	if (i >= MAX_OC_FALLING_TRIES) {
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		smblib_err(chg, "OTG OC did not fall after %dms\n",
+						2 * MAX_OC_FALLING_TRIES);
+		smblib_otg_oc_exit(chg, false);
+		goto unlock;
+	}
+
+	smblib_dbg(chg, PR_OTG, "OTG OC fell after %dms\n", 2 * i + 1);
+	rc = _smblib_vbus_regulator_enable(chg->vbus_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable VBUS rc=%d\n", rc);
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_vconn_oc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+								vconn_oc_work);
+	int rc, i;
+	u8 stat;
+
+	smblib_err(chg, "over-current detected on VCONN\n");
+	if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
+		return;
+
+	mutex_lock(&chg->otg_oc_lock);
+	rc = _smblib_vconn_regulator_disable(chg->vconn_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
+		goto unlock;
+	}
+
+	if (++chg->vconn_attempts > VCONN_MAX_ATTEMPTS) {
+		smblib_err(chg, "VCONN failed to enable after %d attempts\n",
+			   chg->vconn_attempts - 1);
+		chg->vconn_en = false;
+		chg->vconn_attempts = 0;
+		goto unlock;
+	}
+
+	/*
+	 * The real time status should go low within 10ms. Poll every 1-2ms to
+	 * minimize the delay when re-enabling OTG.
+	 */
+	for (i = 0; i < MAX_OC_FALLING_TRIES; ++i) {
+		usleep_range(1000, 2000);
+		rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+		if (rc >= 0 && !(stat & TYPEC_VCONN_OVERCURR_STATUS_BIT))
+			break;
+	}
+
+	if (i >= MAX_OC_FALLING_TRIES) {
+		smblib_err(chg, "VCONN OC did not fall after %dms\n",
+						2 * MAX_OC_FALLING_TRIES);
+		chg->vconn_en = false;
+		chg->vconn_attempts = 0;
+		goto unlock;
+	}
+
+	smblib_dbg(chg, PR_OTG, "VCONN OC fell after %dms\n", 2 * i + 1);
+	if (++chg->vconn_attempts > VCONN_MAX_ATTEMPTS) {
+		smblib_err(chg, "VCONN failed to enable after %d attempts\n",
+			   chg->vconn_attempts - 1);
+		chg->vconn_en = false;
+		goto unlock;
+	}
+
+	rc = _smblib_vconn_regulator_enable(chg->vconn_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable VCONN rc=%d\n", rc);
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_otg_ss_done_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							otg_ss_done_work.work);
+	int rc;
+	bool success = false;
+	u8 stat;
+
+	mutex_lock(&chg->otg_oc_lock);
+	rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't read OTG status rc=%d\n", rc);
+	else if (stat & BOOST_SOFTSTART_DONE_BIT)
+		success = true;
+
+	smblib_otg_oc_exit(chg, success);
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_icl_change_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							icl_change_work.work);
+	int rc, settled_ua;
+
+	rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+		return;
+	}
+
+	power_supply_changed(chg->usb_main_psy);
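+	/* enable parallel charging only once the settled input current is strong */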
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
+				settled_ua >= USB_WEAK_INPUT_UA, 0);
+
+	smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
+}
+
+static int smblib_create_votables(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	chg->fcc_votable = find_votable("FCC");
+	if (!chg->fcc_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+
+	chg->fv_votable = find_votable("FV");
+	if (!chg->fv_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+
+	chg->pl_disable_votable = find_votable("PL_DISABLE");
+	if (!chg->pl_disable_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+
+	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
+					smblib_dc_suspend_vote_callback,
+					chg);
+	if (IS_ERR(chg->dc_suspend_votable)) {
+		rc = PTR_ERR(chg->dc_suspend_votable);
+		return rc;
+	}
+
+	chg->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
+					smblib_usb_icl_vote_callback,
+					chg);
+	if (IS_ERR(chg->usb_icl_votable)) {
+		rc = PTR_ERR(chg->usb_icl_votable);
+		return rc;
+	}
+
+	chg->dc_icl_votable = create_votable("DC_ICL", VOTE_MIN,
+					smblib_dc_icl_vote_callback,
+					chg);
+	if (IS_ERR(chg->dc_icl_votable)) {
+		rc = PTR_ERR(chg->dc_icl_votable);
+		return rc;
+	}
+
+	chg->pd_disallowed_votable_indirect
+		= create_votable("PD_DISALLOWED_INDIRECT", VOTE_SET_ANY,
+			smblib_pd_disallowed_votable_indirect_callback, chg);
+	if (IS_ERR(chg->pd_disallowed_votable_indirect)) {
+		rc = PTR_ERR(chg->pd_disallowed_votable_indirect);
+		return rc;
+	}
+
+	chg->pd_allowed_votable = create_votable("PD_ALLOWED",
+					VOTE_SET_ANY, NULL, NULL);
+	if (IS_ERR(chg->pd_allowed_votable)) {
+		rc = PTR_ERR(chg->pd_allowed_votable);
+		return rc;
+	}
+
+	chg->awake_votable = create_votable("AWAKE", VOTE_SET_ANY,
+					smblib_awake_vote_callback,
+					chg);
+	if (IS_ERR(chg->awake_votable)) {
+		rc = PTR_ERR(chg->awake_votable);
+		return rc;
+	}
+
+	chg->chg_disable_votable = create_votable("CHG_DISABLE", VOTE_SET_ANY,
+					smblib_chg_disable_vote_callback,
+					chg);
+	if (IS_ERR(chg->chg_disable_votable)) {
+		rc = PTR_ERR(chg->chg_disable_votable);
+		return rc;
+	}
+
+	chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+					VOTE_SET_ANY,
+					smblib_pl_enable_indirect_vote_callback,
+					chg);
+	if (IS_ERR(chg->pl_enable_votable_indirect)) {
+		rc = PTR_ERR(chg->pl_enable_votable_indirect);
+		return rc;
+	}
+
+	chg->hvdcp_disable_votable_indirect = create_votable(
+				"HVDCP_DISABLE_INDIRECT",
+				VOTE_SET_ANY,
+				smblib_hvdcp_disable_indirect_vote_callback,
+				chg);
+	if (IS_ERR(chg->hvdcp_disable_votable_indirect)) {
+		rc = PTR_ERR(chg->hvdcp_disable_votable_indirect);
+		return rc;
+	}
+
+	chg->hvdcp_enable_votable = create_votable("HVDCP_ENABLE",
+					VOTE_SET_ANY,
+					smblib_hvdcp_enable_vote_callback,
+					chg);
+	if (IS_ERR(chg->hvdcp_enable_votable)) {
+		rc = PTR_ERR(chg->hvdcp_enable_votable);
+		return rc;
+	}
+
+	chg->apsd_disable_votable = create_votable("APSD_DISABLE",
+					VOTE_SET_ANY,
+					smblib_apsd_disable_vote_callback,
+					chg);
+	if (IS_ERR(chg->apsd_disable_votable)) {
+		rc = PTR_ERR(chg->apsd_disable_votable);
+		return rc;
+	}
+
+	chg->hvdcp_hw_inov_dis_votable = create_votable("HVDCP_HW_INOV_DIS",
+					VOTE_SET_ANY,
+					smblib_hvdcp_hw_inov_dis_vote_callback,
+					chg);
+	if (IS_ERR(chg->hvdcp_hw_inov_dis_votable)) {
+		rc = PTR_ERR(chg->hvdcp_hw_inov_dis_votable);
+		return rc;
+	}
+
+	return rc;
+}
+
+static void smblib_destroy_votables(struct smb_charger *chg)
+{
+	if (chg->dc_suspend_votable)
+		destroy_votable(chg->dc_suspend_votable);
+	if (chg->usb_icl_votable)
+		destroy_votable(chg->usb_icl_votable);
+	if (chg->dc_icl_votable)
+		destroy_votable(chg->dc_icl_votable);
+	if (chg->pd_disallowed_votable_indirect)
+		destroy_votable(chg->pd_disallowed_votable_indirect);
+	if (chg->pd_allowed_votable)
+		destroy_votable(chg->pd_allowed_votable);
+	if (chg->awake_votable)
+		destroy_votable(chg->awake_votable);
+	if (chg->chg_disable_votable)
+		destroy_votable(chg->chg_disable_votable);
+	if (chg->pl_enable_votable_indirect)
+		destroy_votable(chg->pl_enable_votable_indirect);
+	if (chg->hvdcp_disable_votable_indirect)
+		destroy_votable(chg->hvdcp_disable_votable_indirect);
+	if (chg->hvdcp_enable_votable)
+		destroy_votable(chg->hvdcp_enable_votable);
+	if (chg->apsd_disable_votable)
+		destroy_votable(chg->apsd_disable_votable);
+	if (chg->hvdcp_hw_inov_dis_votable)
+		destroy_votable(chg->hvdcp_hw_inov_dis_votable);
+}
+
+static void smblib_iio_deinit(struct smb_charger *chg)
+{
+	if (!IS_ERR_OR_NULL(chg->iio.temp_chan))
+		iio_channel_release(chg->iio.temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.temp_max_chan))
+		iio_channel_release(chg->iio.temp_max_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.usbin_i_chan))
+		iio_channel_release(chg->iio.usbin_i_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
+		iio_channel_release(chg->iio.usbin_v_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		iio_channel_release(chg->iio.batt_i_chan);
+}
+
+int smblib_init(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	mutex_init(&chg->write_lock);
+	mutex_init(&chg->otg_oc_lock);
+	INIT_WORK(&chg->bms_update_work, bms_update_work);
+	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
+	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
+	INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
+	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
+	INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
+	INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
+	INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
+	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
+	chg->fake_capacity = -EINVAL;
+
+	switch (chg->mode) {
+	case PARALLEL_MASTER:
+		chg->qnovo_fcc_ua = -EINVAL;
+		chg->qnovo_fv_uv = -EINVAL;
+		rc = smblib_create_votables(chg);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't create votables rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = smblib_register_notifier(chg);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't register notifier rc=%d\n", rc);
+			return rc;
+		}
+
+		chg->bms_psy = power_supply_get_by_name("bms");
+		chg->pl.psy = power_supply_get_by_name("parallel");
+		break;
+	case PARALLEL_SLAVE:
+		break;
+	default:
+		smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int smblib_deinit(struct smb_charger *chg)
+{
+	switch (chg->mode) {
+	case PARALLEL_MASTER:
+		power_supply_unreg_notifier(&chg->nb);
+		smblib_destroy_votables(chg);
+		break;
+	case PARALLEL_SLAVE:
+		break;
+	default:
+		smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+		return -EINVAL;
+	}
+
+	smblib_iio_deinit(chg);
+
+	return 0;
+}
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
new file mode 100644
index 0000000..21ccd3c
--- /dev/null
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -0,0 +1,487 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SMB2_CHARGER_H
+#define __SMB2_CHARGER_H
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/extcon.h>
+#include "storm-watch.h"
+
+enum print_reason {
+	PR_INTERRUPT	= BIT(0),
+	PR_REGISTER	= BIT(1),
+	PR_MISC		= BIT(2),
+	PR_PARALLEL	= BIT(3),
+	PR_OTG		= BIT(4),
+};
+
+#define DEFAULT_VOTER			"DEFAULT_VOTER"
+#define USER_VOTER			"USER_VOTER"
+#define PD_VOTER			"PD_VOTER"
+#define DCP_VOTER			"DCP_VOTER"
+#define PL_USBIN_USBIN_VOTER		"PL_USBIN_USBIN_VOTER"
+#define USB_PSY_VOTER			"USB_PSY_VOTER"
+#define PL_TAPER_WORK_RUNNING_VOTER	"PL_TAPER_WORK_RUNNING_VOTER"
+#define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
+#define USBIN_V_VOTER			"USBIN_V_VOTER"
+#define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TYPEC_SRC_VOTER			"TYPEC_SRC_VOTER"
+#define TAPER_END_VOTER			"TAPER_END_VOTER"
+#define THERMAL_DAEMON_VOTER		"THERMAL_DAEMON_VOTER"
+#define CC_DETACHED_VOTER		"CC_DETACHED_VOTER"
+#define HVDCP_TIMEOUT_VOTER		"HVDCP_TIMEOUT_VOTER"
+#define PD_DISALLOWED_INDIRECT_VOTER	"PD_DISALLOWED_INDIRECT_VOTER"
+#define PD_HARD_RESET_VOTER		"PD_HARD_RESET_VOTER"
+#define VBUS_CC_SHORT_VOTER		"VBUS_CC_SHORT_VOTER"
+#define LEGACY_CABLE_VOTER		"LEGACY_CABLE_VOTER"
+#define PD_INACTIVE_VOTER		"PD_INACTIVE_VOTER"
+#define BOOST_BACK_VOTER		"BOOST_BACK_VOTER"
+#define HVDCP_INDIRECT_VOTER		"HVDCP_INDIRECT_VOTER"
+#define MICRO_USB_VOTER			"MICRO_USB_VOTER"
+#define DEBUG_BOARD_VOTER		"DEBUG_BOARD_VOTER"
+#define PD_SUSPEND_SUPPORTED_VOTER	"PD_SUSPEND_SUPPORTED_VOTER"
+#define PL_DELAY_HVDCP_VOTER		"PL_DELAY_HVDCP_VOTER"
+#define CTM_VOTER			"CTM_VOTER"
+#define SW_QC3_VOTER			"SW_QC3_VOTER"
+#define AICL_RERUN_VOTER		"AICL_RERUN_VOTER"
+
+#define VCONN_MAX_ATTEMPTS	3
+#define OTG_MAX_ATTEMPTS	3
+
+enum smb_mode {
+	PARALLEL_MASTER = 0,
+	PARALLEL_SLAVE,
+	NUM_MODES,
+};
+
+enum cc2_sink_type {
+	CC2_SINK_NONE = 0,
+	CC2_SINK_STD,
+	CC2_SINK_MEDIUM_HIGH,
+	CC2_SINK_WA_DONE,
+};
+
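+/* Software workaround flags, OR-ed into the wa_flags field of struct smb_charger. */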
+enum {
+	QC_CHARGER_DETECTION_WA_BIT	= BIT(0),
+	BOOST_BACK_WA			= BIT(1),
+	TYPEC_CC2_REMOVAL_WA_BIT	= BIT(2),
+	QC_AUTH_INTERRUPT_WA_BIT	= BIT(3),
+};
+
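+/* Logical indices into the driver's table of struct smb_irq_info entries. */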
+enum smb_irq_index {
+	CHG_ERROR_IRQ = 0,
+	CHG_STATE_CHANGE_IRQ,
+	STEP_CHG_STATE_CHANGE_IRQ,
+	STEP_CHG_SOC_UPDATE_FAIL_IRQ,
+	STEP_CHG_SOC_UPDATE_REQ_IRQ,
+	OTG_FAIL_IRQ,
+	OTG_OVERCURRENT_IRQ,
+	OTG_OC_DIS_SW_STS_IRQ,
+	TESTMODE_CHANGE_DET_IRQ,
+	BATT_TEMP_IRQ,
+	BATT_OCP_IRQ,
+	BATT_OV_IRQ,
+	BATT_LOW_IRQ,
+	BATT_THERM_ID_MISS_IRQ,
+	BATT_TERM_MISS_IRQ,
+	USBIN_COLLAPSE_IRQ,
+	USBIN_LT_3P6V_IRQ,
+	USBIN_UV_IRQ,
+	USBIN_OV_IRQ,
+	USBIN_PLUGIN_IRQ,
+	USBIN_SRC_CHANGE_IRQ,
+	USBIN_ICL_CHANGE_IRQ,
+	TYPE_C_CHANGE_IRQ,
+	DCIN_COLLAPSE_IRQ,
+	DCIN_LT_3P6V_IRQ,
+	DCIN_UV_IRQ,
+	DCIN_OV_IRQ,
+	DCIN_PLUGIN_IRQ,
+	DIV2_EN_DG_IRQ,
+	DCIN_ICL_CHANGE_IRQ,
+	WDOG_SNARL_IRQ,
+	WDOG_BARK_IRQ,
+	AICL_FAIL_IRQ,
+	AICL_DONE_IRQ,
+	HIGH_DUTY_CYCLE_IRQ,
+	INPUT_CURRENT_LIMIT_IRQ,
+	TEMPERATURE_CHANGE_IRQ,
+	SWITCH_POWER_OK_IRQ,
+	SMB_IRQ_MAX,
+};
+
+struct smb_irq_info {
+	const char			*name;
+	const irq_handler_t		handler;
+	const bool			wake;
+	const struct storm_watch	storm_data;
+	struct smb_irq_data		*irq_data;
+	int				irq;
+};
+
+static const unsigned int smblib_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_NONE,
+};
+
+struct smb_regulator {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+};
+
+struct smb_irq_data {
+	void			*parent_data;
+	const char		*name;
+	struct storm_watch	storm_data;
+};
+
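+/*
+ * Describes one configurable charger parameter: *_u values are in physical
+ * units (uV, uA, etc.), *_raw values are the register encoding.  When
+ * get_proc/set_proc are absent, a plain linear min_u/step_u mapping is
+ * presumably applied by the smb-lib helpers.
+ */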
+struct smb_chg_param {
+	const char	*name;
+	u16		reg;
+	int		min_u;
+	int		max_u;
+	int		step_u;
+	int		(*get_proc)(struct smb_chg_param *param,
+				    u8 val_raw);
+	int		(*set_proc)(struct smb_chg_param *param,
+				    int val_u,
+				    u8 *val_raw);
+};
+
+struct smb_chg_freq {
+	unsigned int		freq_5V;
+	unsigned int		freq_6V_8V;
+	unsigned int		freq_9V;
+	unsigned int		freq_12V;
+	unsigned int		freq_removal;
+	unsigned int		freq_below_otg_threshold;
+	unsigned int		freq_above_otg_threshold;
+};
+
+struct smb_params {
+	struct smb_chg_param	fcc;
+	struct smb_chg_param	fv;
+	struct smb_chg_param	usb_icl;
+	struct smb_chg_param	icl_stat;
+	struct smb_chg_param	otg_cl;
+	struct smb_chg_param	dc_icl;
+	struct smb_chg_param	dc_icl_pt_lv;
+	struct smb_chg_param	dc_icl_pt_hv;
+	struct smb_chg_param	dc_icl_div2_lv;
+	struct smb_chg_param	dc_icl_div2_mid_lv;
+	struct smb_chg_param	dc_icl_div2_mid_hv;
+	struct smb_chg_param	dc_icl_div2_hv;
+	struct smb_chg_param	jeita_cc_comp;
+	struct smb_chg_param	step_soc_threshold[4];
+	struct smb_chg_param	step_soc;
+	struct smb_chg_param	step_cc_delta[5];
+	struct smb_chg_param	freq_buck;
+	struct smb_chg_param	freq_boost;
+};
+
+struct parallel_params {
+	struct power_supply	*psy;
+};
+
+struct smb_iio {
+	struct iio_channel	*temp_chan;
+	struct iio_channel	*temp_max_chan;
+	struct iio_channel	*usbin_i_chan;
+	struct iio_channel	*usbin_v_chan;
+	struct iio_channel	*batt_i_chan;
+	struct iio_channel	*connector_temp_chan;
+	struct iio_channel	*connector_temp_thr1_chan;
+	struct iio_channel	*connector_temp_thr2_chan;
+	struct iio_channel	*connector_temp_thr3_chan;
+};
+
+struct reg_info {
+	u16		reg;
+	u8		mask;
+	u8		val;
+	u8		bak;
+	const char	*desc;
+};
+
+struct smb_charger {
+	struct device		*dev;
+	char			*name;
+	struct regmap		*regmap;
+	struct smb_irq_info	*irq_info;
+	struct smb_params	param;
+	struct smb_iio		iio;
+	int			*debug_mask;
+	enum smb_mode		mode;
+	bool			external_vconn;
+	struct smb_chg_freq	chg_freq;
+	int			smb_version;
+
+	/* locks */
+	struct mutex		write_lock;
+	struct mutex		ps_change_lock;
+	struct mutex		otg_oc_lock;
+
+	/* power supplies */
+	struct power_supply		*batt_psy;
+	struct power_supply		*usb_psy;
+	struct power_supply		*dc_psy;
+	struct power_supply		*bms_psy;
+	struct power_supply_desc	usb_psy_desc;
+	struct power_supply		*usb_main_psy;
+
+	/* notifiers */
+	struct notifier_block	nb;
+
+	/* parallel charging */
+	struct parallel_params	pl;
+
+	/* regulators */
+	struct smb_regulator	*vbus_vreg;
+	struct smb_regulator	*vconn_vreg;
+	struct regulator	*dpdm_reg;
+
+	/* votables */
+	struct votable		*dc_suspend_votable;
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
+	struct votable		*usb_icl_votable;
+	struct votable		*dc_icl_votable;
+	struct votable		*pd_disallowed_votable_indirect;
+	struct votable		*pd_allowed_votable;
+	struct votable		*awake_votable;
+	struct votable		*pl_disable_votable;
+	struct votable		*chg_disable_votable;
+	struct votable		*pl_enable_votable_indirect;
+	struct votable		*hvdcp_disable_votable_indirect;
+	struct votable		*hvdcp_enable_votable;
+	struct votable		*apsd_disable_votable;
+	struct votable		*hvdcp_hw_inov_dis_votable;
+
+	/* work */
+	struct work_struct	bms_update_work;
+	struct work_struct	rdstd_cc2_detach_work;
+	struct delayed_work	hvdcp_detect_work;
+	struct delayed_work	ps_change_timeout_work;
+	struct delayed_work	step_soc_req_work;
+	struct delayed_work	clear_hdc_work;
+	struct work_struct	otg_oc_work;
+	struct work_struct	vconn_oc_work;
+	struct delayed_work	otg_ss_done_work;
+	struct delayed_work	icl_change_work;
+
+	/* cached status */
+	int			voltage_min_uv;
+	int			voltage_max_uv;
+	int			pd_active;
+	bool			system_suspend_supported;
+	int			boost_threshold_ua;
+	int			system_temp_level;
+	int			thermal_levels;
+	int			*thermal_mitigation;
+	int			dcp_icl_ua;
+	int			fake_capacity;
+	bool			step_chg_enabled;
+	bool			is_hdc;
+	bool			chg_done;
+	bool			micro_usb_mode;
+	bool			otg_en;
+	bool			vconn_en;
+	bool			suspend_input_on_debug_batt;
+	int			otg_attempts;
+	int			vconn_attempts;
+	int			default_icl_ua;
+
+	/* workaround flag */
+	u32			wa_flags;
+	enum cc2_sink_type	cc2_sink_detach_flag;
+	int			boost_current_ua;
+
+	/* extcon for VBUS / ID notification to USB for uUSB */
+	struct extcon_dev	*extcon;
+	bool			usb_ever_removed;
+
+	int			icl_reduction_ua;
+
+	/* qnovo */
+	int			qnovo_fcc_ua;
+	int			qnovo_fv_uv;
+	int			usb_icl_delta_ua;
+	int			pulse_cnt;
+};
+
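+/*
+ * Thin regmap-based register accessors.  As an illustrative (hypothetical)
+ * use, suspending USB input could be expressed as:
+ *	smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+ *			    suspend ? USBIN_SUSPEND_BIT : 0);
+ */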
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val);
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val);
+
+int smblib_get_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int *val_u);
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend);
+
+int smblib_enable_charging(struct smb_charger *chg, bool enable);
+int smblib_set_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int val_u);
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend);
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend);
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw);
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+					   u8 val_raw);
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw);
+int smblib_set_chg_freq(struct smb_chg_param *param,
+				int val_u, u8 *val_raw);
+
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev);
+
+irqreturn_t smblib_handle_debug(int irq, void *data);
+irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data);
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data);
+irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
+irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
+irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
+irqreturn_t smblib_handle_usbin_uv(int irq, void *data);
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data);
+irqreturn_t smblib_handle_usb_source_change(int irq, void *data);
+irqreturn_t smblib_handle_icl_change(int irq, void *data);
+irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data);
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data);
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data);
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_step_chg_step(struct smb_charger *chg,
+				union power_supply_propval *val);
+
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+				const union power_supply_propval *val);
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_suspend(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_current_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_current_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_mode(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_allowed(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_get_pe_start(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_die_health(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_usb_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+				const union power_supply_propval *val);
+void smblib_suspend_on_debug_battery(struct smb_charger *chg);
+int smblib_rerun_apsd_if_required(struct smb_charger *chg);
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_icl_override(struct smb_charger *chg, bool override);
+int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua);
+int smblib_dp_dm(struct smb_charger *chg, int val);
+int smblib_rerun_aicl(struct smb_charger *chg);
+
+int smblib_init(struct smb_charger *chg);
+int smblib_deinit(struct smb_charger *chg);
+#endif /* __SMB2_CHARGER_H */
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
new file mode 100644
index 0000000..54b6b38
--- /dev/null
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -0,0 +1,1024 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SMB2_CHARGER_REG_H
+#define __SMB2_CHARGER_REG_H
+
+#include <linux/bitops.h>
+
+#define CHGR_BASE	0x1000
+#define OTG_BASE	0x1100
+#define BATIF_BASE	0x1200
+#define USBIN_BASE	0x1300
+#define DCIN_BASE	0x1400
+#define MISC_BASE	0x1600
+#define CHGR_FREQ_BASE	0x1900
+
+#define PERPH_TYPE_OFFSET		0x04
+#define TYPE_MASK			GENMASK(7, 0)
+#define PERPH_SUBTYPE_OFFSET		0x05
+#define SUBTYPE_MASK			GENMASK(7, 0)
+#define INT_RT_STS_OFFSET		0x10
+
+/* CHGR Peripheral Registers */
+#define BATTERY_CHARGER_STATUS_1_REG	(CHGR_BASE + 0x06)
+#define BVR_INITIAL_RAMP_BIT		BIT(7)
+#define CC_SOFT_TERMINATE_BIT		BIT(6)
+#define STEP_CHARGING_STATUS_SHIFT	3
+#define STEP_CHARGING_STATUS_MASK	GENMASK(5, 3)
+#define BATTERY_CHARGER_STATUS_MASK	GENMASK(2, 0)
+enum {
+	TRICKLE_CHARGE = 0,
+	PRE_CHARGE,
+	FAST_CHARGE,
+	FULLON_CHARGE,
+	TAPER_CHARGE,
+	TERMINATE_CHARGE,
+	INHIBIT_CHARGE,
+	DISABLE_CHARGE,
+};
+
+#define BATTERY_CHARGER_STATUS_2_REG			(CHGR_BASE + 0x07)
+#define INPUT_CURRENT_LIMITED_BIT			BIT(7)
+#define CHARGER_ERROR_STATUS_SFT_EXPIRE_BIT		BIT(6)
+#define CHARGER_ERROR_STATUS_BAT_OV_BIT			BIT(5)
+#define CHARGER_ERROR_STATUS_BAT_TERM_MISSING_BIT	BIT(4)
+#define BAT_TEMP_STATUS_MASK				GENMASK(3, 0)
+#define BAT_TEMP_STATUS_SOFT_LIMIT_MASK			GENMASK(3, 2)
+#define BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT		BIT(3)
+#define BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT		BIT(2)
+#define BAT_TEMP_STATUS_TOO_HOT_BIT			BIT(1)
+#define BAT_TEMP_STATUS_TOO_COLD_BIT			BIT(0)
+
+#define CHG_OPTION_REG					(CHGR_BASE + 0x08)
+#define PIN_BIT						BIT(7)
+
+#define BATTERY_CHARGER_STATUS_3_REG			(CHGR_BASE + 0x09)
+#define FV_POST_JEITA_MASK				GENMASK(7, 0)
+
+#define BATTERY_CHARGER_STATUS_4_REG			(CHGR_BASE + 0x0A)
+#define CHARGE_CURRENT_POST_JEITA_MASK			GENMASK(7, 0)
+
+#define BATTERY_CHARGER_STATUS_5_REG			(CHGR_BASE + 0x0B)
+#define VALID_INPUT_POWER_SOURCE_BIT			BIT(7)
+#define DISABLE_CHARGING_BIT				BIT(6)
+#define FORCE_ZERO_CHARGE_CURRENT_BIT			BIT(5)
+#define CHARGING_ENABLE_BIT				BIT(4)
+#define TAPER_BIT					BIT(3)
+#define ENABLE_CHG_SENSORS_BIT				BIT(2)
+#define ENABLE_TAPER_SENSOR_BIT				BIT(1)
+#define TAPER_REGION_BIT				BIT(0)
+
+#define BATTERY_CHARGER_STATUS_6_REG			(CHGR_BASE + 0x0C)
+#define GF_BATT_OV_BIT					BIT(7)
+#define DROP_IN_BATTERY_VOLTAGE_REFERENCE_BIT		BIT(6)
+#define VBATT_LTET_RECHARGE_BIT				BIT(5)
+#define VBATT_GTET_INHIBIT_BIT				BIT(4)
+#define VBATT_GTET_FLOAT_VOLTAGE_BIT			BIT(3)
+#define BATT_GT_PRE_TO_FAST_BIT				BIT(2)
+#define BATT_GT_FULL_ON_BIT				BIT(1)
+#define VBATT_LT_2V_BIT					BIT(0)
+
+#define BATTERY_CHARGER_STATUS_7_REG			(CHGR_BASE + 0x0D)
+#define ENABLE_TRICKLE_BIT				BIT(7)
+#define ENABLE_PRE_CHARGING_BIT				BIT(6)
+#define ENABLE_FAST_CHARGING_BIT			BIT(5)
+#define ENABLE_FULLON_MODE_BIT				BIT(4)
+#define TOO_COLD_ADC_BIT				BIT(3)
+#define TOO_HOT_ADC_BIT					BIT(2)
+#define HOT_SL_ADC_BIT					BIT(1)
+#define COLD_SL_ADC_BIT					BIT(0)
+
+#define BATTERY_CHARGER_STATUS_8_REG			(CHGR_BASE + 0x0E)
+#define PRE_FAST_BIT					BIT(7)
+#define PRE_FULLON_BIT					BIT(6)
+#define PRE_RCHG_BIT					BIT(5)
+#define PRE_INHIBIT_BIT					BIT(4)
+#define PRE_OVRV_BIT					BIT(3)
+#define PRE_TERM_BIT					BIT(2)
+#define BAT_ID_BMISS_CMP_BIT				BIT(1)
+#define THERM_CMP_BIT					BIT(0)
+
+/* CHGR Interrupt Bits */
+#define CHGR_7_RT_STS_BIT				BIT(7)
+#define CHGR_6_RT_STS_BIT				BIT(6)
+#define FG_FVCAL_QUALIFIED_RT_STS_BIT			BIT(5)
+#define STEP_CHARGING_SOC_UPDATE_REQUEST_RT_STS_BIT	BIT(4)
+#define STEP_CHARGING_SOC_UPDATE_FAIL_RT_STS_BIT	BIT(3)
+#define STEP_CHARGING_STATE_CHANGE_RT_STS_BIT		BIT(2)
+#define CHARGING_STATE_CHANGE_RT_STS_BIT		BIT(1)
+#define CHGR_ERROR_RT_STS_BIT				BIT(0)
+
+#define STEP_CHG_SOC_VBATT_V_REG			(CHGR_BASE + 0x40)
+#define STEP_CHG_SOC_VBATT_V_MASK			GENMASK(7, 0)
+
+#define STEP_CHG_SOC_VBATT_V_UPDATE_REG			(CHGR_BASE + 0x41)
+#define STEP_CHG_SOC_VBATT_V_UPDATE_BIT			BIT(0)
+
+#define CHARGING_ENABLE_CMD_REG				(CHGR_BASE + 0x42)
+#define CHARGING_ENABLE_CMD_BIT				BIT(0)
+
+#define ALLOW_FAST_CHARGING_CMD_REG			(CHGR_BASE + 0x43)
+#define ALLOW_FAST_CHARGING_CMD_BIT			BIT(0)
+
+#define QNOVO_PT_ENABLE_CMD_REG				(CHGR_BASE + 0x44)
+#define QNOVO_PT_ENABLE_CMD_BIT				BIT(0)
+
+#define CHGR_CFG1_REG					(CHGR_BASE + 0x50)
+#define INCREASE_RCHG_TIMEOUT_CFG_BIT			BIT(1)
+#define LOAD_BAT_BIT					BIT(0)
+
+#define CHGR_CFG2_REG					(CHGR_BASE + 0x51)
+#define CHG_EN_SRC_BIT					BIT(7)
+#define CHG_EN_POLARITY_BIT				BIT(6)
+#define PRETOFAST_TRANSITION_CFG_BIT			BIT(5)
+#define BAT_OV_ECC_BIT					BIT(4)
+#define I_TERM_BIT					BIT(3)
+#define AUTO_RECHG_BIT					BIT(2)
+#define EN_ANALOG_DROP_IN_VBATT_BIT			BIT(1)
+#define CHARGER_INHIBIT_BIT				BIT(0)
+
+#define CHARGER_ENABLE_CFG_REG				(CHGR_BASE + 0x52)
+#define CHG_ENB_TIMEOUT_SETTING_BIT			BIT(1)
+#define FORCE_ZERO_CFG_BIT				BIT(0)
+
+#define CFG_REG						(CHGR_BASE + 0x53)
+#define CHG_OPTION_PIN_TRIM_BIT				BIT(7)
+#define BATN_SNS_CFG_BIT				BIT(4)
+#define CFG_TAPER_DIS_AFVC_BIT				BIT(3)
+#define BATFET_SHUTDOWN_CFG_BIT				BIT(2)
+#define VDISCHG_EN_CFG_BIT				BIT(1)
+#define VCHG_EN_CFG_BIT					BIT(0)
+
+#define CHARGER_SPARE_REG				(CHGR_BASE + 0x54)
+#define CHARGER_SPARE_MASK				GENMASK(5, 0)
+
+#define PRE_CHARGE_CURRENT_CFG_REG			(CHGR_BASE + 0x60)
+#define PRE_CHARGE_CURRENT_SETTING_MASK			GENMASK(5, 0)
+
+#define FAST_CHARGE_CURRENT_CFG_REG			(CHGR_BASE + 0x61)
+#define FAST_CHARGE_CURRENT_SETTING_MASK		GENMASK(7, 0)
+
+#define CHARGE_CURRENT_TERMINATION_CFG_REG		(CHGR_BASE + 0x62)
+#define ANALOG_CHARGE_CURRENT_TERMINATION_SETTING_MASK	GENMASK(2, 0)
+
+#define TCCC_CHARGE_CURRENT_TERMINATION_CFG_REG		(CHGR_BASE + 0x63)
+#define TCCC_CHARGE_CURRENT_TERMINATION_SETTING_MASK	GENMASK(3, 0)
+
+#define CHARGE_CURRENT_SOFTSTART_SETTING_CFG_REG	(CHGR_BASE + 0x64)
+#define CHARGE_CURRENT_SOFTSTART_SETTING_MASK		GENMASK(1, 0)
+
+#define FLOAT_VOLTAGE_CFG_REG				(CHGR_BASE + 0x70)
+#define FLOAT_VOLTAGE_SETTING_MASK			GENMASK(7, 0)
+
+#define AUTO_FLOAT_VOLTAGE_COMPENSATION_CFG_REG		(CHGR_BASE + 0x71)
+#define AUTO_FLOAT_VOLTAGE_COMPENSATION_MASK		GENMASK(2, 0)
+
+#define CHARGE_INHIBIT_THRESHOLD_CFG_REG		(CHGR_BASE + 0x72)
+#define CHARGE_INHIBIT_THRESHOLD_MASK			GENMASK(1, 0)
+#define CHARGE_INHIBIT_THRESHOLD_50MV			0
+#define CHARGE_INHIBIT_THRESHOLD_100MV			1
+#define CHARGE_INHIBIT_THRESHOLD_200MV			2
+#define CHARGE_INHIBIT_THRESHOLD_300MV			3
+
+#define RECHARGE_THRESHOLD_CFG_REG			(CHGR_BASE + 0x73)
+#define RECHARGE_THRESHOLD_MASK				GENMASK(1, 0)
+
+#define PRE_TO_FAST_CHARGE_THRESHOLD_CFG_REG		(CHGR_BASE + 0x74)
+#define PRE_TO_FAST_CHARGE_THRESHOLD_MASK		GENMASK(1, 0)
+
+#define FV_HYSTERESIS_CFG_REG				(CHGR_BASE + 0x75)
+#define FV_DROP_HYSTERESIS_CFG_MASK			GENMASK(7, 4)
+#define THRESH_HYSTERESIS_CFG_MASK			GENMASK(3, 0)
+
+#define FVC_CHARGE_INHIBIT_THRESHOLD_CFG_REG		(CHGR_BASE + 0x80)
+#define FVC_CHARGE_INHIBIT_THRESHOLD_MASK		GENMASK(5, 0)
+
+#define FVC_RECHARGE_THRESHOLD_CFG_REG			(CHGR_BASE + 0x81)
+#define FVC_RECHARGE_THRESHOLD_MASK			GENMASK(7, 0)
+
+#define FVC_PRE_TO_FAST_CHARGE_THRESHOLD_CFG_REG	(CHGR_BASE + 0x82)
+#define FVC_PRE_TO_FAST_CHARGE_THRESHOLD_MASK		GENMASK(7, 0)
+
+#define FVC_FULL_ON_THRESHOLD_CFG_REG			(CHGR_BASE + 0x83)
+#define FVC_FULL_ON_THRESHOLD_MASK			GENMASK(7, 0)
+
+#define FVC_CC_MODE_GLITCH_FILTER_SEL_CFG_REG		(CHGR_BASE + 0x84)
+#define FVC_CC_MODE_GLITCH_FILTER_SEL_MASK		GENMASK(1, 0)
+
+#define FVC_TERMINATION_GLITCH_FILTER_SEL_CFG_REG	(CHGR_BASE + 0x85)
+#define FVC_TERMINATION_GLITCH_FILTER_SEL_MASK		GENMASK(1, 0)
+
+#define JEITA_EN_CFG_REG		(CHGR_BASE + 0x90)
+#define JEITA_EN_HARDLIMIT_BIT		BIT(4)
+#define JEITA_EN_HOT_SL_FCV_BIT		BIT(3)
+#define JEITA_EN_COLD_SL_FCV_BIT	BIT(2)
+#define JEITA_EN_HOT_SL_CCC_BIT		BIT(1)
+#define JEITA_EN_COLD_SL_CCC_BIT	BIT(0)
+
+#define JEITA_FVCOMP_CFG_REG		(CHGR_BASE + 0x91)
+#define JEITA_FVCOMP_MASK		GENMASK(7, 0)
+
+#define JEITA_CCCOMP_CFG_REG		(CHGR_BASE + 0x92)
+#define JEITA_CCCOMP_MASK		GENMASK(7, 0)
+
+#define FV_CAL_CFG_REG			(CHGR_BASE + 0x76)
+#define FV_CALIBRATION_CFG_MASK		GENMASK(2, 0)
+
+#define FV_ADJUST_REG			(CHGR_BASE + 0x77)
+#define FLOAT_VOLTAGE_ADJUSTMENT_MASK	GENMASK(4, 0)
+
+#define FG_VADC_DISQ_THRESH_REG		(CHGR_BASE + 0x78)
+#define VADC_DISQUAL_THRESH_MASK	GENMASK(7, 0)
+
+#define FG_IADC_DISQ_THRESH_REG		(CHGR_BASE + 0x79)
+#define IADC_DISQUAL_THRESH_MASK	GENMASK(7, 0)
+
+#define FG_UPDATE_CFG_1_REG	(CHGR_BASE + 0x7A)
+#define BT_TMPR_TCOLD_BIT	BIT(7)
+#define BT_TMPR_COLD_BIT	BIT(6)
+#define BT_TMPR_HOT_BIT		BIT(5)
+#define BT_TMPR_THOT_BIT	BIT(4)
+#define CHG_DIE_TMPR_HOT_BIT	BIT(3)
+#define CHG_DIE_TMPR_THOT_BIT	BIT(2)
+#define SKIN_TMPR_HOT_BIT	BIT(1)
+#define SKIN_TMPR_THOT_BIT	BIT(0)
+
+#define FG_UPDATE_CFG_1_SEL_REG		(CHGR_BASE + 0x7B)
+#define BT_TMPR_TCOLD_SEL_BIT		BIT(7)
+#define BT_TMPR_COLD_SEL_BIT		BIT(6)
+#define BT_TMPR_HOT_SEL_BIT		BIT(5)
+#define BT_TMPR_THOT_SEL_BIT		BIT(4)
+#define CHG_DIE_TMPR_HOT_SEL_BIT	BIT(3)
+#define CHG_DIE_TMPR_THOT_SEL_BIT	BIT(2)
+#define SKIN_TMPR_HOT_SEL_BIT		BIT(1)
+#define SKIN_TMPR_THOT_SEL_BIT		BIT(0)
+
+#define FG_UPDATE_CFG_2_REG		(CHGR_BASE + 0x7C)
+#define SOC_LT_OTG_THRESH_BIT		BIT(3)
+#define SOC_LT_CHG_RECHARGE_THRESH_BIT	BIT(2)
+#define VBT_LT_CHG_RECHARGE_THRESH_BIT	BIT(1)
+#define IBT_LT_CHG_TERM_THRESH_BIT	BIT(0)
+
+#define FG_UPDATE_CFG_2_SEL_REG			(CHGR_BASE + 0x7D)
+#define SOC_LT_OTG_THRESH_SEL_BIT		BIT(3)
+#define SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT	BIT(2)
+#define VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT	BIT(1)
+#define IBT_LT_CHG_TERM_THRESH_SEL_BIT		BIT(0)
+
+#define FG_CHG_INTERFACE_CFG_REG	(CHGR_BASE + 0x7E)
+#define ESR_ISINK_CFG_MASK		GENMASK(7, 6)
+#define ESR_FASTCHG_DECR_CFG_MASK	GENMASK(5, 4)
+#define FG_CHARGER_INHIBIT_BIT		BIT(3)
+#define FG_BATFET_BIT			BIT(2)
+#define IADC_SYNC_CNV_BIT		BIT(1)
+#define VADC_SYNC_CNV_BIT		BIT(0)
+
+#define FG_CHG_INTERFACE_CFG_SEL_REG	(CHGR_BASE + 0x7F)
+#define ESR_ISINK_CFG_SEL_BIT		BIT(5)
+#define ESR_FASTCHG_DECR_CFG_SEL_BIT	BIT(4)
+#define FG_CHARGER_INHIBIT_SEL_BIT	BIT(3)
+#define FG_BATFET_SEL_BIT		BIT(2)
+#define IADC_SYNC_CNV_SEL_BIT		BIT(1)
+#define VADC_SYNC_CNV_SEL_BIT		BIT(0)
+
+#define CHGR_STEP_CHG_MODE_CFG_REG		(CHGR_BASE + 0xB0)
+#define STEP_CHARGING_SOC_FAIL_OPTION_BIT	BIT(3)
+#define STEP_CHARGING_MODE_SELECT_BIT		BIT(2)
+#define STEP_CHARGING_SOURCE_SELECT_BIT		BIT(1)
+#define STEP_CHARGING_ENABLE_BIT		BIT(0)
+
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG		(CHGR_BASE + 0xB1)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_MASK	GENMASK(1, 0)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_5S		0
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_10S		1
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_20S		2
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S		3
+
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG		(CHGR_BASE + 0xB2)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_MASK		GENMASK(1, 0)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_10S		0
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_30S		1
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_60S		2
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_120S		3
+
+#define STEP_CHG_SOC_OR_BATT_V_TH1_REG	(CHGR_BASE + 0xB3)
+#define STEP_CHG_SOC_OR_BATT_V_TH2_REG	(CHGR_BASE + 0xB4)
+#define STEP_CHG_SOC_OR_BATT_V_TH3_REG	(CHGR_BASE + 0xB5)
+#define STEP_CHG_SOC_OR_BATT_V_TH4_REG	(CHGR_BASE + 0xB6)
+#define STEP_CHG_CURRENT_DELTA1_REG	(CHGR_BASE + 0xB7)
+#define STEP_CHG_CURRENT_DELTA2_REG	(CHGR_BASE + 0xB8)
+#define STEP_CHG_CURRENT_DELTA3_REG	(CHGR_BASE + 0xB9)
+#define STEP_CHG_CURRENT_DELTA4_REG	(CHGR_BASE + 0xBA)
+#define STEP_CHG_CURRENT_DELTA5_REG	(CHGR_BASE + 0xBB)
+
+/* OTG Peripheral Registers */
+#define RID_CC_CONTROL_23_16_REG	(OTG_BASE + 0x06)
+#define RID_CC_CONTROL_23_BIT		BIT(7)
+#define VCONN_SOFTSTART_EN_BIT		BIT(6)
+#define VCONN_SFTST_CFG_MASK		GENMASK(5, 4)
+#define CONNECT_RIDCC_SENSOR_TO_CC_MASK	GENMASK(3, 2)
+#define EN_CC_1P1CLAMP_BIT		BIT(1)
+#define ENABLE_CRUDESEN_CC_1_BIT	BIT(0)
+
+#define RID_CC_CONTROL_15_8_REG		(OTG_BASE + 0x07)
+#define ENABLE_CRUDESEN_CC_0_BIT	BIT(7)
+#define EN_FMB_2P5UA_CC_MASK		GENMASK(6, 5)
+#define EN_ISRC_180UA_BIT		BIT(4)
+#define ENABLE_CURRENTSOURCE_CC_MASK	GENMASK(3, 2)
+#define EN_BANDGAP_RID_C_DET_BIT	BIT(1)
+#define ENABLE_RD_CC_1_BIT		BIT(0)
+
+#define RID_CC_CONTROL_7_0_REG		(OTG_BASE + 0x08)
+#define ENABLE_RD_CC_0_BIT		BIT(7)
+#define VCONN_ILIM500MA_BIT		BIT(6)
+#define EN_MICRO_USB_MODE_BIT		BIT(5)
+#define UFP_DFP_MODE_BIT		BIT(4)
+#define VCONN_EN_CC_MASK		GENMASK(3, 2)
+#define VREF_SEL_RIDCC_SENSOR_MASK	GENMASK(1, 0)
+
+#define OTG_STATUS_REG			(OTG_BASE + 0x09)
+#define BOOST_SOFTSTART_DONE_BIT	BIT(3)
+#define OTG_STATE_MASK			GENMASK(2, 0)
+#define OTG_STATE_ENABLED		0x2
+
+/* OTG Interrupt Bits */
+#define TESTMODE_CHANGE_DETECT_RT_STS_BIT	BIT(3)
+#define OTG_OC_DIS_SW_STS_RT_STS_BIT		BIT(2)
+#define OTG_OVERCURRENT_RT_STS_BIT		BIT(1)
+#define OTG_FAIL_RT_STS_BIT			BIT(0)
+
+#define CMD_OTG_REG			(OTG_BASE + 0x40)
+#define OTG_EN_BIT			BIT(0)
+
+#define BAT_UVLO_THRESHOLD_CFG_REG	(OTG_BASE + 0x51)
+#define BAT_UVLO_THRESHOLD_MASK		GENMASK(1, 0)
+
+#define OTG_CURRENT_LIMIT_CFG_REG	(OTG_BASE + 0x52)
+#define OTG_CURRENT_LIMIT_MASK		GENMASK(2, 0)
+
+#define OTG_CFG_REG			(OTG_BASE + 0x53)
+#define OTG_RESERVED_MASK		GENMASK(7, 6)
+#define DIS_OTG_ON_TLIM_BIT		BIT(5)
+#define QUICKSTART_OTG_FASTROLESWAP_BIT	BIT(4)
+#define INCREASE_DFP_TIME_BIT		BIT(3)
+#define ENABLE_OTG_IN_DEBUG_MODE_BIT	BIT(2)
+#define OTG_EN_SRC_CFG_BIT		BIT(1)
+#define CONCURRENT_MODE_CFG_BIT		BIT(0)
+
+#define OTG_ENG_OTG_CFG_REG		(OTG_BASE + 0xC0)
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT	BIT(0)
+
+/* BATIF Peripheral Registers */
+/* BATIF Interrupt Bits */
+#define BAT_7_RT_STS_BIT			BIT(7)
+#define BAT_6_RT_STS_BIT			BIT(6)
+#define BAT_TERMINAL_MISSING_RT_STS_BIT		BIT(5)
+#define BAT_THERM_OR_ID_MISSING_RT_STS_BIT	BIT(4)
+#define BAT_LOW_RT_STS_BIT			BIT(3)
+#define BAT_OV_RT_STS_BIT			BIT(2)
+#define BAT_OCP_RT_STS_BIT			BIT(1)
+#define BAT_TEMP_RT_STS_BIT			BIT(0)
+
+#define SHIP_MODE_REG			(BATIF_BASE + 0x40)
+#define SHIP_MODE_EN_BIT		BIT(0)
+
+#define BATOCP_THRESHOLD_CFG_REG	(BATIF_BASE + 0x50)
+#define BATOCP_ENABLE_CFG_BIT		BIT(3)
+#define BATOCP_THRESHOLD_MASK		GENMASK(2, 0)
+
+#define BATOCP_INTRPT_DELAY_TMR_CFG_REG	(BATIF_BASE + 0x51)
+#define BATOCP_INTRPT_TIMEOUT_MASK	GENMASK(5, 3)
+#define BATOCP_DELAY_TIMEOUT_MASK	GENMASK(2, 0)
+
+#define BATOCP_RESET_TMR_CFG_REG	(BATIF_BASE + 0x52)
+#define EN_BATOCP_RESET_TMR_BIT		BIT(3)
+#define BATOCP_RESET_TIMEOUT_MASK	GENMASK(2, 0)
+
+#define LOW_BATT_DETECT_EN_CFG_REG	(BATIF_BASE + 0x60)
+#define LOW_BATT_DETECT_EN_BIT		BIT(0)
+
+#define LOW_BATT_THRESHOLD_CFG_REG	(BATIF_BASE + 0x61)
+#define LOW_BATT_THRESHOLD_MASK		GENMASK(3, 0)
+
+#define BAT_FET_CFG_REG			(BATIF_BASE + 0x62)
+#define BAT_FET_CFG_BIT			BIT(0)
+
+#define BAT_MISS_SRC_CFG_REG		(BATIF_BASE + 0x70)
+#define BAT_MISS_ALG_EN_BIT		BIT(2)
+#define BAT_MISS_RESERVED_BIT		BIT(1)
+#define BAT_MISS_PIN_SRC_EN_BIT		BIT(0)
+
+#define BAT_MISS_ALG_OPTIONS_CFG_REG	(BATIF_BASE + 0x71)
+#define BAT_MISS_INPUT_PLUGIN_BIT	BIT(2)
+#define BAT_MISS_TMR_START_OPTION_BIT	BIT(1)
+#define BAT_MISS_POLL_EN_BIT		BIT(0)
+
+#define BAT_MISS_PIN_GF_CFG_REG		(BATIF_BASE + 0x72)
+#define BAT_MISS_PIN_GF_MASK		GENMASK(1, 0)
+
+/* USBIN Peripheral Registers */
+#define USBIN_INPUT_STATUS_REG		(USBIN_BASE + 0x06)
+#define USBIN_INPUT_STATUS_7_BIT	BIT(7)
+#define USBIN_INPUT_STATUS_6_BIT	BIT(6)
+#define USBIN_12V_BIT			BIT(5)
+#define USBIN_9V_TO_12V_BIT		BIT(4)
+#define USBIN_9V_BIT			BIT(3)
+#define USBIN_5V_TO_12V_BIT		BIT(2)
+#define USBIN_5V_TO_9V_BIT		BIT(1)
+#define USBIN_5V_BIT			BIT(0)
+#define QC_2P0_STATUS_MASK		GENMASK(2, 0)
+
+#define APSD_STATUS_REG			(USBIN_BASE + 0x07)
+#define APSD_STATUS_7_BIT		BIT(7)
+#define HVDCP_CHECK_TIMEOUT_BIT		BIT(6)
+#define SLOW_PLUGIN_TIMEOUT_BIT		BIT(5)
+#define ENUMERATION_DONE_BIT		BIT(4)
+#define VADP_CHANGE_DONE_AFTER_AUTH_BIT	BIT(3)
+#define QC_AUTH_DONE_STATUS_BIT		BIT(2)
+#define QC_CHARGER_BIT			BIT(1)
+#define APSD_DTC_STATUS_DONE_BIT	BIT(0)
+
+#define APSD_RESULT_STATUS_REG		(USBIN_BASE + 0x08)
+#define ICL_OVERRIDE_LATCH_BIT		BIT(7)
+#define APSD_RESULT_STATUS_MASK		GENMASK(6, 0)
+#define QC_3P0_BIT			BIT(6)
+#define QC_2P0_BIT			BIT(5)
+#define FLOAT_CHARGER_BIT		BIT(4)
+#define DCP_CHARGER_BIT			BIT(3)
+#define CDP_CHARGER_BIT			BIT(2)
+#define OCP_CHARGER_BIT			BIT(1)
+#define SDP_CHARGER_BIT			BIT(0)
+
+#define QC_CHANGE_STATUS_REG		(USBIN_BASE + 0x09)
+#define QC_CHANGE_STATUS_7_BIT		BIT(7)
+#define QC_CHANGE_STATUS_6_BIT		BIT(6)
+#define QC_9V_TO_12V_REASON_BIT		BIT(5)
+#define QC_5V_TO_9V_REASON_BIT		BIT(4)
+#define QC_CONTINUOUS_BIT		BIT(3)
+#define QC_12V_BIT			BIT(2)
+#define QC_9V_BIT			BIT(1)
+#define QC_5V_BIT			BIT(0)
+
+#define QC_PULSE_COUNT_STATUS_REG		(USBIN_BASE + 0x0A)
+#define QC_PULSE_COUNT_STATUS_7_BIT		BIT(7)
+#define QC_PULSE_COUNT_STATUS_6_BIT		BIT(6)
+#define QC_PULSE_COUNT_MASK			GENMASK(5, 0)
+
+#define TYPE_C_STATUS_1_REG			(USBIN_BASE + 0x0B)
+#define UFP_TYPEC_MASK				GENMASK(7, 5)
+#define UFP_TYPEC_RDSTD_BIT			BIT(7)
+#define UFP_TYPEC_RD1P5_BIT			BIT(6)
+#define UFP_TYPEC_RD3P0_BIT			BIT(5)
+#define UFP_TYPEC_FMB_255K_BIT			BIT(4)
+#define UFP_TYPEC_FMB_301K_BIT			BIT(3)
+#define UFP_TYPEC_FMB_523K_BIT			BIT(2)
+#define UFP_TYPEC_FMB_619K_BIT			BIT(1)
+#define UFP_TYPEC_OPEN_OPEN_BIT			BIT(0)
+
+#define TYPE_C_STATUS_2_REG			(USBIN_BASE + 0x0C)
+#define DFP_TYPEC_MASK				0x8F
+#define DFP_RA_OPEN_BIT				BIT(7)
+#define TIMER_STAGE_BIT				BIT(6)
+#define EXIT_UFP_MODE_BIT			BIT(5)
+#define EXIT_DFP_MODE_BIT			BIT(4)
+#define DFP_RD_OPEN_BIT				BIT(3)
+#define DFP_RD_RA_VCONN_BIT			BIT(2)
+#define DFP_RD_RD_BIT				BIT(1)
+#define DFP_RA_RA_BIT				BIT(0)
+
+#define TYPE_C_STATUS_3_REG			(USBIN_BASE + 0x0D)
+#define ENABLE_BANDGAP_BIT			BIT(7)
+#define U_USB_GND_NOVBUS_BIT			BIT(6)
+#define U_USB_FLOAT_NOVBUS_BIT			BIT(5)
+#define U_USB_GND_BIT				BIT(4)
+#define U_USB_FMB1_BIT				BIT(3)
+#define U_USB_FLOAT1_BIT			BIT(2)
+#define U_USB_FMB2_BIT				BIT(1)
+#define U_USB_FLOAT2_BIT			BIT(0)
+
+#define TYPE_C_STATUS_4_REG			(USBIN_BASE + 0x0E)
+#define UFP_DFP_MODE_STATUS_BIT			BIT(7)
+#define TYPEC_VBUS_STATUS_BIT			BIT(6)
+#define TYPEC_VBUS_ERROR_STATUS_BIT		BIT(5)
+#define TYPEC_DEBOUNCE_DONE_STATUS_BIT		BIT(4)
+#define TYPEC_UFP_AUDIO_ADAPT_STATUS_BIT	BIT(3)
+#define TYPEC_VCONN_OVERCURR_STATUS_BIT		BIT(2)
+#define CC_ORIENTATION_BIT			BIT(1)
+#define CC_ATTACHED_BIT				BIT(0)
+
+#define TYPE_C_STATUS_5_REG			(USBIN_BASE + 0x0F)
+#define TRY_SOURCE_FAILED_BIT			BIT(6)
+#define TRY_SINK_FAILED_BIT			BIT(5)
+#define TIMER_STAGE_2_BIT			BIT(4)
+#define TYPEC_LEGACY_CABLE_STATUS_BIT		BIT(3)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT	BIT(2)
+#define TYPEC_TRYSOURCE_DETECT_STATUS_BIT	BIT(1)
+#define TYPEC_TRYSINK_DETECT_STATUS_BIT		BIT(0)
+
+/* USBIN Interrupt Bits */
+#define TYPE_C_CHANGE_RT_STS_BIT		BIT(7)
+#define USBIN_ICL_CHANGE_RT_STS_BIT		BIT(6)
+#define USBIN_SOURCE_CHANGE_RT_STS_BIT		BIT(5)
+#define USBIN_PLUGIN_RT_STS_BIT			BIT(4)
+#define USBIN_OV_RT_STS_BIT			BIT(3)
+#define USBIN_UV_RT_STS_BIT			BIT(2)
+#define USBIN_LT_3P6V_RT_STS_BIT		BIT(1)
+#define USBIN_COLLAPSE_RT_STS_BIT		BIT(0)
+
+#define QC_PULSE_COUNT_STATUS_1_REG		(USBIN_BASE + 0x30)
+
+#define USBIN_CMD_IL_REG			(USBIN_BASE + 0x40)
+#define BAT_2_SYS_FET_DIS_BIT			BIT(1)
+#define USBIN_SUSPEND_BIT			BIT(0)
+
+#define CMD_APSD_REG				(USBIN_BASE + 0x41)
+#define ICL_OVERRIDE_BIT			BIT(1)
+#define APSD_RERUN_BIT				BIT(0)
+
+#define CMD_HVDCP_2_REG				(USBIN_BASE + 0x43)
+#define RESTART_AICL_BIT			BIT(7)
+#define TRIGGER_AICL_BIT			BIT(6)
+#define FORCE_12V_BIT				BIT(5)
+#define FORCE_9V_BIT				BIT(4)
+#define FORCE_5V_BIT				BIT(3)
+#define IDLE_BIT				BIT(2)
+#define SINGLE_DECREMENT_BIT			BIT(1)
+#define SINGLE_INCREMENT_BIT			BIT(0)
+
+#define USB_MISC2_REG				(USBIN_BASE + 0x57)
+#define USB_MISC2_MASK				GENMASK(1, 0)
+
+#define TYPE_C_CFG_REG				(USBIN_BASE + 0x58)
+#define APSD_START_ON_CC_BIT			BIT(7)
+#define WAIT_FOR_APSD_BIT			BIT(6)
+#define FACTORY_MODE_DETECTION_EN_BIT		BIT(5)
+#define FACTORY_MODE_ICL_3A_4A_BIT		BIT(4)
+#define FACTORY_MODE_DIS_CHGING_CFG_BIT		BIT(3)
+#define SUSPEND_NON_COMPLIANT_CFG_BIT		BIT(2)
+#define VCONN_OC_CFG_BIT			BIT(1)
+#define TYPE_C_OR_U_USB_BIT			BIT(0)
+
+#define TYPE_C_CFG_2_REG			(USBIN_BASE + 0x59)
+#define TYPE_C_DFP_CURRSRC_MODE_BIT		BIT(7)
+#define VCONN_ILIM500MA_CFG_BIT			BIT(6)
+#define VCONN_SOFTSTART_CFG_MASK		GENMASK(5, 4)
+#define EN_TRY_SOURCE_MODE_BIT			BIT(3)
+#define USB_FACTORY_MODE_ENABLE_BIT		BIT(2)
+#define TYPE_C_UFP_MODE_BIT			BIT(1)
+#define EN_80UA_180UA_CUR_SOURCE_BIT		BIT(0)
+
+#define TYPE_C_CFG_3_REG			(USBIN_BASE + 0x5A)
+#define TVBUS_DEBOUNCE_BIT			BIT(7)
+#define TYPEC_LEGACY_CABLE_INT_EN_BIT		BIT(6)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT		BIT(5)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN_BIT	BIT(4)
+#define TYPEC_TRYSINK_DETECT_INT_EN_BIT		BIT(3)
+#define EN_TRYSINK_MODE_BIT			BIT(2)
+#define EN_LEGACY_CABLE_DETECTION_BIT		BIT(1)
+#define ALLOW_PD_DRING_UFP_TCCDB_BIT		BIT(0)
+
+#define USBIN_ADAPTER_ALLOW_CFG_REG		(USBIN_BASE + 0x60)
+#define USBIN_ADAPTER_ALLOW_MASK		GENMASK(3, 0)
+enum {
+	USBIN_ADAPTER_ALLOW_5V		= 0,
+	USBIN_ADAPTER_ALLOW_9V		= 2,
+	USBIN_ADAPTER_ALLOW_5V_OR_9V	= 3,
+	USBIN_ADAPTER_ALLOW_12V		= 4,
+	USBIN_ADAPTER_ALLOW_5V_OR_12V	= 5,
+	USBIN_ADAPTER_ALLOW_9V_TO_12V	= 6,
+	USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V = 7,
+	USBIN_ADAPTER_ALLOW_5V_TO_9V	= 8,
+	USBIN_ADAPTER_ALLOW_5V_TO_12V	= 12,
+};
+
+#define USBIN_OPTIONS_1_CFG_REG			(USBIN_BASE + 0x62)
+#define CABLE_R_SEL_BIT				BIT(7)
+#define HVDCP_AUTH_ALG_EN_CFG_BIT		BIT(6)
+#define HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT	BIT(5)
+#define INPUT_PRIORITY_BIT			BIT(4)
+#define AUTO_SRC_DETECT_BIT			BIT(3)
+#define HVDCP_EN_BIT				BIT(2)
+#define VADP_INCREMENT_VOLTAGE_LIMIT_BIT	BIT(1)
+#define VADP_TAPER_TIMER_EN_BIT			BIT(0)
+
+#define USBIN_OPTIONS_2_CFG_REG			(USBIN_BASE + 0x63)
+#define WIPWR_RST_EUD_CFG_BIT			BIT(7)
+#define SWITCHER_START_CFG_BIT			BIT(6)
+#define DCD_TIMEOUT_SEL_BIT			BIT(5)
+#define OCD_CURRENT_SEL_BIT			BIT(4)
+#define SLOW_PLUGIN_TIMER_EN_CFG_BIT		BIT(3)
+#define FLOAT_OPTIONS_MASK			GENMASK(2, 0)
+#define FLOAT_DIS_CHGING_CFG_BIT		BIT(2)
+#define SUSPEND_FLOAT_CFG_BIT			BIT(1)
+#define FORCE_FLOAT_SDP_CFG_BIT			BIT(0)
+
+#define TAPER_TIMER_SEL_CFG_REG			(USBIN_BASE + 0x64)
+#define TYPEC_SPARE_CFG_BIT			BIT(7)
+#define TAPER_TIMER_SEL_MASK			GENMASK(1, 0)
+
+#define USBIN_LOAD_CFG_REG			(USBIN_BASE + 0x65)
+#define USBIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+
+#define USBIN_ICL_OPTIONS_REG			(USBIN_BASE + 0x66)
+#define CFG_USB3P0_SEL_BIT			BIT(2)
+#define USB51_MODE_BIT				BIT(1)
+#define USBIN_MODE_CHG_BIT			BIT(0)
+
+#define TYPE_C_INTRPT_ENB_REG			(USBIN_BASE + 0x67)
+#define TYPEC_CCOUT_DETACH_INT_EN_BIT		BIT(7)
+#define TYPEC_CCOUT_ATTACH_INT_EN_BIT		BIT(6)
+#define TYPEC_VBUS_ERROR_INT_EN_BIT		BIT(5)
+#define TYPEC_UFP_AUDIOADAPT_INT_EN_BIT		BIT(4)
+#define TYPEC_DEBOUNCE_DONE_INT_EN_BIT		BIT(3)
+#define TYPEC_CCSTATE_CHANGE_INT_EN_BIT		BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN_BIT		BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN_BIT		BIT(0)
+
+#define TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG	(USBIN_BASE + 0x68)
+#define EXIT_SNK_BASED_ON_CC_BIT		BIT(7)
+#define VCONN_EN_ORIENTATION_BIT		BIT(6)
+#define TYPEC_VCONN_OVERCURR_INT_EN_BIT		BIT(5)
+#define VCONN_EN_SRC_BIT			BIT(4)
+#define VCONN_EN_VALUE_BIT			BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK		GENMASK(2, 0)
+#define UFP_EN_CMD_BIT				BIT(2)
+#define DFP_EN_CMD_BIT				BIT(1)
+#define TYPEC_DISABLE_CMD_BIT			BIT(0)
+
+#define USBIN_SOURCE_CHANGE_INTRPT_ENB_REG	(USBIN_BASE + 0x69)
+#define SLOW_IRQ_EN_CFG_BIT			BIT(5)
+#define ENUMERATION_IRQ_EN_CFG_BIT		BIT(4)
+#define VADP_IRQ_EN_CFG_BIT			BIT(3)
+#define AUTH_IRQ_EN_CFG_BIT			BIT(2)
+#define HVDCP_IRQ_EN_CFG_BIT			BIT(1)
+#define APSD_IRQ_EN_CFG_BIT			BIT(0)
+
+#define USBIN_CURRENT_LIMIT_CFG_REG		(USBIN_BASE + 0x70)
+#define USBIN_CURRENT_LIMIT_MASK		GENMASK(7, 0)
+
+#define USBIN_AICL_OPTIONS_CFG_REG		(USBIN_BASE + 0x80)
+#define SUSPEND_ON_COLLAPSE_USBIN_BIT		BIT(7)
+#define USBIN_AICL_HDC_EN_BIT			BIT(6)
+#define USBIN_AICL_START_AT_MAX_BIT		BIT(5)
+#define USBIN_AICL_RERUN_EN_BIT			BIT(4)
+#define USBIN_AICL_ADC_EN_BIT			BIT(3)
+#define USBIN_AICL_EN_BIT			BIT(2)
+#define USBIN_HV_COLLAPSE_RESPONSE_BIT		BIT(1)
+#define USBIN_LV_COLLAPSE_RESPONSE_BIT		BIT(0)
+
+#define USBIN_5V_AICL_THRESHOLD_CFG_REG		(USBIN_BASE + 0x81)
+#define USBIN_5V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_9V_AICL_THRESHOLD_CFG_REG		(USBIN_BASE + 0x82)
+#define USBIN_9V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_12V_AICL_THRESHOLD_CFG_REG	(USBIN_BASE + 0x83)
+#define USBIN_12V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_CONT_AICL_THRESHOLD_CFG_REG	(USBIN_BASE + 0x84)
+#define USBIN_CONT_AICL_THRESHOLD_CFG_MASK	GENMASK(5, 0)
+
+/* DCIN Peripheral Registers */
+#define DCIN_INPUT_STATUS_REG		(DCIN_BASE + 0x06)
+#define DCIN_INPUT_STATUS_7_BIT		BIT(7)
+#define DCIN_INPUT_STATUS_6_BIT		BIT(6)
+#define DCIN_12V_BIT			BIT(5)
+#define DCIN_9V_TO_12V_BIT		BIT(4)
+#define DCIN_9V_BIT			BIT(3)
+#define DCIN_5V_TO_12V_BIT		BIT(2)
+#define DCIN_5V_TO_9V_BIT		BIT(1)
+#define DCIN_5V_BIT			BIT(0)
+
+#define WIPWR_STATUS_REG		(DCIN_BASE + 0x07)
+#define WIPWR_STATUS_7_BIT		BIT(7)
+#define WIPWR_STATUS_6_BIT		BIT(6)
+#define WIPWR_STATUS_5_BIT		BIT(5)
+#define DCIN_WIPWR_OV_DG_BIT		BIT(4)
+#define DIV2_EN_DG_BIT			BIT(3)
+#define SHUTDOWN_N_LATCH_BIT		BIT(2)
+#define CHG_OK_PIN_BIT			BIT(1)
+#define WIPWR_CHARGING_ENABLED_BIT	BIT(0)
+
+#define WIPWR_RANGE_STATUS_REG		(DCIN_BASE + 0x08)
+#define WIPWR_RANGE_STATUS_MASK		GENMASK(4, 0)
+
+/* DCIN Interrupt Bits */
+#define WIPWR_VOLTAGE_RANGE_RT_STS_BIT	BIT(7)
+#define DCIN_ICL_CHANGE_RT_STS_BIT	BIT(6)
+#define DIV2_EN_DG_RT_STS_BIT		BIT(5)
+#define DCIN_PLUGIN_RT_STS_BIT		BIT(4)
+#define DCIN_OV_RT_STS_BIT		BIT(3)
+#define DCIN_UV_RT_STS_BIT		BIT(2)
+#define DCIN_LT_3P6V_RT_STS_BIT		BIT(1)
+#define DCIN_COLLAPSE_RT_STS_BIT	BIT(0)
+
+#define DCIN_CMD_IL_REG				(DCIN_BASE + 0x40)
+#define WIRELESS_CHG_DIS_BIT			BIT(3)
+#define SHDN_N_CLEAR_CMD_BIT			BIT(2)
+#define SHDN_N_SET_CMD_BIT			BIT(1)
+#define DCIN_SUSPEND_BIT			BIT(0)
+
+#define DC_SPARE_REG				(DCIN_BASE + 0x58)
+#define DC_SPARE_MASK				GENMASK(3, 0)
+
+#define DCIN_ADAPTER_ALLOW_CFG_REG		(DCIN_BASE + 0x60)
+#define DCIN_ADAPTER_ALLOW_MASK			GENMASK(3, 0)
+
+#define DCIN_LOAD_CFG_REG			(DCIN_BASE + 0x65)
+#define DCIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+
+#define DCIN_CURRENT_LIMIT_CFG_REG		(DCIN_BASE + 0x70)
+#define DCIN_CURRENT_LIMIT_MASK			GENMASK(7, 0)
+
+#define DCIN_AICL_OPTIONS_CFG_REG		(DCIN_BASE + 0x80)
+#define SUSPEND_ON_COLLAPSE_DCIN_BIT		BIT(7)
+#define DCIN_AICL_HDC_EN_BIT			BIT(6)
+#define DCIN_AICL_START_AT_MAX_BIT		BIT(5)
+#define DCIN_AICL_RERUN_EN_BIT			BIT(4)
+#define DCIN_AICL_ADC_EN_BIT			BIT(3)
+#define DCIN_AICL_EN_BIT			BIT(2)
+#define DCIN_HV_COLLAPSE_RESPONSE_BIT		BIT(1)
+#define DCIN_LV_COLLAPSE_RESPONSE_BIT		BIT(0)
+
+#define DCIN_AICL_REF_SEL_CFG_REG		(DCIN_BASE + 0x81)
+#define DCIN_CONT_AICL_THRESHOLD_CFG_MASK	GENMASK(5, 0)
+
+#define DCIN_ICL_START_CFG_REG			(DCIN_BASE + 0x82)
+#define DCIN_ICL_START_CFG_BIT			BIT(0)
+
+#define DIV2_EN_GF_TIME_CFG_REG			(DCIN_BASE + 0x90)
+#define DIV2_EN_GF_TIME_CFG_MASK		GENMASK(1, 0)
+
+#define WIPWR_IRQ_TMR_CFG_REG			(DCIN_BASE + 0x91)
+#define WIPWR_IRQ_TMR_MASK			GENMASK(2, 0)
+
+#define ZIN_ICL_PT_REG				(DCIN_BASE + 0x92)
+#define ZIN_ICL_PT_MASK				GENMASK(7, 0)
+
+#define ZIN_ICL_LV_REG				(DCIN_BASE + 0x93)
+#define ZIN_ICL_LV_MASK				GENMASK(7, 0)
+
+#define ZIN_ICL_HV_REG				(DCIN_BASE + 0x94)
+#define ZIN_ICL_HV_MASK				GENMASK(7, 0)
+
+#define WI_PWR_OPTIONS_REG			(DCIN_BASE + 0x95)
+#define CHG_OK_BIT				BIT(7)
+#define WIPWR_UVLO_IRQ_OPT_BIT			BIT(6)
+#define BUCK_HOLDOFF_ENABLE_BIT			BIT(5)
+#define CHG_OK_HW_SW_SELECT_BIT			BIT(4)
+#define WIPWR_RST_ENABLE_BIT			BIT(3)
+#define DCIN_WIPWR_IRQ_SELECT_BIT		BIT(2)
+#define AICL_SWITCH_ENABLE_BIT			BIT(1)
+#define ZIN_ICL_ENABLE_BIT			BIT(0)
+
+#define ZIN_ICL_PT_HV_REG			(DCIN_BASE + 0x96)
+#define ZIN_ICL_PT_HV_MASK			GENMASK(7, 0)
+
+#define ZIN_ICL_MID_LV_REG			(DCIN_BASE + 0x97)
+#define ZIN_ICL_MID_LV_MASK			GENMASK(7, 0)
+
+#define ZIN_ICL_MID_HV_REG			(DCIN_BASE + 0x98)
+#define ZIN_ICL_MID_HV_MASK			GENMASK(7, 0)
+
+enum {
+	ZIN_ICL_PT_MAX_MV = 8000,
+	ZIN_ICL_PT_HV_MAX_MV = 9000,
+	ZIN_ICL_LV_MAX_MV = 5500,
+	ZIN_ICL_MID_LV_MAX_MV = 6500,
+	ZIN_ICL_MID_HV_MAX_MV = 8000,
+	ZIN_ICL_HV_MAX_MV = 11000,
+};
+
+#define DC_ENG_SSUPPLY_CFG2_REG			(DCIN_BASE + 0xC1)
+#define ENG_SSUPPLY_IVREF_OTG_SS_MASK		GENMASK(2, 0)
+#define OTG_SS_SLOW				0x3
+
+#define DC_ENG_SSUPPLY_CFG3_REG			(DCIN_BASE + 0xC2)
+#define ENG_SSUPPLY_HI_CAP_BIT			BIT(6)
+#define ENG_SSUPPLY_HI_RES_BIT			BIT(5)
+#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT	BIT(3)
+#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT	BIT(2)
+#define ENG_SSUPPLY_5V_OV_OPT_BIT		BIT(0)
+
+/* MISC Peripheral Registers */
+#define REVISION1_REG				(MISC_BASE + 0x00)
+#define DIG_MINOR_MASK				GENMASK(7, 0)
+
+#define REVISION2_REG				(MISC_BASE + 0x01)
+#define DIG_MAJOR_MASK				GENMASK(7, 0)
+
+#define REVISION3_REG				(MISC_BASE + 0x02)
+#define ANA_MINOR_MASK				GENMASK(7, 0)
+
+#define REVISION4_REG				(MISC_BASE + 0x03)
+#define ANA_MAJOR_MASK				GENMASK(7, 0)
+
+#define TEMP_RANGE_STATUS_REG			(MISC_BASE + 0x06)
+#define TEMP_RANGE_STATUS_7_BIT			BIT(7)
+#define THERM_REG_ACTIVE_BIT			BIT(6)
+#define TLIM_BIT				BIT(5)
+#define TEMP_RANGE_MASK				GENMASK(4, 1)
+#define ALERT_LEVEL_BIT				BIT(4)
+#define TEMP_ABOVE_RANGE_BIT			BIT(3)
+#define TEMP_WITHIN_RANGE_BIT			BIT(2)
+#define TEMP_BELOW_RANGE_BIT			BIT(1)
+#define THERMREG_DISABLED_BIT			BIT(0)
+
+#define ICL_STATUS_REG				(MISC_BASE + 0x07)
+#define INPUT_CURRENT_LIMIT_MASK		GENMASK(7, 0)
+
+#define ADAPTER_5V_ICL_STATUS_REG		(MISC_BASE + 0x08)
+#define ADAPTER_5V_ICL_MASK			GENMASK(7, 0)
+
+#define ADAPTER_9V_ICL_STATUS_REG		(MISC_BASE + 0x09)
+#define ADAPTER_9V_ICL_MASK			GENMASK(7, 0)
+
+#define AICL_STATUS_REG				(MISC_BASE + 0x0A)
+#define AICL_STATUS_7_BIT			BIT(7)
+#define SOFT_ILIMIT_BIT				BIT(6)
+#define HIGHEST_DC_BIT				BIT(5)
+#define USBIN_CH_COLLAPSE_BIT			BIT(4)
+#define DCIN_CH_COLLAPSE_BIT			BIT(3)
+#define ICL_IMIN_BIT				BIT(2)
+#define AICL_FAIL_BIT				BIT(1)
+#define AICL_DONE_BIT				BIT(0)
+
+#define POWER_PATH_STATUS_REG			(MISC_BASE + 0x0B)
+#define INPUT_SS_DONE_BIT			BIT(7)
+#define USBIN_SUSPEND_STS_BIT			BIT(6)
+#define DCIN_SUSPEND_STS_BIT			BIT(5)
+#define USE_USBIN_BIT				BIT(4)
+#define USE_DCIN_BIT				BIT(3)
+#define POWER_PATH_MASK				GENMASK(2, 1)
+#define VALID_INPUT_POWER_SOURCE_STS_BIT	BIT(0)
+
+#define WDOG_STATUS_REG				(MISC_BASE + 0x0C)
+#define WDOG_STATUS_7_BIT			BIT(7)
+#define WDOG_STATUS_6_BIT			BIT(6)
+#define WDOG_STATUS_5_BIT			BIT(5)
+#define WDOG_STATUS_4_BIT			BIT(4)
+#define WDOG_STATUS_3_BIT			BIT(3)
+#define WDOG_STATUS_2_BIT			BIT(2)
+#define WDOG_STATUS_1_BIT			BIT(1)
+#define BARK_BITE_STATUS_BIT			BIT(0)
+
+#define SYSOK_REASON_STATUS_REG			(MISC_BASE + 0x0D)
+#define SYSOK_REASON_DCIN_BIT			BIT(1)
+#define SYSOK_REASON_USBIN_BIT			BIT(0)
+
+/* MISC Interrupt Bits */
+#define SWITCHER_POWER_OK_RT_STS_BIT		BIT(7)
+#define TEMPERATURE_CHANGE_RT_STS_BIT		BIT(6)
+#define INPUT_CURRENT_LIMITING_RT_STS_BIT	BIT(5)
+#define HIGH_DUTY_CYCLE_RT_STS_BIT		BIT(4)
+#define AICL_DONE_RT_STS_BIT			BIT(3)
+#define AICL_FAIL_RT_STS_BIT			BIT(2)
+#define WDOG_BARK_RT_STS_BIT			BIT(1)
+#define WDOG_SNARL_RT_STS_BIT			BIT(0)
+
+#define WDOG_RST_REG				(MISC_BASE + 0x40)
+#define WDOG_RST_BIT				BIT(0)
+
+#define AFP_MODE_REG				(MISC_BASE + 0x41)
+#define AFP_MODE_EN_BIT				BIT(0)
+
+#define GSM_PA_ON_ADJ_EN_REG			(MISC_BASE + 0x42)
+#define GSM_PA_ON_ADJ_EN_BIT			BIT(0)
+
+#define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
+#define BARK_BITE_WDOG_PET_BIT			BIT(0)
+
+#define PHYON_CMD_REG				(MISC_BASE + 0x44)
+#define PHYON_CMD_BIT				BIT(0)
+
+#define SHDN_CMD_REG				(MISC_BASE + 0x45)
+#define SHDN_CMD_BIT				BIT(0)
+
+#define FINISH_COPY_COMMAND_REG			(MISC_BASE + 0x4F)
+#define START_COPY_BIT				BIT(0)
+
+#define WD_CFG_REG				(MISC_BASE + 0x51)
+#define WATCHDOG_TRIGGER_AFP_EN_BIT		BIT(7)
+#define BARK_WDOG_INT_EN_BIT			BIT(6)
+#define BITE_WDOG_INT_EN_BIT			BIT(5)
+#define SFT_AFTER_WDOG_IRQ_MASK			GENMASK(4, 3)
+#define WDOG_IRQ_SFT_BIT			BIT(2)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT		BIT(1)
+#define WDOG_TIMER_EN_BIT			BIT(0)
+
+#define MISC_CFG_REG				(MISC_BASE + 0x52)
+#define GSM_PA_ON_ADJ_SEL_BIT			BIT(0)
+#define TCC_DEBOUNCE_20MS_BIT			BIT(5)
+
+#define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
+#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
+#define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
+#define BARK_WDOG_TIMEOUT_MASK			GENMASK(3, 2)
+#define BITE_WDOG_TIMEOUT_MASK			GENMASK(1, 0)
+
+#define PHYON_CFG_REG				(MISC_BASE + 0x54)
+#define USBPHYON_PUSHPULL_CFG_BIT		BIT(1)
+#define PHYON_SW_SEL_BIT			BIT(0)
+
+#define CHGR_TRIM_OPTIONS_7_0_REG		(MISC_BASE + 0x55)
+#define TLIM_DIS_TBIT_BIT			BIT(0)
+
+#define CH_OV_OPTION_CFG_REG			(MISC_BASE + 0x56)
+#define OV_OPTION_TBIT_BIT			BIT(0)
+
+#define AICL_CFG_REG				(MISC_BASE + 0x60)
+#define TREG_ALLOW_DECREASE_BIT			BIT(1)
+#define AICL_HIGH_DC_INC_BIT			BIT(0)
+
+#define AICL_RERUN_TIME_CFG_REG			(MISC_BASE + 0x61)
+#define AICL_RERUN_TIME_MASK			GENMASK(1, 0)
+
+#define AICL_RERUN_TEMP_TIME_CFG_REG		(MISC_BASE + 0x62)
+#define AICL_RERUN_TEMP_TIME_MASK		GENMASK(1, 0)
+
+#define THERMREG_SRC_CFG_REG			(MISC_BASE + 0x70)
+#define SKIN_ADC_CFG_BIT			BIT(3)
+#define THERMREG_SKIN_ADC_SRC_EN_BIT		BIT(2)
+#define THERMREG_DIE_ADC_SRC_EN_BIT		BIT(1)
+#define THERMREG_DIE_CMP_SRC_EN_BIT		BIT(0)
+
+#define TREG_DIE_CMP_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x71)
+#define TREG_DIE_CMP_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_CMP_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x72)
+#define TREG_DIE_CMP_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_ADC_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x73)
+#define TREG_DIE_ADC_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_ADC_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x74)
+#define TREG_DIE_ADC_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_SKIN_ADC_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x75)
+#define TREG_SKIN_ADC_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_SKIN_ADC_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x76)
+#define TREG_SKIN_ADC_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define BUCK_OPTIONS_CFG_REG			(MISC_BASE + 0x80)
+#define CHG_EN_PIN_SUSPEND_CFG_BIT		BIT(6)
+#define HICCUP_OPTIONS_MASK			GENMASK(5, 4)
+#define INPUT_CURRENT_LIMIT_SOFTSTART_EN_BIT	BIT(3)
+#define HV_HIGH_DUTY_CYCLE_PROTECT_EN_BIT	BIT(2)
+#define BUCK_OC_PROTECT_EN_BIT			BIT(1)
+#define INPUT_MISS_POLL_EN_BIT			BIT(0)
+
+#define ICL_SOFTSTART_RATE_CFG_REG		(MISC_BASE + 0x81)
+#define ICL_SOFTSTART_RATE_MASK			GENMASK(1, 0)
+
+#define ICL_SOFTSTOP_RATE_CFG_REG		(MISC_BASE + 0x82)
+#define ICL_SOFTSTOP_RATE_MASK			GENMASK(1, 0)
+
+#define VSYS_MIN_SEL_CFG_REG			(MISC_BASE + 0x83)
+#define VSYS_MIN_SEL_MASK			GENMASK(1, 0)
+
+#define TRACKING_VOLTAGE_SEL_CFG_REG		(MISC_BASE + 0x84)
+#define TRACKING_VOLTAGE_SEL_BIT		BIT(0)
+
+#define STAT_CFG_REG				(MISC_BASE + 0x90)
+#define STAT_SW_OVERRIDE_VALUE_BIT		BIT(7)
+#define STAT_SW_OVERRIDE_CFG_BIT		BIT(6)
+#define STAT_PARALLEL_OFF_DG_CFG_MASK		GENMASK(5, 4)
+#define STAT_POLARITY_CFG_BIT			BIT(3)
+#define STAT_PARALLEL_CFG_BIT			BIT(2)
+#define STAT_FUNCTION_CFG_BIT			BIT(1)
+#define STAT_IRQ_PULSING_EN_BIT			BIT(0)
+
+#define LBC_EN_CFG_REG				(MISC_BASE + 0x91)
+#define LBC_DURING_CHARGING_CFG_BIT		BIT(1)
+#define LBC_EN_BIT				BIT(0)
+
+#define LBC_PERIOD_CFG_REG			(MISC_BASE + 0x92)
+#define LBC_PERIOD_MASK				GENMASK(2, 0)
+
+#define LBC_DUTY_CYCLE_CFG_REG			(MISC_BASE + 0x93)
+#define LBC_DUTY_CYCLE_MASK			GENMASK(2, 0)
+
+#define SYSOK_CFG_REG				(MISC_BASE + 0x94)
+#define SYSOK_PUSHPULL_CFG_BIT			BIT(5)
+#define SYSOK_B_OR_C_SEL_BIT			BIT(4)
+#define SYSOK_POL_BIT				BIT(3)
+#define SYSOK_OPTIONS_MASK			GENMASK(2, 0)
+
+#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG	(MISC_BASE + 0xA0)
+#define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG	(MISC_BASE + 0xA1)
+
+/* CHGR FREQ Peripheral registers */
+#define FREQ_CLK_DIV_REG			(CHGR_FREQ_BASE + 0x50)
+
+#endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
new file mode 100644
index 0000000..0d1f2a6
--- /dev/null
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -0,0 +1,3335 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/i2c.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/pinctrl/consumer.h>
+
+/* Mask/Bit helpers */
+#define _SMB1351_MASK(BITS, POS) \
+	((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB1351_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+		_SMB1351_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+				(RIGHT_BIT_POS))
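+/* For example, SMB1351_MASK(7, 4) evaluates to 0xF0 and SMB1351_MASK(3, 0) to 0x0F. */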
+
+/* Configuration registers */
+#define CHG_CURRENT_CTRL_REG			0x0
+#define FAST_CHG_CURRENT_MASK			SMB1351_MASK(7, 4)
+#define AC_INPUT_CURRENT_LIMIT_MASK		SMB1351_MASK(3, 0)
+
+#define CHG_OTH_CURRENT_CTRL_REG		0x1
+#define PRECHG_CURRENT_MASK			SMB1351_MASK(7, 5)
+#define ITERM_MASK				SMB1351_MASK(4, 2)
+#define USB_2_3_MODE_SEL_BIT			BIT(1)
+#define USB_2_3_MODE_SEL_BY_I2C			0
+#define USB_2_3_MODE_SEL_BY_PIN			0x2
+#define USB_5_1_CMD_POLARITY_BIT		BIT(0)
+#define USB_CMD_POLARITY_500_1_100_0		0
+#define USB_CMD_POLARITY_500_0_100_1		0x1
+
+#define VARIOUS_FUNC_REG			0x2
+#define SUSPEND_MODE_CTRL_BIT			BIT(7)
+#define SUSPEND_MODE_CTRL_BY_PIN		0
+#define SUSPEND_MODE_CTRL_BY_I2C		0x80
+#define BATT_TO_SYS_POWER_CTRL_BIT		BIT(6)
+#define MAX_SYS_VOLTAGE				BIT(5)
+#define AICL_EN_BIT				BIT(4)
+#define AICL_DET_TH_BIT				BIT(3)
+#define APSD_EN_BIT				BIT(2)
+#define BATT_OV_BIT				BIT(1)
+#define VCHG_FUNC_BIT				BIT(0)
+
+#define VFLOAT_REG				0x3
+#define PRECHG_TO_FAST_VOLTAGE_CFG_MASK		SMB1351_MASK(7, 6)
+#define VFLOAT_MASK				SMB1351_MASK(5, 0)
+
+#define CHG_CTRL_REG				0x4
+#define AUTO_RECHG_BIT				BIT(7)
+#define AUTO_RECHG_ENABLE			0
+#define AUTO_RECHG_DISABLE			0x80
+#define ITERM_EN_BIT				BIT(6)
+#define ITERM_ENABLE				0
+#define ITERM_DISABLE				0x40
+#define MAPPED_AC_INPUT_CURRENT_LIMIT_MASK	SMB1351_MASK(5, 4)
+#define AUTO_RECHG_TH_BIT			BIT(3)
+#define AUTO_RECHG_TH_50MV			0
+#define AUTO_RECHG_TH_100MV			0x8
+#define AFCV_MASK				SMB1351_MASK(2, 0)
+
+#define CHG_STAT_TIMERS_CTRL_REG		0x5
+#define STAT_OUTPUT_POLARITY_BIT		BIT(7)
+#define STAT_OUTPUT_MODE_BIT			BIT(6)
+#define STAT_OUTPUT_CTRL_BIT			BIT(5)
+#define OTH_CHG_IL_BIT				BIT(4)
+#define COMPLETE_CHG_TIMEOUT_MASK		SMB1351_MASK(3, 2)
+#define PRECHG_TIMEOUT_MASK			SMB1351_MASK(1, 0)
+
+#define CHG_PIN_EN_CTRL_REG			0x6
+#define LED_BLINK_FUNC_BIT			BIT(7)
+#define EN_PIN_CTRL_MASK			SMB1351_MASK(6, 5)
+#define EN_BY_I2C_0_DISABLE			0
+#define EN_BY_I2C_0_ENABLE			0x20
+#define EN_BY_PIN_HIGH_ENABLE			0x40
+#define EN_BY_PIN_LOW_ENABLE			0x60
+#define USBCS_CTRL_BIT				BIT(4)
+#define USBCS_CTRL_BY_I2C			0
+#define USBCS_CTRL_BY_PIN			0x10
+#define USBCS_INPUT_STATE_BIT			BIT(3)
+#define CHG_ERR_BIT				BIT(2)
+#define APSD_DONE_BIT				BIT(1)
+#define USB_FAIL_BIT				BIT(0)
+
+#define THERM_A_CTRL_REG			0x7
+#define MIN_SYS_VOLTAGE_MASK			SMB1351_MASK(7, 6)
+#define LOAD_BATT_10MA_FVC_BIT			BIT(5)
+#define THERM_MONITOR_BIT			BIT(4)
+#define THERM_MONITOR_EN			0
+#define SOFT_COLD_TEMP_LIMIT_MASK		SMB1351_MASK(3, 2)
+#define SOFT_HOT_TEMP_LIMIT_MASK		SMB1351_MASK(1, 0)
+
+#define WDOG_SAFETY_TIMER_CTRL_REG		0x8
+#define AICL_FAIL_OPTION_BIT			BIT(7)
+#define AICL_FAIL_TO_SUSPEND			0
+#define AICL_FAIL_TO_150_MA			0x80
+#define WDOG_TIMEOUT_MASK			SMB1351_MASK(6, 5)
+#define WDOG_IRQ_SAFETY_TIMER_MASK		SMB1351_MASK(4, 3)
+#define WDOG_IRQ_SAFETY_TIMER_EN_BIT		BIT(2)
+#define WDOG_OPTION_BIT				BIT(1)
+#define WDOG_TIMER_EN_BIT			BIT(0)
+
+#define OTG_USBIN_AICL_CTRL_REG			0x9
+#define OTG_ID_PIN_CTRL_MASK			SMB1351_MASK(7, 6)
+#define OTG_PIN_POLARITY_BIT			BIT(5)
+#define DCIN_IC_GLITCH_FILTER_HV_ADAPTER_MASK	SMB1351_MASK(4, 3)
+#define DCIN_IC_GLITCH_FILTER_LV_ADAPTER_BIT	BIT(2)
+#define USBIN_AICL_CFG1_BIT			BIT(1)
+#define USBIN_AICL_CFG0_BIT			BIT(0)
+
+#define OTG_TLIM_CTRL_REG			0xA
+#define SWITCH_FREQ_MASK			SMB1351_MASK(7, 6)
+#define THERM_LOOP_TEMP_SEL_MASK		SMB1351_MASK(5, 4)
+#define OTG_OC_LIMIT_MASK			SMB1351_MASK(3, 2)
+#define OTG_BATT_UVLO_TH_MASK			SMB1351_MASK(1, 0)
+
+#define HARD_SOFT_LIMIT_CELL_TEMP_REG		0xB
+#define HARD_LIMIT_COLD_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(7, 6)
+#define HARD_LIMIT_HOT_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(5, 4)
+#define SOFT_LIMIT_COLD_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(3, 2)
+#define SOFT_LIMIT_HOT_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(1, 0)
+
+#define FAULT_INT_REG				0xC
+#define HOT_COLD_HARD_LIMIT_BIT			BIT(7)
+#define HOT_COLD_SOFT_LIMIT_BIT			BIT(6)
+#define BATT_UVLO_IN_OTG_BIT			BIT(5)
+#define OTG_OC_BIT				BIT(4)
+#define INPUT_OVLO_BIT				BIT(3)
+#define INPUT_UVLO_BIT				BIT(2)
+#define AICL_DONE_FAIL_BIT			BIT(1)
+#define INTERNAL_OVER_TEMP_BIT			BIT(0)
+
+#define STATUS_INT_REG				0xD
+#define CHG_OR_PRECHG_TIMEOUT_BIT		BIT(7)
+#define RID_CHANGE_BIT				BIT(6)
+#define BATT_OVP_BIT				BIT(5)
+#define FAST_TERM_TAPER_RECHG_INHIBIT_BIT	BIT(4)
+#define WDOG_TIMER_BIT				BIT(3)
+#define POK_BIT					BIT(2)
+#define BATT_MISSING_BIT			BIT(1)
+#define BATT_LOW_BIT				BIT(0)
+
+#define VARIOUS_FUNC_2_REG			0xE
+#define CHG_HOLD_OFF_TIMER_AFTER_PLUGIN_BIT	BIT(7)
+#define CHG_INHIBIT_BIT				BIT(6)
+#define FAST_CHG_CC_IN_BATT_SOFT_LIMIT_MODE_BIT	BIT(5)
+#define FVCL_IN_BATT_SOFT_LIMIT_MODE_MASK	SMB1351_MASK(4, 3)
+#define HARD_TEMP_LIMIT_BEHAVIOR_BIT		BIT(2)
+#define PRECHG_TO_FASTCHG_BIT			BIT(1)
+#define STAT_PIN_CONFIG_BIT			BIT(0)
+
+#define FLEXCHARGER_REG				0x10
+#define AFVC_IRQ_BIT				BIT(7)
+#define CHG_CONFIG_MASK				SMB1351_MASK(6, 4)
+#define LOW_BATT_VOLTAGE_DET_TH_MASK		SMB1351_MASK(3, 0)
+
+#define VARIOUS_FUNC_3_REG			0x11
+#define SAFETY_TIMER_EN_MASK			SMB1351_MASK(7, 6)
+#define BLOCK_SUSPEND_DURING_VBATT_LOW_BIT	BIT(5)
+#define TIMEOUT_SEL_FOR_APSD_BIT		BIT(4)
+#define SDP_SUSPEND_BIT				BIT(3)
+#define QC_2P1_AUTO_INCREMENT_MODE_BIT		BIT(2)
+#define QC_2P1_AUTH_ALGO_BIT			BIT(1)
+#define DCD_EN_BIT				BIT(0)
+
+#define HVDCP_BATT_MISSING_CTRL_REG		0x12
+#define HVDCP_ADAPTER_SEL_MASK			SMB1351_MASK(7, 6)
+#define HVDCP_EN_BIT				BIT(5)
+#define HVDCP_AUTO_INCREMENT_LIMIT_BIT		BIT(4)
+#define BATT_MISSING_ON_INPUT_PLUGIN_BIT	BIT(3)
+#define BATT_MISSING_2P6S_POLLER_BIT		BIT(2)
+#define BATT_MISSING_ALGO_BIT			BIT(1)
+#define BATT_MISSING_THERM_PIN_SOURCE_BIT	BIT(0)
+
+#define PON_OPTIONS_REG				0x13
+#define SYSOK_INOK_POLARITY_BIT			BIT(7)
+#define SYSOK_OPTIONS_MASK			SMB1351_MASK(6, 4)
+#define INPUT_MISSING_POLLER_CONFIG_BIT		BIT(3)
+#define VBATT_LOW_DISABLED_OR_RESET_STATE_BIT	BIT(2)
+#define QC_2P1_AUTH_ALGO_IRQ_EN_BIT		BIT(0)
+
+#define OTG_MODE_POWER_OPTIONS_REG		0x14
+#define ADAPTER_CONFIG_MASK			SMB1351_MASK(7, 6)
+#define MAP_HVDCP_BIT				BIT(5)
+#define SDP_LOW_BATT_FORCE_USB5_OVER_USB1_BIT	BIT(4)
+#define OTG_HICCUP_MODE_BIT			BIT(2)
+#define INPUT_CURRENT_LIMIT_MASK		SMB1351_MASK(1, 0)
+
+#define CHARGER_I2C_CTRL_REG			0x15
+#define FULLON_MODE_EN_BIT			BIT(7)
+#define I2C_HS_MODE_EN_BIT			BIT(6)
+#define SYSON_LDO_OUTPUT_SEL_BIT		BIT(5)
+#define VBATT_TRACKING_VOLTAGE_DIFF_BIT		BIT(4)
+#define DISABLE_AFVC_WHEN_ENTER_TAPER_BIT	BIT(3)
+#define VCHG_IINV_BIT				BIT(2)
+#define AFVC_OVERRIDE_BIT			BIT(1)
+#define SYSOK_PIN_CONFIG_BIT			BIT(0)
+
+#define VERSION_REG				0x2E
+#define VERSION_MASK				BIT(1)
+
+/* Command registers */
+#define CMD_I2C_REG				0x30
+#define CMD_RELOAD_BIT				BIT(7)
+#define CMD_BQ_CFG_ACCESS_BIT			BIT(6)
+
+#define CMD_INPUT_LIMIT_REG			0x31
+#define CMD_OVERRIDE_BIT			BIT(7)
+#define CMD_SUSPEND_MODE_BIT			BIT(6)
+#define CMD_INPUT_CURRENT_MODE_BIT		BIT(3)
+#define CMD_INPUT_CURRENT_MODE_APSD		0
+#define CMD_INPUT_CURRENT_MODE_CMD		0x08
+#define CMD_USB_2_3_SEL_BIT			BIT(2)
+#define CMD_USB_2_MODE				0
+#define CMD_USB_3_MODE				0x4
+#define CMD_USB_1_5_AC_CTRL_MASK		SMB1351_MASK(1, 0)
+#define CMD_USB_100_MODE			0
+#define CMD_USB_500_MODE			0x2
+#define CMD_USB_AC_MODE				0x1
+
+#define CMD_CHG_REG				0x32
+#define CMD_DISABLE_THERM_MONITOR_BIT		BIT(4)
+#define CMD_TURN_OFF_STAT_PIN_BIT		BIT(3)
+#define CMD_PRE_TO_FAST_EN_BIT			BIT(2)
+#define CMD_CHG_EN_BIT				BIT(1)
+#define CMD_CHG_DISABLE				0
+#define CMD_CHG_ENABLE				0x2
+#define CMD_OTG_EN_BIT				BIT(0)
+
+#define CMD_DEAD_BATT_REG			0x33
+#define CMD_STOP_DEAD_BATT_TIMER_MASK		SMB1351_MASK(7, 0)
+
+#define CMD_HVDCP_REG				0x34
+#define CMD_APSD_RE_RUN_BIT			BIT(7)
+#define CMD_FORCE_HVDCP_2P0_BIT			BIT(5)
+#define CMD_HVDCP_MODE_MASK			SMB1351_MASK(5, 0)
+
+/* Status registers */
+#define STATUS_0_REG				0x36
+#define STATUS_AICL_BIT				BIT(7)
+#define STATUS_INPUT_CURRENT_LIMIT_MASK		SMB1351_MASK(6, 5)
+#define STATUS_DCIN_INPUT_CURRENT_LIMIT_MASK	SMB1351_MASK(4, 0)
+
+#define STATUS_1_REG				0x37
+#define STATUS_INPUT_RANGE_MASK			SMB1351_MASK(7, 4)
+#define STATUS_INPUT_USB_BIT			BIT(0)
+
+#define STATUS_2_REG				0x38
+#define STATUS_FAST_CHG_BIT			BIT(7)
+#define STATUS_HARD_LIMIT_BIT			BIT(6)
+#define STATUS_FLOAT_VOLTAGE_MASK		SMB1351_MASK(5, 0)
+
+#define STATUS_3_REG				0x39
+#define STATUS_CHG_BIT				BIT(7)
+#define STATUS_PRECHG_CURRENT_MASK		SMB1351_MASK(6, 4)
+#define STATUS_FAST_CHG_CURRENT_MASK		SMB1351_MASK(3, 0)
+
+#define STATUS_4_REG				0x3A
+#define STATUS_OTG_BIT				BIT(7)
+#define STATUS_AFVC_BIT				BIT(6)
+#define STATUS_DONE_BIT				BIT(5)
+#define STATUS_BATT_LESS_THAN_2V_BIT		BIT(4)
+#define STATUS_HOLD_OFF_BIT			BIT(3)
+#define STATUS_CHG_MASK				SMB1351_MASK(2, 1)
+#define STATUS_NO_CHARGING			0
+#define STATUS_FAST_CHARGING			0x4
+#define STATUS_PRE_CHARGING			0x2
+#define STATUS_TAPER_CHARGING			0x6
+#define STATUS_CHG_EN_STATUS_BIT		BIT(0)
+
+#define STATUS_5_REG				0x3B
+#define STATUS_SOURCE_DETECTED_MASK		SMB1351_MASK(7, 0)
+#define STATUS_PORT_CDP				0x80
+#define STATUS_PORT_DCP				0x40
+#define STATUS_PORT_OTHER			0x20
+#define STATUS_PORT_SDP				0x10
+#define STATUS_PORT_ACA_A			0x8
+#define STATUS_PORT_ACA_B			0x4
+#define STATUS_PORT_ACA_C			0x2
+#define STATUS_PORT_ACA_DOCK			0x1
+
+#define STATUS_6_REG				0x3C
+#define STATUS_DCD_TIMEOUT_BIT			BIT(7)
+#define STATUS_DCD_GOOD_DG_BIT			BIT(6)
+#define STATUS_OCD_GOOD_DG_BIT			BIT(5)
+#define STATUS_RID_ABD_DG_BIT			BIT(4)
+#define STATUS_RID_FLOAT_STATE_MACHINE_BIT	BIT(3)
+#define STATUS_RID_A_STATE_MACHINE_BIT		BIT(2)
+#define STATUS_RID_B_STATE_MACHINE_BIT		BIT(1)
+#define STATUS_RID_C_STATE_MACHINE_BIT		BIT(0)
+
+#define STATUS_7_REG				0x3D
+#define STATUS_HVDCP_MASK			SMB1351_MASK(7, 0)
+
+#define STATUS_8_REG				0x3E
+#define STATUS_USNIN_HV_INPUT_SEL_BIT		BIT(5)
+#define STATUS_USBIN_LV_UNDER_INPUT_SEL_BIT	BIT(4)
+#define STATUS_USBIN_LV_INPUT_SEL_BIT		BIT(3)
+
+/* Revision register */
+#define CHG_REVISION_REG			0x3F
+#define GUI_REVISION_MASK			SMB1351_MASK(7, 4)
+#define DEVICE_REVISION_MASK			SMB1351_MASK(3, 0)
+
+/* IRQ status registers */
+#define IRQ_A_REG				0x40
+#define IRQ_HOT_HARD_BIT			BIT(6)
+#define IRQ_COLD_HARD_BIT			BIT(4)
+#define IRQ_HOT_SOFT_BIT			BIT(2)
+#define IRQ_COLD_SOFT_BIT			BIT(0)
+
+#define IRQ_B_REG				0x41
+#define IRQ_BATT_TERMINAL_REMOVED_BIT		BIT(6)
+#define IRQ_BATT_MISSING_BIT			BIT(4)
+#define IRQ_LOW_BATT_VOLTAGE_BIT		BIT(2)
+#define IRQ_INTERNAL_TEMP_LIMIT_BIT		BIT(0)
+
+#define IRQ_C_REG				0x42
+#define IRQ_PRE_TO_FAST_VOLTAGE_BIT		BIT(6)
+#define IRQ_RECHG_BIT				BIT(4)
+#define IRQ_TAPER_BIT				BIT(2)
+#define IRQ_TERM_BIT				BIT(0)
+
+#define IRQ_D_REG				0x43
+#define IRQ_BATT_OV_BIT				BIT(6)
+#define IRQ_CHG_ERROR_BIT			BIT(4)
+#define IRQ_CHG_TIMEOUT_BIT			BIT(2)
+#define IRQ_PRECHG_TIMEOUT_BIT			BIT(0)
+
+#define IRQ_E_REG				0x44
+#define IRQ_USBIN_OV_BIT			BIT(6)
+#define IRQ_USBIN_UV_BIT			BIT(4)
+#define IRQ_AFVC_BIT				BIT(2)
+#define IRQ_POWER_OK_BIT			BIT(0)
+
+#define IRQ_F_REG				0x45
+#define IRQ_OTG_OVER_CURRENT_BIT		BIT(6)
+#define IRQ_OTG_FAIL_BIT			BIT(4)
+#define IRQ_RID_BIT				BIT(2)
+#define IRQ_OTG_OC_RETRY_BIT			BIT(0)
+
+#define IRQ_G_REG				0x46
+#define IRQ_SOURCE_DET_BIT			BIT(6)
+#define IRQ_AICL_DONE_BIT			BIT(4)
+#define IRQ_AICL_FAIL_BIT			BIT(2)
+#define IRQ_CHG_INHIBIT_BIT			BIT(0)
+
+#define IRQ_H_REG				0x47
+#define IRQ_IC_LIMIT_STATUS_BIT			BIT(5)
+#define IRQ_HVDCP_2P1_STATUS_BIT		BIT(4)
+#define IRQ_HVDCP_AUTH_DONE_BIT			BIT(2)
+#define IRQ_WDOG_TIMEOUT_BIT			BIT(0)
+
+/* constants */
+#define USB2_MIN_CURRENT_MA			100
+#define USB2_MAX_CURRENT_MA			500
+#define USB3_MIN_CURRENT_MA			150
+#define USB3_MAX_CURRENT_MA			900
+#define SMB1351_IRQ_REG_COUNT			8
+#define SMB1351_CHG_PRE_MIN_MA			100
+#define SMB1351_CHG_FAST_MIN_MA			1000
+#define SMB1351_CHG_FAST_MAX_MA			4500
+#define SMB1351_CHG_PRE_SHIFT			5
+#define SMB1351_CHG_FAST_SHIFT			4
+#define DEFAULT_BATT_CAPACITY			50
+#define DEFAULT_BATT_TEMP			250
+#define SUSPEND_CURRENT_MA			2
+
+#define CHG_ITERM_200MA				0x0
+#define CHG_ITERM_300MA				0x04
+#define CHG_ITERM_400MA				0x08
+#define CHG_ITERM_500MA				0x0C
+#define CHG_ITERM_600MA				0x10
+#define CHG_ITERM_700MA				0x14
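+/*
+ * Note (illustration only): the CHG_ITERM_* values above are already
+ * aligned to ITERM_MASK (bits 4:2), e.g. CHG_ITERM_400MA = 0x08 = 2 << 2.
+ */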
+
+#define ADC_TM_WARM_COOL_THR_ENABLE		ADC_TM_HIGH_LOW_THR_ENABLE
+
+enum reason {
+	USER	= BIT(0),
+	THERMAL = BIT(1),
+	CURRENT = BIT(2),
+	SOC	= BIT(3),
+};
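+/*
+ * Illustration (not part of the original code): suspend/disable requests
+ * are accumulated per reason, e.g. requests from both USER and THERMAL
+ * leave usb_suspended_status = USER | THERMAL = 0x3, and USBIN stays
+ * suspended until every reason bit has been cleared again.
+ */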
+
+static char *pm_batt_supplied_to[] = {
+	"bms",
+};
+
+struct smb1351_regulator {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+};
+
+enum chip_version {
+	SMB_UNKNOWN = 0,
+	SMB1350,
+	SMB1351,
+	SMB_MAX_TYPE,
+};
+
+static const char *smb1351_version_str[SMB_MAX_TYPE] = {
+	[SMB_UNKNOWN] = "Unknown",
+	[SMB1350] = "SMB1350",
+	[SMB1351] = "SMB1351",
+};
+
+struct smb1351_charger {
+	struct i2c_client	*client;
+	struct device		*dev;
+
+	bool			recharge_disabled;
+	int			recharge_mv;
+	bool			iterm_disabled;
+	int			iterm_ma;
+	int			vfloat_mv;
+	int			chg_present;
+	int			fake_battery_soc;
+	bool			chg_autonomous_mode;
+	bool			disable_apsd;
+	bool			using_pmic_therm;
+	bool			jeita_supported;
+	bool			battery_missing;
+	const char		*bms_psy_name;
+	bool			resume_completed;
+	bool			irq_waiting;
+	struct delayed_work	chg_remove_work;
+	struct delayed_work	hvdcp_det_work;
+
+	/* status tracking */
+	bool			batt_full;
+	bool			batt_hot;
+	bool			batt_cold;
+	bool			batt_warm;
+	bool			batt_cool;
+
+	int			battchg_disabled_status;
+	int			usb_suspended_status;
+	int			target_fastchg_current_max_ma;
+	int			fastchg_current_max_ma;
+	int			workaround_flags;
+
+	int			parallel_pin_polarity_setting;
+	int			parallel_mode;
+	bool			parallel_charger;
+	bool			parallel_charger_suspended;
+	bool			bms_controlled_charging;
+	bool			apsd_rerun;
+	bool			usbin_ov;
+	bool			chg_remove_work_scheduled;
+	bool			force_hvdcp_2p0;
+	enum chip_version	version;
+
+	/* psy */
+	struct power_supply	*usb_psy;
+	int			usb_psy_ma;
+	struct power_supply	*bms_psy;
+	struct power_supply_desc	batt_psy_d;
+	struct power_supply	*batt_psy;
+	struct power_supply	*parallel_psy;
+	struct power_supply_desc	parallel_psy_d;
+
+	struct smb1351_regulator	otg_vreg;
+	struct mutex		irq_complete;
+
+	struct dentry		*debug_root;
+	u32			peek_poke_address;
+
+	/* adc_tm parameters */
+	struct qpnp_vadc_chip	*vadc_dev;
+	struct qpnp_adc_tm_chip	*adc_tm_dev;
+	struct qpnp_adc_tm_btm_param	adc_param;
+
+	/* jeita parameters */
+	int			batt_hot_decidegc;
+	int			batt_cold_decidegc;
+	int			batt_warm_decidegc;
+	int			batt_cool_decidegc;
+	int			batt_missing_decidegc;
+	unsigned int		batt_warm_ma;
+	unsigned int		batt_warm_mv;
+	unsigned int		batt_cool_ma;
+	unsigned int		batt_cool_mv;
+
+	/* pinctrl parameters */
+	const char		*pinctrl_state_name;
+	struct pinctrl		*smb_pinctrl;
+};
+
+struct smb_irq_info {
+	const char		*name;
+	int (*smb_irq)(struct smb1351_charger *chip, u8 rt_stat);
+	int			high;
+	int			low;
+};
+
+struct irq_handler_info {
+	u8			stat_reg;
+	u8			val;
+	u8			prev_val;
+	struct smb_irq_info	irq_info[4];
+};
+
+/* USB input charge current */
+static int usb_chg_current[] = {
+	500, 685, 1000, 1100, 1200, 1300, 1500, 1600,
+	1700, 1800, 2000, 2200, 2500, 3000,
+};
+
+static int fast_chg_current[] = {
+	1000, 1200, 1400, 1600, 1800, 2000, 2200,
+	2400, 2600, 2800, 3000, 3400, 3600, 3800,
+	4000, 4640,
+};
+
+static int pre_chg_current[] = {
+	200, 300, 400, 500, 600, 700,
+};
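+
+/*
+ * Example mapping (illustration only): a 2000mA fast charge request
+ * matches fast_chg_current[5]; the index is shifted by
+ * SMB1351_CHG_FAST_SHIFT (4) and written to FAST_CHG_CURRENT_MASK,
+ * i.e. 5 << 4 = 0x50. Pre-charge requests below 1000mA use
+ * pre_chg_current[] with SMB1351_CHG_PRE_SHIFT (5) in the same way.
+ */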
+
+struct battery_status {
+	bool			batt_hot;
+	bool			batt_warm;
+	bool			batt_cool;
+	bool			batt_cold;
+	bool			batt_present;
+};
+
+enum {
+	BATT_HOT = 0,
+	BATT_WARM,
+	BATT_NORMAL,
+	BATT_COOL,
+	BATT_COLD,
+	BATT_MISSING,
+	BATT_STATUS_MAX,
+};
+
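+/*
+ * Initializer order below follows struct battery_status:
+ * { batt_hot, batt_warm, batt_cool, batt_cold, batt_present }.
+ */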
+static struct battery_status batt_s[] = {
+	[BATT_HOT] = {1, 0, 0, 0, 1},
+	[BATT_WARM] = {0, 1, 0, 0, 1},
+	[BATT_NORMAL] = {0, 0, 0, 0, 1},
+	[BATT_COOL] = {0, 0, 1, 0, 1},
+	[BATT_COLD] = {0, 0, 0, 1, 1},
+	[BATT_MISSING] = {0, 0, 0, 1, 0},
+};
+
+static int smb1351_read_reg(struct smb1351_charger *chip, int reg, u8 *val)
+{
+	s32 ret;
+
+	pm_stay_awake(chip->dev);
+	ret = i2c_smbus_read_byte_data(chip->client, reg);
+	if (ret < 0) {
+		pr_err("i2c read fail: can't read from %02x: %d\n", reg, ret);
+		pm_relax(chip->dev);
+		return ret;
+	}
+
+	*val = ret;
+
+	pm_relax(chip->dev);
+	pr_debug("Reading 0x%02x=0x%02x\n", reg, *val);
+	return 0;
+}
+
+static int smb1351_write_reg(struct smb1351_charger *chip, int reg, u8 val)
+{
+	s32 ret;
+
+	pm_stay_awake(chip->dev);
+	ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+	if (ret < 0) {
+		pr_err("i2c write fail: can't write %02x to %02x: %d\n",
+			val, reg, ret);
+		pm_relax(chip->dev);
+		return ret;
+	}
+	pm_relax(chip->dev);
+	pr_debug("Writing 0x%02x=0x%02x\n", reg, val);
+	return 0;
+}
+
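+/*
+ * Read-modify-write helper. Example (illustration only): a call with
+ * mask = EN_PIN_CTRL_MASK (0x60) and val = EN_BY_I2C_0_ENABLE (0x20)
+ * clears bits 6:5 of the register and then sets bit 5, leaving all
+ * other bits untouched.
+ */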
+static int smb1351_masked_write(struct smb1351_charger *chip, int reg,
+							u8 mask, u8 val)
+{
+	s32 rc;
+	u8 temp;
+
+	rc = smb1351_read_reg(chip, reg, &temp);
+	if (rc) {
+		pr_err("read failed: reg=%03X, rc=%d\n", reg, rc);
+		return rc;
+	}
+	temp &= ~mask;
+	temp |= val & mask;
+	rc = smb1351_write_reg(chip, reg, temp);
+	if (rc) {
+		pr_err("write failed: reg=%03X, rc=%d\n", reg, rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int smb1351_enable_volatile_writes(struct smb1351_charger *chip)
+{
+	int rc;
+
+	rc = smb1351_masked_write(chip, CMD_I2C_REG, CMD_BQ_CFG_ACCESS_BIT,
+							CMD_BQ_CFG_ACCESS_BIT);
+	if (rc)
+		pr_err("Couldn't write CMD_BQ_CFG_ACCESS_BIT rc=%d\n", rc);
+
+	return rc;
+}
+
+static int smb1351_usb_suspend(struct smb1351_charger *chip, int reason,
+					bool suspend)
+{
+	int rc = 0;
+	int suspended;
+
+	suspended = chip->usb_suspended_status;
+
+	pr_debug("reason = %d requested_suspend = %d suspended_status = %d\n",
+						reason, suspend, suspended);
+
+	if (suspend == false)
+		suspended &= ~reason;
+	else
+		suspended |= reason;
+
+	pr_debug("new suspended_status = %d\n", suspended);
+
+	rc = smb1351_masked_write(chip, CMD_INPUT_LIMIT_REG,
+				CMD_SUSPEND_MODE_BIT,
+				suspended ? CMD_SUSPEND_MODE_BIT : 0);
+	if (rc)
+		pr_err("Couldn't suspend rc = %d\n", rc);
+	else
+		chip->usb_suspended_status = suspended;
+
+	return rc;
+}
+
+static int smb1351_battchg_disable(struct smb1351_charger *chip,
+					int reason, int disable)
+{
+	int rc = 0;
+	int disabled;
+
+	if (chip->chg_autonomous_mode) {
+		pr_debug("Charger in autonomous mode\n");
+		return 0;
+	}
+
+	disabled = chip->battchg_disabled_status;
+
+	pr_debug("reason = %d requested_disable = %d disabled_status = %d\n",
+						reason, disable, disabled);
+	if (disable == true)
+		disabled |= reason;
+	else
+		disabled &= ~reason;
+
+	pr_debug("new disabled_status = %d\n", disabled);
+
+	rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_CHG_EN_BIT,
+					disabled ? 0 : CMD_CHG_ENABLE);
+	if (rc)
+		pr_err("Couldn't %s charging rc=%d\n",
+					disable ? "disable" : "enable", rc);
+	else
+		chip->battchg_disabled_status = disabled;
+
+	return rc;
+}
+
+static int smb1351_fastchg_current_set(struct smb1351_charger *chip,
+					unsigned int fastchg_current)
+{
+	int i, rc;
+	bool is_pre_chg = false;
+
+
+	if ((fastchg_current < SMB1351_CHG_PRE_MIN_MA) ||
+		(fastchg_current > SMB1351_CHG_FAST_MAX_MA)) {
+		pr_err("bad pre_fastchg current mA=%d asked to set\n",
+					fastchg_current);
+		return -EINVAL;
+	}
+
+	/*
+	 * The fast charge current cannot be set below 1000mA, so use the
+	 * pre-charge current instead for parallel charging.
+	 */
+	if (fastchg_current < SMB1351_CHG_FAST_MIN_MA) {
+		is_pre_chg = true;
+		pr_debug("is_pre_chg true, current is %d\n", fastchg_current);
+	}
+
+	if (is_pre_chg) {
+		/* set prechg current */
+		for (i = ARRAY_SIZE(pre_chg_current) - 1; i >= 0; i--) {
+			if (pre_chg_current[i] <= fastchg_current)
+				break;
+		}
+		if (i < 0)
+			i = 0;
+		chip->fastchg_current_max_ma = pre_chg_current[i];
+		pr_debug("prechg setting %02x\n", i);
+
+		i = i << SMB1351_CHG_PRE_SHIFT;
+
+		rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG,
+				PRECHG_CURRENT_MASK, i);
+		if (rc)
+			pr_err("Couldn't write CHG_OTH_CURRENT_CTRL_REG rc=%d\n",
+									rc);
+
+		return smb1351_masked_write(chip, VARIOUS_FUNC_2_REG,
+				PRECHG_TO_FASTCHG_BIT, PRECHG_TO_FASTCHG_BIT);
+	} else {
+		if (chip->version == SMB_UNKNOWN)
+			return -EINVAL;
+
+		/* SMB1350 supports FCC up to 2600 mA */
+		if (chip->version == SMB1350 && fastchg_current > 2600)
+			fastchg_current = 2600;
+
+		/* set fastchg current */
+		for (i = ARRAY_SIZE(fast_chg_current) - 1; i >= 0; i--) {
+			if (fast_chg_current[i] <= fastchg_current)
+				break;
+		}
+		if (i < 0)
+			i = 0;
+		chip->fastchg_current_max_ma = fast_chg_current[i];
+
+		i = i << SMB1351_CHG_FAST_SHIFT;
+		pr_debug("fastchg limit=%d setting %02x\n",
+					chip->fastchg_current_max_ma, i);
+
+		/* make sure pre chg mode is disabled */
+		rc = smb1351_masked_write(chip, VARIOUS_FUNC_2_REG,
+					PRECHG_TO_FASTCHG_BIT, 0);
+		if (rc)
+			pr_err("Couldn't write VARIOUS_FUNC_2_REG rc=%d\n", rc);
+
+		return smb1351_masked_write(chip, CHG_CURRENT_CTRL_REG,
+					FAST_CHG_CURRENT_MASK, i);
+	}
+}
+
+#define MIN_FLOAT_MV		3500
+#define MAX_FLOAT_MV		4500
+#define VFLOAT_STEP_MV		20
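+/*
+ * Encoding example (illustration only): vfloat_mv = 4200 is stored as
+ * (4200 - 3500) / 20 = 35 (0x23) in the VFLOAT_MASK field.
+ */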
+
+static int smb1351_float_voltage_set(struct smb1351_charger *chip,
+								int vfloat_mv)
+{
+	u8 temp;
+
+	if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+		pr_err("bad float voltage mv=%d asked to set\n", vfloat_mv);
+		return -EINVAL;
+	}
+
+	temp = (vfloat_mv - MIN_FLOAT_MV) / VFLOAT_STEP_MV;
+
+	return smb1351_masked_write(chip, VFLOAT_REG, VFLOAT_MASK, temp);
+}
+
+static int smb1351_iterm_set(struct smb1351_charger *chip, int iterm_ma)
+{
+	int rc;
+	u8 reg;
+
+	if (iterm_ma <= 200)
+		reg = CHG_ITERM_200MA;
+	else if (iterm_ma <= 300)
+		reg = CHG_ITERM_300MA;
+	else if (iterm_ma <= 400)
+		reg = CHG_ITERM_400MA;
+	else if (iterm_ma <= 500)
+		reg = CHG_ITERM_500MA;
+	else if (iterm_ma <= 600)
+		reg = CHG_ITERM_600MA;
+	else
+		reg = CHG_ITERM_700MA;
+
+	rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG,
+				ITERM_MASK, reg);
+	if (rc) {
+		pr_err("Couldn't set iterm rc = %d\n", rc);
+		return rc;
+	}
+	/* enable the iterm */
+	rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+				ITERM_EN_BIT, ITERM_ENABLE);
+	if (rc) {
+		pr_err("Couldn't enable iterm rc = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int smb1351_chg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+	rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT,
+							CMD_OTG_EN_BIT);
+	if (rc)
+		pr_err("Couldn't enable OTG mode rc=%d\n", rc);
+	return rc;
+}
+
+static int smb1351_chg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+	rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT, 0);
+	if (rc)
+		pr_err("Couldn't disable OTG mode rc=%d\n", rc);
+	return rc;
+}
+
+static int smb1351_chg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	u8 reg = 0;
+	struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+	rc = smb1351_read_reg(chip, CMD_CHG_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read OTG enable bit rc=%d\n", rc);
+		return rc;
+	}
+
+	return (reg & CMD_OTG_EN_BIT) ? 1 : 0;
+}
+
+struct regulator_ops smb1351_chg_otg_reg_ops = {
+	.enable		= smb1351_chg_otg_regulator_enable,
+	.disable	= smb1351_chg_otg_regulator_disable,
+	.is_enabled	= smb1351_chg_otg_regulator_is_enable,
+};
+
+static int smb1351_regulator_init(struct smb1351_charger *chip)
+{
+	int rc = 0;
+	struct regulator_config cfg = {};
+
+	chip->otg_vreg.rdesc.owner = THIS_MODULE;
+	chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+	chip->otg_vreg.rdesc.ops = &smb1351_chg_otg_reg_ops;
+	chip->otg_vreg.rdesc.name =
+		chip->dev->of_node->name;
+	chip->otg_vreg.rdesc.of_match =
+		chip->dev->of_node->name;
+
+	cfg.dev = chip->dev;
+	cfg.driver_data = chip;
+
+	chip->otg_vreg.rdev = regulator_register(
+					&chip->otg_vreg.rdesc, &cfg);
+	if (IS_ERR(chip->otg_vreg.rdev)) {
+		rc = PTR_ERR(chip->otg_vreg.rdev);
+		chip->otg_vreg.rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("OTG reg failed, rc=%d\n", rc);
+	}
+	return rc;
+}
+
+static int smb_chip_get_version(struct smb1351_charger *chip)
+{
+	u8 ver;
+	int rc = 0;
+
+	if (chip->version == SMB_UNKNOWN) {
+		rc = smb1351_read_reg(chip, VERSION_REG, &ver);
+		if (rc) {
+			pr_err("Couldn't read version rc=%d\n", rc);
+			return rc;
+		}
+
+		/* If bit 1 is set, it is SMB1350 */
+		if (ver & VERSION_MASK)
+			chip->version = SMB1350;
+		else
+			chip->version = SMB1351;
+	}
+
+	return rc;
+}
+
+static int smb1351_hw_init(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0, mask = 0;
+
+	/* configure smb_pinctrl to enable irqs */
+	if (chip->pinctrl_state_name) {
+		chip->smb_pinctrl = pinctrl_get_select(chip->dev,
+						chip->pinctrl_state_name);
+		if (IS_ERR(chip->smb_pinctrl)) {
+			pr_err("Could not get/set %s pinctrl state rc = %ld\n",
+						chip->pinctrl_state_name,
+						PTR_ERR(chip->smb_pinctrl));
+			return PTR_ERR(chip->smb_pinctrl);
+		}
+	}
+
+	/*
+	 * If the charger is pre-configured for autonomous operation,
+	 * do not apply additional settings
+	 */
+	if (chip->chg_autonomous_mode) {
+		pr_debug("Charger configured for autonomous mode\n");
+		return 0;
+	}
+
+	rc = smb_chip_get_version(chip);
+	if (rc) {
+		pr_err("Couldn't get version rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = smb1351_enable_volatile_writes(chip);
+	if (rc) {
+		pr_err("Couldn't configure volatile writes rc=%d\n", rc);
+		return rc;
+	}
+
+	/* setup battery missing source */
+	reg = BATT_MISSING_THERM_PIN_SOURCE_BIT;
+	mask = BATT_MISSING_THERM_PIN_SOURCE_BIT;
+	rc = smb1351_masked_write(chip, HVDCP_BATT_MISSING_CTRL_REG,
+								mask, reg);
+	if (rc) {
+		pr_err("Couldn't set HVDCP_BATT_MISSING_CTRL_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup defaults for CHG_PIN_EN_CTRL_REG */
+	reg = EN_BY_I2C_0_DISABLE | USBCS_CTRL_BY_I2C | CHG_ERR_BIT |
+		APSD_DONE_BIT | LED_BLINK_FUNC_BIT;
+	mask = EN_PIN_CTRL_MASK | USBCS_CTRL_BIT | CHG_ERR_BIT |
+		APSD_DONE_BIT | LED_BLINK_FUNC_BIT;
+	rc = smb1351_masked_write(chip, CHG_PIN_EN_CTRL_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set CHG_PIN_EN_CTRL_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup USB 2.0/3.0 detection and USB 500/100 command polarity */
+	reg = USB_2_3_MODE_SEL_BY_I2C | USB_CMD_POLARITY_500_1_100_0;
+	mask = USB_2_3_MODE_SEL_BIT | USB_5_1_CMD_POLARITY_BIT;
+	rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set CHG_OTH_CURRENT_CTRL_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup USB suspend, AICL and APSD  */
+	reg = SUSPEND_MODE_CTRL_BY_I2C | AICL_EN_BIT;
+	if (!chip->disable_apsd)
+		reg |= APSD_EN_BIT;
+	mask = SUSPEND_MODE_CTRL_BIT | AICL_EN_BIT | APSD_EN_BIT;
+	rc = smb1351_masked_write(chip, VARIOUS_FUNC_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set VARIOUS_FUNC_REG rc=%d\n",	rc);
+		return rc;
+	}
+	/* Fault and Status IRQ configuration */
+	reg = HOT_COLD_HARD_LIMIT_BIT | HOT_COLD_SOFT_LIMIT_BIT
+		| INPUT_OVLO_BIT | INPUT_UVLO_BIT | AICL_DONE_FAIL_BIT;
+	rc = smb1351_write_reg(chip, FAULT_INT_REG, reg);
+	if (rc) {
+		pr_err("Couldn't set FAULT_INT_REG rc=%d\n", rc);
+		return rc;
+	}
+	reg = CHG_OR_PRECHG_TIMEOUT_BIT | BATT_OVP_BIT |
+		FAST_TERM_TAPER_RECHG_INHIBIT_BIT |
+		BATT_MISSING_BIT | BATT_LOW_BIT;
+	rc = smb1351_write_reg(chip, STATUS_INT_REG, reg);
+	if (rc) {
+		pr_err("Couldn't set STATUS_INT_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup THERM Monitor */
+	if (!chip->using_pmic_therm) {
+		rc = smb1351_masked_write(chip, THERM_A_CTRL_REG,
+			THERM_MONITOR_BIT, THERM_MONITOR_EN);
+		if (rc) {
+			pr_err("Couldn't set THERM_A_CTRL_REG rc=%d\n",	rc);
+			return rc;
+		}
+	}
+	/* set the fast charge current limit */
+	rc = smb1351_fastchg_current_set(chip,
+			chip->target_fastchg_current_max_ma);
+	if (rc) {
+		pr_err("Couldn't set fastchg current rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set the float voltage */
+	if (chip->vfloat_mv != -EINVAL) {
+		rc = smb1351_float_voltage_set(chip, chip->vfloat_mv);
+		if (rc) {
+			pr_err("Couldn't set float voltage rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/* set iterm */
+	if (chip->iterm_ma != -EINVAL) {
+		if (chip->iterm_disabled) {
+			pr_err("Error: Both iterm_disabled and iterm_ma set\n");
+			return -EINVAL;
+		}
+		rc = smb1351_iterm_set(chip, chip->iterm_ma);
+		if (rc) {
+			pr_err("Couldn't set iterm rc = %d\n", rc);
+			return rc;
+		}
+	} else if (chip->iterm_disabled) {
+		rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+					ITERM_EN_BIT, ITERM_DISABLE);
+		if (rc) {
+			pr_err("Couldn't set iterm rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/* set recharge-threshold */
+	if (chip->recharge_mv != -EINVAL) {
+		if (chip->recharge_disabled) {
+			pr_err("Error: Both recharge_disabled and recharge_mv set\n");
+			return -EINVAL;
+		}
+
+		reg = AUTO_RECHG_ENABLE;
+		if (chip->recharge_mv > 50)
+			reg |= AUTO_RECHG_TH_100MV;
+		else
+			reg |= AUTO_RECHG_TH_50MV;
+
+		rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+				AUTO_RECHG_BIT |
+				AUTO_RECHG_TH_BIT, reg);
+		if (rc) {
+			pr_err("Couldn't set rechg-cfg rc = %d\n", rc);
+			return rc;
+		}
+	} else if (chip->recharge_disabled) {
+		rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+				AUTO_RECHG_BIT,
+				AUTO_RECHG_DISABLE);
+		if (rc) {
+			pr_err("Couldn't disable auto-rechg rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/* enable/disable charging by suspending usb */
+	rc = smb1351_usb_suspend(chip, USER, chip->usb_suspended_status);
+	if (rc) {
+		pr_err("Unable to %s battery charging. rc=%d\n",
+			chip->usb_suspended_status ? "disable" : "enable",
+									rc);
+	}
+
+	return rc;
+}
+
+static enum power_supply_property smb1351_battery_properties[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int smb1351_get_prop_batt_status(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0;
+
+	if (chip->batt_full)
+		return POWER_SUPPLY_STATUS_FULL;
+
+	rc = smb1351_read_reg(chip, STATUS_4_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_4 rc = %d\n", rc);
+		return POWER_SUPPLY_STATUS_UNKNOWN;
+	}
+
+	pr_debug("STATUS_4_REG(0x3A)=%x\n", reg);
+
+	if (reg & STATUS_HOLD_OFF_BIT)
+		return POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+	if (reg & STATUS_CHG_MASK)
+		return POWER_SUPPLY_STATUS_CHARGING;
+
+	return POWER_SUPPLY_STATUS_DISCHARGING;
+}
+
+static int smb1351_get_prop_batt_present(struct smb1351_charger *chip)
+{
+	return !chip->battery_missing;
+}
+
+static int smb1351_get_prop_batt_capacity(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->fake_battery_soc >= 0)
+		return chip->fake_battery_soc;
+
+	if (chip->bms_psy) {
+		power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		return ret.intval;
+	}
+	pr_debug("return DEFAULT_BATT_CAPACITY\n");
+	return DEFAULT_BATT_CAPACITY;
+}
+
+static int smb1351_get_prop_batt_temp(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+	int rc = 0;
+	struct qpnp_vadc_result results;
+
+	if (chip->bms_psy) {
+		power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_TEMP, &ret);
+		return ret.intval;
+	}
+	if (chip->vadc_dev) {
+		rc = qpnp_vadc_read(chip->vadc_dev,
+				LR_MUX1_BATT_THERM, &results);
+		if (rc)
+			pr_debug("Unable to read adc batt temp rc=%d\n", rc);
+		else
+			return (int)results.physical;
+	}
+
+	pr_debug("return default temperature\n");
+	return DEFAULT_BATT_TEMP;
+}
+
+static int smb1351_get_prop_charge_type(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0;
+
+	rc = smb1351_read_reg(chip, STATUS_4_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_4 rc = %d\n", rc);
+		return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+	}
+
+	pr_debug("STATUS_4_REG(0x3A)=%x\n", reg);
+
+	reg &= STATUS_CHG_MASK;
+
+	if (reg == STATUS_FAST_CHARGING)
+		return POWER_SUPPLY_CHARGE_TYPE_FAST;
+	else if (reg == STATUS_TAPER_CHARGING)
+		return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+	else if (reg == STATUS_PRE_CHARGING)
+		return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+	else
+		return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int smb1351_get_prop_batt_health(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->batt_hot)
+		ret.intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (chip->batt_cold)
+		ret.intval = POWER_SUPPLY_HEALTH_COLD;
+	else if (chip->batt_warm)
+		ret.intval = POWER_SUPPLY_HEALTH_WARM;
+	else if (chip->batt_cool)
+		ret.intval = POWER_SUPPLY_HEALTH_COOL;
+	else
+		ret.intval = POWER_SUPPLY_HEALTH_GOOD;
+
+	return ret.intval;
+}
+
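+/*
+ * Example (illustration only): a request of 1500mA is handled in HC
+ * mode; usb_chg_current[6] == 1500, so index 6 is written to
+ * AC_INPUT_CURRENT_LIMIT_MASK and CMD_USB_AC_MODE is selected in
+ * CMD_INPUT_LIMIT_REG.
+ */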
+static int smb1351_set_usb_chg_current(struct smb1351_charger *chip,
+							int current_ma)
+{
+	int i, rc = 0;
+	u8 reg = 0, mask = 0;
+
+	pr_debug("USB current_ma = %d\n", current_ma);
+
+	if (chip->chg_autonomous_mode) {
+		pr_debug("Charger in autonomous mode\n");
+		return 0;
+	}
+
+	/* set suspend bit when current_ma <= 2 */
+	if (current_ma <= SUSPEND_CURRENT_MA) {
+		smb1351_usb_suspend(chip, CURRENT, true);
+		pr_debug("USB suspend\n");
+		return 0;
+	}
+
+	if (current_ma > SUSPEND_CURRENT_MA &&
+			current_ma < USB2_MIN_CURRENT_MA)
+		current_ma = USB2_MIN_CURRENT_MA;
+
+	if (current_ma == USB2_MIN_CURRENT_MA) {
+		/* USB 2.0 - 100mA */
+		reg = CMD_USB_2_MODE | CMD_USB_100_MODE;
+	} else if (current_ma == USB3_MIN_CURRENT_MA) {
+		/* USB 3.0 - 150mA */
+		reg = CMD_USB_3_MODE | CMD_USB_100_MODE;
+	} else if (current_ma == USB2_MAX_CURRENT_MA) {
+		/* USB 2.0 - 500mA */
+		reg = CMD_USB_2_MODE | CMD_USB_500_MODE;
+	} else if (current_ma == USB3_MAX_CURRENT_MA) {
+		/* USB 3.0 - 900mA */
+		reg = CMD_USB_3_MODE | CMD_USB_500_MODE;
+	} else if (current_ma > USB2_MAX_CURRENT_MA) {
+		/* HC mode  - if none of the above */
+		reg = CMD_USB_AC_MODE;
+
+		for (i = ARRAY_SIZE(usb_chg_current) - 1; i >= 0; i--) {
+			if (usb_chg_current[i] <= current_ma)
+				break;
+		}
+		if (i < 0)
+			i = 0;
+		rc = smb1351_masked_write(chip, CHG_CURRENT_CTRL_REG,
+						AC_INPUT_CURRENT_LIMIT_MASK, i);
+		if (rc) {
+			pr_err("Couldn't set input mA rc=%d\n", rc);
+			return rc;
+		}
+	}
+	/* control input current mode by command */
+	reg |= CMD_INPUT_CURRENT_MODE_CMD;
+	mask = CMD_INPUT_CURRENT_MODE_BIT | CMD_USB_2_3_SEL_BIT |
+		CMD_USB_1_5_AC_CTRL_MASK;
+	rc = smb1351_masked_write(chip, CMD_INPUT_LIMIT_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set charging mode rc = %d\n", rc);
+		return rc;
+	}
+
+	/* unset the suspend bit here */
+	smb1351_usb_suspend(chip, CURRENT, false);
+
+	return rc;
+}
+
+static int smb1351_batt_property_is_writeable(struct power_supply *psy,
+					enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+	case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+	case POWER_SUPPLY_PROP_CAPACITY:
+		return 1;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int smb1351_battery_set_property(struct power_supply *psy,
+					enum power_supply_property prop,
+					const union power_supply_propval *val)
+{
+	int rc;
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!chip->bms_controlled_charging)
+			return -EINVAL;
+		switch (val->intval) {
+		case POWER_SUPPLY_STATUS_FULL:
+			rc = smb1351_battchg_disable(chip, SOC, true);
+			if (rc) {
+				pr_err("Couldn't disable charging rc = %d\n",
+									rc);
+			} else {
+				chip->batt_full = true;
+				pr_debug("status = FULL, batt_full = %d\n",
+							chip->batt_full);
+			}
+			break;
+		case POWER_SUPPLY_STATUS_DISCHARGING:
+			chip->batt_full = false;
+			power_supply_changed(chip->batt_psy);
+			pr_debug("status = DISCHARGING, batt_full = %d\n",
+							chip->batt_full);
+			break;
+		case POWER_SUPPLY_STATUS_CHARGING:
+			rc = smb1351_battchg_disable(chip, SOC, false);
+			if (rc) {
+				pr_err("Couldn't enable charging rc = %d\n",
+									rc);
+			} else {
+				chip->batt_full = false;
+				pr_debug("status = CHARGING, batt_full = %d\n",
+							chip->batt_full);
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		smb1351_usb_suspend(chip, USER, !val->intval);
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+		smb1351_battchg_disable(chip, USER, !val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		chip->fake_battery_soc = val->intval;
+		power_supply_changed(chip->batt_psy);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smb1351_battery_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = smb1351_get_prop_batt_status(chip);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = smb1351_get_prop_batt_present(chip);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = smb1351_get_prop_batt_capacity(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = !chip->usb_suspended_status;
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+		val->intval = !chip->battchg_disabled_status;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = smb1351_get_prop_charge_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = smb1351_get_prop_batt_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = smb1351_get_prop_batt_temp(chip);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = "smb1351";
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static enum power_supply_property smb1351_parallel_properties[] = {
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+};
+
+static int smb1351_parallel_set_chg_suspend(struct smb1351_charger *chip,
+						int suspend)
+{
+	int rc;
+	u8 reg, mask = 0;
+
+	if (chip->parallel_charger_suspended == suspend) {
+		pr_debug("Skip same state request suspended = %d suspend=%d\n",
+				chip->parallel_charger_suspended, !suspend);
+		return 0;
+	}
+
+	if (!suspend) {
+		rc = smb_chip_get_version(chip);
+		if (rc) {
+			pr_err("Couldn't get version rc = %d\n", rc);
+			return rc;
+		}
+
+		rc = smb1351_enable_volatile_writes(chip);
+		if (rc) {
+			pr_err("Couldn't configure for volatile rc = %d\n", rc);
+			return rc;
+		}
+
+		/* set the float voltage */
+		if (chip->vfloat_mv != -EINVAL) {
+			rc = smb1351_float_voltage_set(chip, chip->vfloat_mv);
+			if (rc) {
+				pr_err("Couldn't set float voltage rc = %d\n",
+									rc);
+				return rc;
+			}
+		}
+
+		/* set recharge-threshold and enable auto recharge */
+		if (chip->recharge_mv != -EINVAL) {
+			reg = AUTO_RECHG_ENABLE;
+			if (chip->recharge_mv > 50)
+				reg |= AUTO_RECHG_TH_100MV;
+			else
+				reg |= AUTO_RECHG_TH_50MV;
+
+			rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+					AUTO_RECHG_BIT |
+					AUTO_RECHG_TH_BIT, reg);
+			if (rc) {
+				pr_err("Couldn't set rechg-cfg rc = %d\n", rc);
+				return rc;
+			}
+		}
+
+		/* control USB suspend via command bits */
+		rc = smb1351_masked_write(chip, VARIOUS_FUNC_REG,
+					APSD_EN_BIT | SUSPEND_MODE_CTRL_BIT,
+						SUSPEND_MODE_CTRL_BY_I2C);
+		if (rc) {
+			pr_err("Couldn't set USB suspend rc=%d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * When the parallel charger is brought out of suspend, force
+		 * USB suspend first; charging starts only once
+		 * POWER_SUPPLY_PROP_CURRENT_MAX is set.
+		 */
+		rc = smb1351_usb_suspend(chip, CURRENT, true);
+		if (rc) {
+			pr_err("failed to suspend rc=%d\n", rc);
+			return rc;
+		}
+		chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+
+		/* set chg en by pin active low  */
+		reg = chip->parallel_pin_polarity_setting | USBCS_CTRL_BY_I2C;
+		rc = smb1351_masked_write(chip, CHG_PIN_EN_CTRL_REG,
+					EN_PIN_CTRL_MASK | USBCS_CTRL_BIT, reg);
+		if (rc) {
+			pr_err("Couldn't set en pin rc=%d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * setup USB 2.0/3.0 detection and USB 500/100
+		 * command polarity
+		 */
+		reg = USB_2_3_MODE_SEL_BY_I2C | USB_CMD_POLARITY_500_1_100_0;
+		mask = USB_2_3_MODE_SEL_BIT | USB_5_1_CMD_POLARITY_BIT;
+		rc = smb1351_masked_write(chip,
+				CHG_OTH_CURRENT_CTRL_REG, mask, reg);
+		if (rc) {
+			pr_err("Couldn't set CHG_OTH_CURRENT_CTRL_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		rc = smb1351_fastchg_current_set(chip,
+					chip->target_fastchg_current_max_ma);
+		if (rc) {
+			pr_err("Couldn't set fastchg current rc=%d\n", rc);
+			return rc;
+		}
+		chip->parallel_charger_suspended = false;
+	} else {
+		rc = smb1351_usb_suspend(chip, CURRENT, true);
+		if (rc)
+			pr_debug("failed to suspend rc=%d\n", rc);
+
+		chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+		chip->parallel_charger_suspended = true;
+	}
+
+	return 0;
+}
+
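+/*
+ * Example (illustration only): a 1160mA request sits between
+ * usb_chg_current[3] (1100) and usb_chg_current[4] (1200); 1200 is
+ * closer, so the helper below returns index 4.
+ */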
+static int smb1351_get_closest_usb_setpoint(int val)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(usb_chg_current) - 1; i >= 0; i--) {
+		if (usb_chg_current[i] <= val)
+			break;
+	}
+	if (i < 0)
+		i = 0;
+
+	if (i >= ARRAY_SIZE(usb_chg_current) - 1)
+		return ARRAY_SIZE(usb_chg_current) - 1;
+
+	/* check what is closer, i or i + 1 */
+	if (abs(usb_chg_current[i] - val) < abs(usb_chg_current[i + 1] - val))
+		return i;
+	else
+		return i + 1;
+}
+
+static bool smb1351_is_input_current_limited(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb1351_read_reg(chip, IRQ_H_REG, &reg);
+	if (rc) {
+		pr_err("Failed to read IRQ_H_REG for ICL status: %d\n", rc);
+		return false;
+	}
+
+	return !!(reg & IRQ_IC_LIMIT_STATUS_BIT);
+}
+
+static bool smb1351_is_usb_present(struct smb1351_charger *chip)
+{
+	int rc;
+	union power_supply_propval val = {0, };
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+	if (!chip->usb_psy) {
+		pr_err("USB psy not found\n");
+		return false;
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_ONLINE, &val);
+	if (rc < 0) {
+		pr_err("Failed to get present property rc=%d\n", rc);
+		return false;
+	}
+
+	if (val.intval)
+		return true;
+
+	return false;
+}
+
+static int smb1351_parallel_set_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       const union power_supply_propval *val)
+{
+	int rc = 0, index;
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		/*
+		 * CHG EN is controlled by pin in parallel charging.
+		 * Use suspend to disable charging via command.
+		 */
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_usb_suspend(chip, USER, !val->intval);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb1351_parallel_set_chg_suspend(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		chip->target_fastchg_current_max_ma =
+						val->intval / 1000;
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_fastchg_current_set(chip,
+					chip->target_fastchg_current_max_ma);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		index = smb1351_get_closest_usb_setpoint(val->intval / 1000);
+		chip->usb_psy_ma = usb_chg_current[index];
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_set_usb_chg_current(chip,
+						chip->usb_psy_ma);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		chip->vfloat_mv = val->intval / 1000;
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_float_voltage_set(chip, val->intval);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return rc;
+}
+
+static int smb1351_parallel_is_writeable(struct power_supply *psy,
+				       enum power_supply_property prop)
+{
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int smb1351_parallel_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = !chip->usb_suspended_status;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if (!chip->parallel_charger_suspended)
+			val->intval = chip->usb_psy_ma * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		if (!chip->parallel_charger_suspended)
+			val->intval = chip->vfloat_mv;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+		/* Check if SMB1351 is present */
+		if (smb1351_is_usb_present(chip)) {
+			val->intval = smb1351_get_prop_charge_type(chip);
+			if (val->intval == POWER_SUPPLY_CHARGE_TYPE_UNKNOWN) {
+				pr_debug("Failed to get charge type, charger may be absent\n");
+				return -ENODEV;
+			}
+		}
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		if (!chip->parallel_charger_suspended)
+			val->intval = chip->fastchg_current_max_ma * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!chip->parallel_charger_suspended)
+			val->intval = smb1351_get_prop_batt_status(chip);
+		else
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if (!chip->parallel_charger_suspended)
+			val->intval =
+				smb1351_is_input_current_limited(chip) ? 1 : 0;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = chip->parallel_mode;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void smb1351_chg_set_appropriate_battery_current(
+				struct smb1351_charger *chip)
+{
+	int rc;
+	unsigned int current_max = chip->target_fastchg_current_max_ma;
+
+	if (chip->batt_cool)
+		current_max = min(current_max, chip->batt_cool_ma);
+	if (chip->batt_warm)
+		current_max = min(current_max, chip->batt_warm_ma);
+
+	pr_debug("setting %dmA\n", current_max);
+
+	rc = smb1351_fastchg_current_set(chip, current_max);
+	if (rc)
+		pr_err("Couldn't set charging current rc = %d\n", rc);
+}
+
+static void smb1351_chg_set_appropriate_vddmax(struct smb1351_charger *chip)
+{
+	int rc;
+	unsigned int vddmax = chip->vfloat_mv;
+
+	if (chip->batt_cool)
+		vddmax = min(vddmax, chip->batt_cool_mv);
+	if (chip->batt_warm)
+		vddmax = min(vddmax, chip->batt_warm_mv);
+
+	pr_debug("setting %dmV\n", vddmax);
+
+	rc = smb1351_float_voltage_set(chip, vddmax);
+	if (rc)
+		pr_err("Couldn't set float voltage rc = %d\n", rc);
+}
+
+static void smb1351_chg_ctrl_in_jeita(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+	int rc;
+
+	/* enable the iterm to prevent the reverse boost */
+	if (chip->iterm_disabled) {
+		if (chip->batt_cool || chip->batt_warm) {
+			rc = smb1351_iterm_set(chip, 100);
+			pr_debug("set the iterm due to JEITA\n");
+		} else {
+			rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+						ITERM_EN_BIT, ITERM_DISABLE);
+			pr_debug("disable the iterm when exits warm/cool\n");
+		}
+		if (rc) {
+			pr_err("Couldn't set iterm rc = %d\n", rc);
+			return;
+		}
+	}
+	/*
+	 * When JEITA returns to normal, charging may be disabled due to
+	 * current termination, so re-enable charging if the SOC is less
+	 * than 100 in normal mode. A 200ms delay is required between the
+	 * disable and enable operations.
+	 */
+	if (chip->bms_psy) {
+		rc = power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		if (rc) {
+			pr_err("Couldn't read the bms capacity rc = %d\n",
+									rc);
+			return;
+		}
+		if (!chip->batt_cool && !chip->batt_warm
+				&& !chip->batt_cold && !chip->batt_hot
+				&& ret.intval < 100) {
+			rc = smb1351_battchg_disable(chip, THERMAL, true);
+			if (rc) {
+				pr_err("Couldn't disable charging rc = %d\n",
+									rc);
+				return;
+			}
+			/* delay for resetting the charging */
+			msleep(200);
+			rc = smb1351_battchg_disable(chip, THERMAL, false);
+			if (rc) {
+				pr_err("Couldn't enable charging rc = %d\n",
+									rc);
+				return;
+			}
+
+			chip->batt_full = false;
+			pr_debug("re-enable charging, batt_full = %d\n",
+						chip->batt_full);
+			power_supply_changed(chip->batt_psy);
+		}
+	}
+}
+
+#define HYSTERESIS_DECIDEGC 20
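+/*
+ * Hysteresis example (illustration only, assuming a DT-configured
+ * batt_hot_decidegc of 450): once the hot threshold trips, the low
+ * trip point is re-armed at 450 - 20 = 430 deciDegC, so the hot state
+ * only clears after the battery cools below 43.0 degC.
+ */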
+static void smb1351_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
+{
+	struct smb1351_charger *chip = ctx;
+	struct battery_status *cur = NULL;
+	int temp;
+
+	if (state >= ADC_TM_STATE_NUM) {
+		pr_err("invalid state parameter %d\n", state);
+		return;
+	}
+
+	temp = smb1351_get_prop_batt_temp(chip);
+
+	pr_debug("temp = %d state = %s\n", temp,
+				state == ADC_TM_WARM_STATE ? "hot" : "cold");
+
+	/* reset the adc status request */
+	chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
+
+	/* temp from low to high */
+	if (state == ADC_TM_WARM_STATE) {
+		/* WARM -> HOT */
+		if (temp >= chip->batt_hot_decidegc) {
+			cur = &batt_s[BATT_HOT];
+			chip->adc_param.low_temp =
+				chip->batt_hot_decidegc - HYSTERESIS_DECIDEGC;
+			chip->adc_param.state_request =	ADC_TM_COOL_THR_ENABLE;
+		/* NORMAL -> WARM */
+		} else if (temp >= chip->batt_warm_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_WARM];
+			chip->adc_param.low_temp =
+				chip->batt_warm_decidegc - HYSTERESIS_DECIDEGC;
+			chip->adc_param.high_temp = chip->batt_hot_decidegc;
+		/* COOL -> NORMAL */
+		} else if (temp >= chip->batt_cool_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_NORMAL];
+			chip->adc_param.low_temp =
+				chip->batt_cool_decidegc - HYSTERESIS_DECIDEGC;
+			chip->adc_param.high_temp = chip->batt_warm_decidegc;
+		/* COLD -> COOL */
+		} else if (temp >= chip->batt_cold_decidegc) {
+			cur = &batt_s[BATT_COOL];
+			chip->adc_param.low_temp =
+				chip->batt_cold_decidegc - HYSTERESIS_DECIDEGC;
+			if (chip->jeita_supported)
+				chip->adc_param.high_temp =
+						chip->batt_cool_decidegc;
+			else
+				chip->adc_param.high_temp =
+						chip->batt_hot_decidegc;
+		/* MISSING -> COLD */
+		} else if (temp >= chip->batt_missing_decidegc) {
+			cur = &batt_s[BATT_COLD];
+			chip->adc_param.high_temp = chip->batt_cold_decidegc;
+			chip->adc_param.low_temp = chip->batt_missing_decidegc
+							- HYSTERESIS_DECIDEGC;
+		}
+	/* temp from high to low */
+	} else {
+		/* COLD -> MISSING */
+		if (temp <= chip->batt_missing_decidegc) {
+			cur = &batt_s[BATT_MISSING];
+			chip->adc_param.high_temp = chip->batt_missing_decidegc
+							+ HYSTERESIS_DECIDEGC;
+			chip->adc_param.state_request = ADC_TM_WARM_THR_ENABLE;
+		/* COOL -> COLD */
+		} else if (temp <= chip->batt_cold_decidegc) {
+			cur = &batt_s[BATT_COLD];
+			chip->adc_param.high_temp =
+				chip->batt_cold_decidegc + HYSTERESIS_DECIDEGC;
+			/* add low_temp to enable batt present check */
+			chip->adc_param.low_temp = chip->batt_missing_decidegc;
+		/* NORMAL -> COOL */
+		} else if (temp <= chip->batt_cool_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_COOL];
+			chip->adc_param.high_temp =
+				chip->batt_cool_decidegc + HYSTERESIS_DECIDEGC;
+			chip->adc_param.low_temp = chip->batt_cold_decidegc;
+		/* WARM -> NORMAL */
+		} else if (temp <= chip->batt_warm_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_NORMAL];
+			chip->adc_param.high_temp =
+				chip->batt_warm_decidegc + HYSTERESIS_DECIDEGC;
+			chip->adc_param.low_temp = chip->batt_cool_decidegc;
+		/* HOT -> WARM */
+		} else if (temp <= chip->batt_hot_decidegc) {
+			cur = &batt_s[BATT_WARM];
+			if (chip->jeita_supported)
+				chip->adc_param.low_temp =
+					chip->batt_warm_decidegc;
+			else
+				chip->adc_param.low_temp =
+					chip->batt_cold_decidegc;
+			chip->adc_param.high_temp =
+				chip->batt_hot_decidegc + HYSTERESIS_DECIDEGC;
+		}
+	}
+
+	if (!cur) {
+		pr_debug("Couldn't choose batt state, adc state=%d and temp=%d\n",
+			state, temp);
+		return;
+	}
+
+	if (cur->batt_present)
+		chip->battery_missing = false;
+	else
+		chip->battery_missing = true;
+
+	if (cur->batt_hot ^ chip->batt_hot ||
+			cur->batt_cold ^ chip->batt_cold) {
+		chip->batt_hot = cur->batt_hot;
+		chip->batt_cold = cur->batt_cold;
+		/* stop charging explicitly since we use PMIC thermal pin */
+		if (cur->batt_hot || cur->batt_cold ||
+							chip->battery_missing)
+			smb1351_battchg_disable(chip, THERMAL, 1);
+		else
+			smb1351_battchg_disable(chip, THERMAL, 0);
+	}
+
+	if ((chip->batt_warm ^ cur->batt_warm ||
+				chip->batt_cool ^ cur->batt_cool)
+						&& chip->jeita_supported) {
+		chip->batt_warm = cur->batt_warm;
+		chip->batt_cool = cur->batt_cool;
+		smb1351_chg_set_appropriate_battery_current(chip);
+		smb1351_chg_set_appropriate_vddmax(chip);
+		smb1351_chg_ctrl_in_jeita(chip);
+	}
+
+	pr_debug("hot %d, cold %d, warm %d, cool %d, soft jeita supported %d, missing %d, low = %d deciDegC, high = %d deciDegC\n",
+		chip->batt_hot, chip->batt_cold, chip->batt_warm,
+		chip->batt_cool, chip->jeita_supported,
+		chip->battery_missing, chip->adc_param.low_temp,
+		chip->adc_param.high_temp);
+	if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
+		pr_err("request ADC error\n");
+}
+
+static int rerun_apsd(struct smb1351_charger *chip)
+{
+	int rc;
+
+	pr_debug("Rerunning APSD\nDisabling APSD\n");
+
+	rc = smb1351_masked_write(chip, CMD_HVDCP_REG, CMD_APSD_RE_RUN_BIT,
+						CMD_APSD_RE_RUN_BIT);
+	if (rc)
+		pr_err("Couldn't re-run APSD algo\n");
+
+	return 0;
+}
+
+static void smb1351_hvdcp_det_work(struct work_struct *work)
+{
+	int rc;
+	u8 reg;
+	union power_supply_propval pval = {0, };
+	struct smb1351_charger *chip = container_of(work,
+						struct smb1351_charger,
+						hvdcp_det_work.work);
+
+	rc = smb1351_read_reg(chip, STATUS_7_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_7_REG rc = %d\n", rc);
+		goto end;
+	}
+	pr_debug("STATUS_7_REG = 0x%02X\n", reg);
+
+	if (reg) {
+		pr_debug("HVDCP detected; notifying USB PSY\n");
+		pval.intval = POWER_SUPPLY_TYPE_USB_HVDCP;
+		power_supply_set_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_TYPE, &pval);
+	}
+end:
+	pm_relax(chip->dev);
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static int smb1351_apsd_complete_handler(struct smb1351_charger *chip,
+						u8 status)
+{
+	int rc;
+	u8 reg = 0;
+	union power_supply_propval prop = {0, };
+	enum power_supply_type type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+	/*
+	 * If apsd is disabled, charger detection is done by
+	 * USB phy driver.
+	 */
+	if (chip->disable_apsd || chip->usbin_ov) {
+		pr_debug("APSD %s, status = %d\n",
+			chip->disable_apsd ? "disabled" : "enabled", !!status);
+		pr_debug("USBIN ov, status = %d\n", chip->usbin_ov);
+		return 0;
+	}
+
+	rc = smb1351_read_reg(chip, STATUS_5_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_5 rc = %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("STATUS_5_REG(0x3B)=%x\n", reg);
+
+	switch (reg) {
+	case STATUS_PORT_ACA_DOCK:
+	case STATUS_PORT_ACA_C:
+	case STATUS_PORT_ACA_B:
+	case STATUS_PORT_ACA_A:
+		type = POWER_SUPPLY_TYPE_USB_ACA;
+		break;
+	case STATUS_PORT_CDP:
+		type = POWER_SUPPLY_TYPE_USB_CDP;
+		break;
+	case STATUS_PORT_DCP:
+		type = POWER_SUPPLY_TYPE_USB_DCP;
+		break;
+	case STATUS_PORT_SDP:
+		type = POWER_SUPPLY_TYPE_USB;
+		break;
+	case STATUS_PORT_OTHER:
+		type = POWER_SUPPLY_TYPE_USB_DCP;
+		break;
+	default:
+		type = POWER_SUPPLY_TYPE_USB;
+		break;
+	}
+
+	if (status) {
+		chip->chg_present = true;
+		pr_debug("APSD complete. USB type detected=%d chg_present=%d\n",
+						type, chip->chg_present);
+		if (!chip->battery_missing && !chip->apsd_rerun) {
+			if (type == POWER_SUPPLY_TYPE_USB) {
+				pr_debug("Setting usb psy dp=f dm=f SDP and rerun\n");
+				prop.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+				power_supply_set_property(chip->usb_psy,
+						POWER_SUPPLY_PROP_DP_DM, &prop);
+				chip->apsd_rerun = true;
+				rerun_apsd(chip);
+				return 0;
+			}
+			pr_debug("Set usb psy dp=f dm=f DCP and no rerun\n");
+			prop.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_DP_DM, &prop);
+		}
+		/*
+		 * If the force-HVDCP-2.0 property is defined, force
+		 * HVDCP 2.0 mode here in the APSD handler.
+		 */
+		if (chip->force_hvdcp_2p0) {
+			pr_debug("Force set to HVDCP 2.0 mode\n");
+			smb1351_masked_write(chip, VARIOUS_FUNC_3_REG,
+						QC_2P1_AUTH_ALGO_BIT, 0);
+			smb1351_masked_write(chip, CMD_HVDCP_REG,
+						CMD_FORCE_HVDCP_2P0_BIT,
+						CMD_FORCE_HVDCP_2P0_BIT);
+			type = POWER_SUPPLY_TYPE_USB_HVDCP;
+		} else if (type == POWER_SUPPLY_TYPE_USB_DCP) {
+			pr_debug("schedule hvdcp detection worker\n");
+			pm_stay_awake(chip->dev);
+			schedule_delayed_work(&chip->hvdcp_det_work,
+					msecs_to_jiffies(HVDCP_NOTIFY_MS));
+		}
+
+		prop.intval = type;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &prop);
+		/*
+		 * SMB is now done sampling the D+/D- lines,
+		 * indicate USB driver
+		 */
+		pr_debug("updating usb_psy present=%d\n", chip->chg_present);
+		prop.intval = chip->chg_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&prop);
+		chip->apsd_rerun = false;
+	} else if (!chip->apsd_rerun) {
+		/* Handle Charger removal */
+		prop.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &prop);
+
+		chip->chg_present = false;
+		prop.intval = chip->chg_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&prop);
+
+		pr_debug("Set usb psy dp=r dm=r\n");
+		prop.intval = POWER_SUPPLY_DP_DM_DPR_DMR;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_DP_DM, &prop);
+	}
+
+	return 0;
+}
+
+/*
+ * As the source detect interrupt is not triggered on the falling edge,
+ * schedule a work item to check the source detect status after the
+ * charger UV interrupt fires.
+ */
+#define FIRST_CHECK_DELAY	100
+#define SECOND_CHECK_DELAY	1000
+static void smb1351_chg_remove_work(struct work_struct *work)
+{
+	int rc;
+	u8 reg;
+	struct smb1351_charger *chip = container_of(work,
+				struct smb1351_charger, chg_remove_work.work);
+
+	rc = smb1351_read_reg(chip, IRQ_G_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_G_REG rc = %d\n", rc);
+		goto end;
+	}
+
+	if (!(reg & IRQ_SOURCE_DET_BIT)) {
+		pr_debug("chg removed\n");
+		smb1351_apsd_complete_handler(chip, 0);
+	} else if (!chip->chg_remove_work_scheduled) {
+		chip->chg_remove_work_scheduled = true;
+		goto reschedule;
+	} else {
+		pr_debug("charger is present\n");
+	}
+end:
+	chip->chg_remove_work_scheduled = false;
+	pm_relax(chip->dev);
+	return;
+
+reschedule:
+	pr_debug("reschedule after 1s\n");
+	schedule_delayed_work(&chip->chg_remove_work,
+				msecs_to_jiffies(SECOND_CHECK_DELAY));
+}
+
+static int smb1351_usbin_uv_handler(struct smb1351_charger *chip, u8 status)
+{
+	union power_supply_propval pval = {0, };
+
+	/* use this to detect USB insertion only if !apsd */
+	if (chip->disable_apsd) {
+		/*
+		 * If APSD is disabled, src det interrupt won't trigger.
+		 * Hence use usbin_uv for removal and insertion notification
+		 */
+		if (status == 0) {
+			chip->chg_present = true;
+			pr_debug("updating usb_psy present=%d\n",
+						chip->chg_present);
+			pval.intval = POWER_SUPPLY_TYPE_USB;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_TYPE, &pval);
+
+			pval.intval = chip->chg_present;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_PRESENT,
+					&pval);
+		} else {
+			chip->chg_present = false;
+
+			pval.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_TYPE, &pval);
+
+			pr_debug("updating usb_psy present=%d\n",
+							chip->chg_present);
+			pval.intval = chip->chg_present;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_PRESENT,
+					&pval);
+		}
+		return 0;
+	}
+
+	if (status) {
+		cancel_delayed_work_sync(&chip->hvdcp_det_work);
+		pm_relax(chip->dev);
+		pr_debug("schedule charger remove worker\n");
+		schedule_delayed_work(&chip->chg_remove_work,
+					msecs_to_jiffies(FIRST_CHECK_DELAY));
+		pm_stay_awake(chip->dev);
+	}
+
+	pr_debug("chip->chg_present = %d\n", chip->chg_present);
+
+	return 0;
+}
+
+static int smb1351_usbin_ov_handler(struct smb1351_charger *chip, u8 status)
+{
+	int rc;
+	u8 reg;
+	union power_supply_propval pval = {0, };
+
+	rc = smb1351_read_reg(chip, IRQ_E_REG, &reg);
+	if (rc)
+		pr_err("Couldn't read IRQ_E rc = %d\n", rc);
+
+	if (status != 0) {
+		chip->chg_present = false;
+		chip->usbin_ov = true;
+
+		pval.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+
+		pval.intval = chip->chg_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&pval);
+	} else {
+		chip->usbin_ov = false;
+		if (reg & IRQ_USBIN_UV_BIT)
+			pr_debug("Charger unplugged from OV\n");
+		else
+			smb1351_apsd_complete_handler(chip, 1);
+	}
+
+	if (chip->usb_psy) {
+		pval.intval = status ? POWER_SUPPLY_HEALTH_OVERVOLTAGE
+					: POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_HEALTH, &pval);
+		pr_debug("chip ov status is %d\n", pval.intval);
+	}
+	pr_debug("chip->chg_present = %d\n", chip->chg_present);
+
+	return 0;
+}
+
+static int smb1351_fast_chg_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("enter\n");
+	return 0;
+}
+
+static int smb1351_chg_term_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("enter\n");
+	if (!chip->bms_controlled_charging)
+		chip->batt_full = !!status;
+	return 0;
+}
+
+static int smb1351_safety_timeout_handler(struct smb1351_charger *chip,
+						u8 status)
+{
+	pr_debug("safety_timeout triggered\n");
+	return 0;
+}
+
+static int smb1351_aicl_done_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("aicl_done triggered\n");
+	return 0;
+}
+
+static int smb1351_hot_hard_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_hot = !!status;
+	return 0;
+}
+static int smb1351_cold_hard_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_cold = !!status;
+	return 0;
+}
+static int smb1351_hot_soft_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_warm = !!status;
+	return 0;
+}
+static int smb1351_cold_soft_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_cool = !!status;
+	return 0;
+}
+
+static int smb1351_battery_missing_handler(struct smb1351_charger *chip,
+						u8 status)
+{
+	if (status)
+		chip->battery_missing = true;
+	else
+		chip->battery_missing = false;
+
+	return 0;
+}
+
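+/*
+ * IRQ status register layout: each register packs up to four interrupt
+ * sources, two bits per source (a latched "triggered" bit and a
+ * real-time status bit). Sources without a handler are only counted
+ * for the irq_count debugfs statistics.
+ */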
+static struct irq_handler_info handlers[] = {
+	[0] = {
+		.stat_reg	= IRQ_A_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "cold_soft",
+				.smb_irq = smb1351_cold_soft_handler,
+			},
+			{	.name	 = "hot_soft",
+				.smb_irq = smb1351_hot_soft_handler,
+			},
+			{	.name	 = "cold_hard",
+				.smb_irq = smb1351_cold_hard_handler,
+			},
+			{	.name	 = "hot_hard",
+				.smb_irq = smb1351_hot_hard_handler,
+			},
+		},
+	},
+	[1] = {
+		.stat_reg	= IRQ_B_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "internal_temp_limit",
+			},
+			{	.name	 = "vbatt_low",
+			},
+			{	.name	 = "battery_missing",
+				.smb_irq = smb1351_battery_missing_handler,
+			},
+			{	.name	 = "batt_therm_removed",
+			},
+		},
+	},
+	[2] = {
+		.stat_reg	= IRQ_C_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "chg_term",
+				.smb_irq = smb1351_chg_term_handler,
+			},
+			{	.name	 = "taper",
+			},
+			{	.name	 = "recharge",
+			},
+			{	.name	 = "fast_chg",
+				.smb_irq = smb1351_fast_chg_handler,
+			},
+		},
+	},
+	[3] = {
+		.stat_reg	= IRQ_D_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "prechg_timeout",
+			},
+			{	.name	 = "safety_timeout",
+				.smb_irq = smb1351_safety_timeout_handler,
+			},
+			{	.name	 = "chg_error",
+			},
+			{	.name	 = "batt_ov",
+			},
+		},
+	},
+	[4] = {
+		.stat_reg	= IRQ_E_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "power_ok",
+			},
+			{	.name	 = "afvc",
+			},
+			{	.name	 = "usbin_uv",
+				.smb_irq = smb1351_usbin_uv_handler,
+			},
+			{	.name	 = "usbin_ov",
+				.smb_irq = smb1351_usbin_ov_handler,
+			},
+		},
+	},
+	[5] = {
+		.stat_reg	= IRQ_F_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "otg_oc_retry",
+			},
+			{	.name	 = "rid",
+			},
+			{	.name	 = "otg_fail",
+			},
+			{	.name	 = "otg_oc",
+			},
+		},
+	},
+	[6] = {
+		.stat_reg	= IRQ_G_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "chg_inhibit",
+			},
+			{	.name	 = "aicl_fail",
+			},
+			{	.name	 = "aicl_done",
+				.smb_irq = smb1351_aicl_done_handler,
+			},
+			{	.name	 = "apsd_complete",
+				.smb_irq = smb1351_apsd_complete_handler,
+			},
+		},
+	},
+	[7] = {
+		.stat_reg	= IRQ_H_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "wdog_timeout",
+			},
+			{	.name	 = "hvdcp_auth_done",
+			},
+		},
+	},
+};
+
+#define IRQ_LATCHED_MASK	0x02
+#define IRQ_STATUS_MASK		0x01
+#define BITS_PER_IRQ		2
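+/*
+ * Threaded STAT interrupt handler: read every IRQ status register,
+ * work out which sources triggered or changed state, and dispatch the
+ * matching per-source handlers. If any handler ran, notify the battery
+ * power supply of the change.
+ */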
+static irqreturn_t smb1351_chg_stat_handler(int irq, void *dev_id)
+{
+	struct smb1351_charger *chip = dev_id;
+	int i, j;
+	u8 triggered;
+	u8 changed;
+	u8 rt_stat, prev_rt_stat;
+	int rc;
+	int handler_count = 0;
+
+	mutex_lock(&chip->irq_complete);
+
+	chip->irq_waiting = true;
+	if (!chip->resume_completed) {
+		pr_debug("IRQ triggered before device-resume\n");
+		disable_irq_nosync(irq);
+		mutex_unlock(&chip->irq_complete);
+		return IRQ_HANDLED;
+	}
+	chip->irq_waiting = false;
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+		rc = smb1351_read_reg(chip, handlers[i].stat_reg,
+						&handlers[i].val);
+		if (rc) {
+			pr_err("Couldn't read %d rc = %d\n",
+					handlers[i].stat_reg, rc);
+			continue;
+		}
+
+		for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) {
+			triggered = handlers[i].val
+			       & (IRQ_LATCHED_MASK << (j * BITS_PER_IRQ));
+			rt_stat = handlers[i].val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			prev_rt_stat = handlers[i].prev_val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			changed = prev_rt_stat ^ rt_stat;
+
+			if (triggered || changed)
+				rt_stat ? handlers[i].irq_info[j].high++ :
+						handlers[i].irq_info[j].low++;
+
+			if ((triggered || changed)
+				&& handlers[i].irq_info[j].smb_irq != NULL) {
+				handler_count++;
+				rc = handlers[i].irq_info[j].smb_irq(chip,
+								rt_stat);
+				if (rc)
+					pr_err("Couldn't handle %d irq for reg 0x%02x rc = %d\n",
+						j, handlers[i].stat_reg, rc);
+			}
+		}
+		handlers[i].prev_val = handlers[i].val;
+	}
+
+	pr_debug("handler count = %d\n", handler_count);
+	if (handler_count) {
+		pr_debug("batt psy changed\n");
+		power_supply_changed(chip->batt_psy);
+	}
+
+	mutex_unlock(&chip->irq_complete);
+
+	return IRQ_HANDLED;
+}
+
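+/*
+ * Supplier change notification: re-read the USB online state and
+ * maximum current from the USB power supply and re-apply the input
+ * current limit.
+ */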
+static void smb1351_external_power_changed(struct power_supply *psy)
+{
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+	union power_supply_propval prop = {0,};
+	int rc, current_limit = 0, online = 0;
+
+	if (chip->bms_psy_name)
+		chip->bms_psy =
+			power_supply_get_by_name((char *)chip->bms_psy_name);
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_ONLINE, &prop);
+	if (rc)
+		pr_err("Couldn't read USB online property, rc=%d\n", rc);
+	else
+		online = prop.intval;
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+	if (rc)
+		pr_err("Couldn't read USB current_max property, rc=%d\n", rc);
+	else
+		current_limit = prop.intval / 1000;
+
+	pr_debug("online = %d, current_limit = %d\n", online, current_limit);
+
+	smb1351_enable_volatile_writes(chip);
+	smb1351_set_usb_chg_current(chip, current_limit);
+
+	pr_debug("updating batt psy\n");
+}
+
+#define LAST_CNFG_REG	0x16
+static int show_cnfg_regs(struct seq_file *m, void *data)
+{
+	struct smb1351_charger *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cnfg_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_cnfg_regs, chip);
+}
+
+static const struct file_operations cnfg_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cnfg_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_CMD_REG	0x30
+#define LAST_CMD_REG	0x34
+static int show_cmd_regs(struct seq_file *m, void *data)
+{
+	struct smb1351_charger *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cmd_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_cmd_regs, chip);
+}
+
+static const struct file_operations cmd_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cmd_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_STATUS_REG	0x36
+#define LAST_STATUS_REG		0x3F
+static int show_status_regs(struct seq_file *m, void *data)
+{
+	struct smb1351_charger *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int status_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_status_regs, chip);
+}
+
+static const struct file_operations status_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= status_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_irq_count(struct seq_file *m, void *data)
+{
+	int i, j, total = 0;
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++)
+		for (j = 0; j < 4; j++) {
+			seq_printf(m, "%s=%d\t(high=%d low=%d)\n",
+						handlers[i].irq_info[j].name,
+						handlers[i].irq_info[j].high
+						+ handlers[i].irq_info[j].low,
+						handlers[i].irq_info[j].high,
+						handlers[i].irq_info[j].low);
+			total += (handlers[i].irq_info[j].high
+					+ handlers[i].irq_info[j].low);
+		}
+
+	seq_printf(m, "\n\tTotal = %d\n", total);
+
+	return 0;
+}
+
+static int irq_count_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_irq_count, chip);
+}
+
+static const struct file_operations irq_count_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= irq_count_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
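+/*
+ * debugfs peek/poke helpers: the "address" file selects a register and
+ * the "data" file reads or writes it.
+ */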
+static int get_reg(void *data, u64 *val)
+{
+	struct smb1351_charger *chip = data;
+	int rc;
+	u8 temp;
+
+	rc = smb1351_read_reg(chip, chip->peek_poke_address, &temp);
+	if (rc) {
+		pr_err("Couldn't read reg %x rc = %d\n",
+			chip->peek_poke_address, rc);
+		return -EAGAIN;
+	}
+	*val = temp;
+	return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+	struct smb1351_charger *chip = data;
+	int rc;
+	u8 temp;
+
+	temp = (u8) val;
+	rc = smb1351_write_reg(chip, chip->peek_poke_address, temp);
+	if (rc) {
+		pr_err("Couldn't write 0x%02x to 0x%02x rc= %d\n",
+			temp, chip->peek_poke_address, rc);
+		return -EAGAIN;
+	}
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n");
+
+static int force_irq_set(void *data, u64 val)
+{
+	struct smb1351_charger *chip = data;
+
+	smb1351_chg_stat_handler(chip->client->irq, data);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_irq_ops, NULL, force_irq_set, "0x%02llx\n");
+
+#ifdef DEBUG
+static void dump_regs(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (rc)
+			pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (rc)
+			pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (rc)
+			pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+}
+#else
+static void dump_regs(struct smb1351_charger *chip)
+{
+}
+#endif
+
+static int smb1351_parse_dt(struct smb1351_charger *chip)
+{
+	int rc;
+	struct device_node *node = chip->dev->of_node;
+
+	if (!node) {
+		pr_err("device tree info missing\n");
+		return -EINVAL;
+	}
+
+	chip->usb_suspended_status = of_property_read_bool(node,
+					"qcom,charging-disabled");
+
+	chip->chg_autonomous_mode = of_property_read_bool(node,
+					"qcom,chg-autonomous-mode");
+
+	chip->disable_apsd = of_property_read_bool(node, "qcom,disable-apsd");
+
+	chip->using_pmic_therm = of_property_read_bool(node,
+						"qcom,using-pmic-therm");
+	chip->bms_controlled_charging  = of_property_read_bool(node,
+					"qcom,bms-controlled-charging");
+	chip->force_hvdcp_2p0 = of_property_read_bool(node,
+					"qcom,force-hvdcp-2p0");
+
+	rc = of_property_read_string(node, "qcom,bms-psy-name",
+						&chip->bms_psy_name);
+	if (rc)
+		chip->bms_psy_name = NULL;
+
+	rc = of_property_read_u32(node, "qcom,fastchg-current-max-ma",
+					&chip->target_fastchg_current_max_ma);
+	if (rc)
+		chip->target_fastchg_current_max_ma = SMB1351_CHG_FAST_MAX_MA;
+
+	chip->iterm_disabled = of_property_read_bool(node,
+					"qcom,iterm-disabled");
+
+	rc = of_property_read_u32(node, "qcom,iterm-ma", &chip->iterm_ma);
+	if (rc)
+		chip->iterm_ma = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc)
+		chip->vfloat_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,recharge-mv",
+						&chip->recharge_mv);
+	if (rc)
+		chip->recharge_mv = -EINVAL;
+
+	chip->recharge_disabled = of_property_read_bool(node,
+					"qcom,recharge-disabled");
+
+	/* thermal and jeita support */
+	rc = of_property_read_u32(node, "qcom,batt-cold-decidegc",
+						&chip->batt_cold_decidegc);
+	if (rc < 0)
+		chip->batt_cold_decidegc = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,batt-hot-decidegc",
+						&chip->batt_hot_decidegc);
+	if (rc < 0)
+		chip->batt_hot_decidegc = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,batt-warm-decidegc",
+						&chip->batt_warm_decidegc);
+
+	rc |= of_property_read_u32(node, "qcom,batt-cool-decidegc",
+						&chip->batt_cool_decidegc);
+
+	if (!rc) {
+		rc = of_property_read_u32(node, "qcom,batt-cool-mv",
+						&chip->batt_cool_mv);
+
+		rc |= of_property_read_u32(node, "qcom,batt-warm-mv",
+						&chip->batt_warm_mv);
+
+		rc |= of_property_read_u32(node, "qcom,batt-cool-ma",
+						&chip->batt_cool_ma);
+
+		rc |= of_property_read_u32(node, "qcom,batt-warm-ma",
+						&chip->batt_warm_ma);
+		if (rc)
+			chip->jeita_supported = false;
+		else
+			chip->jeita_supported = true;
+	}
+
+	pr_debug("jeita_supported = %d\n", chip->jeita_supported);
+
+	rc = of_property_read_u32(node, "qcom,batt-missing-decidegc",
+						&chip->batt_missing_decidegc);
+
+	chip->pinctrl_state_name = of_get_property(node, "pinctrl-names", NULL);
+
+	return 0;
+}
+
+static int smb1351_determine_initial_state(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0;
+
+	/*
+	 * It is okay to read the interrupt status here since
+	 * interrupts have not been requested yet. Reading the interrupt
+	 * status clears the interrupt, so be careful to read it only
+	 * in interrupt-handling code.
+	 */
+
+	rc = smb1351_read_reg(chip, IRQ_B_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_B rc = %d\n", rc);
+		goto fail_init_status;
+	}
+
+	chip->battery_missing = (reg & IRQ_BATT_MISSING_BIT) ? true : false;
+
+	rc = smb1351_read_reg(chip, IRQ_C_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_C rc = %d\n", rc);
+		goto fail_init_status;
+	}
+	chip->batt_full = (reg & IRQ_TERM_BIT) ? true : false;
+
+	rc = smb1351_read_reg(chip, IRQ_A_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read irq A rc = %d\n", rc);
+		return rc;
+	}
+
+	if (reg & IRQ_HOT_HARD_BIT)
+		chip->batt_hot = true;
+	if (reg & IRQ_COLD_HARD_BIT)
+		chip->batt_cold = true;
+	if (reg & IRQ_HOT_SOFT_BIT)
+		chip->batt_warm = true;
+	if (reg & IRQ_COLD_SOFT_BIT)
+		chip->batt_cool = true;
+
+	rc = smb1351_read_reg(chip, IRQ_E_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_E rc = %d\n", rc);
+		goto fail_init_status;
+	}
+
+	if (reg & IRQ_USBIN_UV_BIT) {
+		smb1351_usbin_uv_handler(chip, 1);
+	} else {
+		smb1351_usbin_uv_handler(chip, 0);
+		smb1351_apsd_complete_handler(chip, 1);
+	}
+
+	rc = smb1351_read_reg(chip, IRQ_G_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_G rc = %d\n", rc);
+		goto fail_init_status;
+	}
+
+	if (reg & IRQ_SOURCE_DET_BIT)
+		smb1351_apsd_complete_handler(chip, 1);
+
+	return 0;
+
+fail_init_status:
+	pr_err("Couldn't determine initial status\n");
+	return rc;
+}
+
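+/* a node with "qcom,parallel-charger" is probed as the parallel charger */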
+static int is_parallel_charger(struct i2c_client *client)
+{
+	struct device_node *node = client->dev.of_node;
+
+	return of_property_read_bool(node, "qcom,parallel-charger");
+}
+
+static int create_debugfs_entries(struct smb1351_charger *chip)
+{
+	struct dentry *ent;
+
+	chip->debug_root = debugfs_create_dir("smb1351", NULL);
+	if (!chip->debug_root) {
+		pr_err("Couldn't create debug dir\n");
+	} else {
+		ent = debugfs_create_file("config_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cnfg_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create cnfg debug file\n");
+
+		ent = debugfs_create_file("status_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &status_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create status debug file\n");
+
+		ent = debugfs_create_file("cmd_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cmd_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create cmd debug file\n");
+
+		ent = debugfs_create_x32("address", S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->peek_poke_address));
+		if (!ent)
+			pr_err("Couldn't create address debug file\n");
+
+		ent = debugfs_create_file("data", S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &poke_poke_debug_ops);
+		if (!ent)
+			pr_err("Couldn't create data debug file\n");
+
+		ent = debugfs_create_file("force_irq",
+					  S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &force_irq_ops);
+		if (!ent)
+			pr_err("Couldn't create force_irq debug file\n");
+
+		ent = debugfs_create_file("irq_count", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &irq_count_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create irq_count debug file\n");
+	}
+	return 0;
+}
+
+static int smb1351_main_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb1351_charger *chip;
+	struct power_supply *usb_psy;
+	struct power_supply_config batt_psy_cfg = {};
+	u8 reg = 0;
+
+	usb_psy = power_supply_get_by_name("usb");
+	if (!usb_psy) {
+		pr_debug("USB psy not found; deferring probe\n");
+		return -EPROBE_DEFER;
+	}
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+	chip->usb_psy = usb_psy;
+	chip->fake_battery_soc = -EINVAL;
+	INIT_DELAYED_WORK(&chip->chg_remove_work, smb1351_chg_remove_work);
+	INIT_DELAYED_WORK(&chip->hvdcp_det_work, smb1351_hvdcp_det_work);
+	device_init_wakeup(chip->dev, true);
+
+	/* probe the device to check if it's actually connected */
+	rc = smb1351_read_reg(chip, CHG_REVISION_REG, &reg);
+	if (rc) {
+		pr_err("Failed to detect smb1351, device may be absent\n");
+		return -ENODEV;
+	}
+	pr_debug("smb1351 chip revision is %d\n", reg);
+
+	rc = smb1351_parse_dt(chip);
+	if (rc) {
+		pr_err("Couldn't parse DT nodes rc=%d\n", rc);
+		return rc;
+	}
+
+	/* using vadc and adc_tm for implementing pmic therm */
+	if (chip->using_pmic_therm) {
+		chip->vadc_dev = qpnp_get_vadc(chip->dev, "chg");
+		if (IS_ERR(chip->vadc_dev)) {
+			rc = PTR_ERR(chip->vadc_dev);
+			if (rc != -EPROBE_DEFER)
+				pr_err("vadc property missing\n");
+			return rc;
+		}
+		chip->adc_tm_dev = qpnp_get_adc_tm(chip->dev, "chg");
+		if (IS_ERR(chip->adc_tm_dev)) {
+			rc = PTR_ERR(chip->adc_tm_dev);
+			if (rc != -EPROBE_DEFER)
+				pr_err("adc_tm property missing\n");
+			return rc;
+		}
+	}
+
+	i2c_set_clientdata(client, chip);
+
+	chip->batt_psy_d.name		= "battery";
+	chip->batt_psy_d.type		= POWER_SUPPLY_TYPE_BATTERY;
+	chip->batt_psy_d.get_property	= smb1351_battery_get_property;
+	chip->batt_psy_d.set_property	= smb1351_battery_set_property;
+	chip->batt_psy_d.property_is_writeable =
+					smb1351_batt_property_is_writeable;
+	chip->batt_psy_d.properties	= smb1351_battery_properties;
+	chip->batt_psy_d.num_properties	=
+				ARRAY_SIZE(smb1351_battery_properties);
+	chip->batt_psy_d.external_power_changed =
+					smb1351_external_power_changed;
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	batt_psy_cfg.drv_data = chip;
+	batt_psy_cfg.supplied_to = pm_batt_supplied_to;
+	batt_psy_cfg.num_supplicants = ARRAY_SIZE(pm_batt_supplied_to);
+	chip->batt_psy = devm_power_supply_register(chip->dev,
+			&chip->batt_psy_d,
+			&batt_psy_cfg);
+	if (IS_ERR(chip->batt_psy)) {
+		pr_err("Couldn't register batt psy rc=%ld\n",
+				PTR_ERR(chip->batt_psy));
+		return PTR_ERR(chip->batt_psy);
+	}
+
+	dump_regs(chip);
+
+	rc = smb1351_regulator_init(chip);
+	if (rc) {
+		pr_err("Couldn't initialize smb1351 regulator rc=%d\n", rc);
+		goto fail_smb1351_regulator_init;
+	}
+
+	rc = smb1351_hw_init(chip);
+	if (rc) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto fail_smb1351_hw_init;
+	}
+
+	rc = smb1351_determine_initial_state(chip);
+	if (rc) {
+		pr_err("Couldn't determine initial state rc=%d\n", rc);
+		goto fail_smb1351_hw_init;
+	}
+
+	/* STAT irq configuration */
+	if (client->irq) {
+		rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+				smb1351_chg_stat_handler,
+				IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				"smb1351_chg_stat_irq", chip);
+		if (rc) {
+			pr_err("Failed STAT irq=%d request rc = %d\n",
+				client->irq, rc);
+			goto fail_smb1351_hw_init;
+		}
+		enable_irq_wake(client->irq);
+	}
+
+	if (chip->using_pmic_therm) {
+		if (!chip->jeita_supported) {
+			/* add hot/cold temperature monitor */
+			chip->adc_param.low_temp = chip->batt_cold_decidegc;
+			chip->adc_param.high_temp = chip->batt_hot_decidegc;
+		} else {
+			chip->adc_param.low_temp = chip->batt_cool_decidegc;
+			chip->adc_param.high_temp = chip->batt_warm_decidegc;
+		}
+		chip->adc_param.timer_interval = ADC_MEAS1_INTERVAL_500MS;
+		chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
+		chip->adc_param.btm_ctx = chip;
+		chip->adc_param.threshold_notification =
+				smb1351_chg_adc_notification;
+		chip->adc_param.channel = LR_MUX1_BATT_THERM;
+
+		rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+							&chip->adc_param);
+		if (rc) {
+			pr_err("Couldn't request ADC channel rc = %d\n", rc);
+			goto fail_smb1351_hw_init;
+		}
+	}
+
+	create_debugfs_entries(chip);
+
+	dump_regs(chip);
+
+	pr_info("smb1351 successfully probed. charger=%d, batt=%d version=%s\n",
+			chip->chg_present,
+			smb1351_get_prop_batt_present(chip),
+			smb1351_version_str[chip->version]);
+	return 0;
+
+fail_smb1351_hw_init:
+	regulator_unregister(chip->otg_vreg.rdev);
+fail_smb1351_regulator_init:
+	return rc;
+}
+
+static int smb1351_parallel_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb1351_charger *chip;
+	struct device_node *node = client->dev.of_node;
+	struct power_supply_config parallel_psy_cfg = {};
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+	chip->parallel_charger = true;
+	chip->parallel_charger_suspended = true;
+
+	chip->usb_suspended_status = of_property_read_bool(node,
+					"qcom,charging-disabled");
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc)
+		chip->vfloat_mv = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,recharge-mv",
+						&chip->recharge_mv);
+	if (rc)
+		chip->recharge_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,parallel-en-pin-polarity",
+					&chip->parallel_pin_polarity_setting);
+	if (rc)
+		chip->parallel_pin_polarity_setting = EN_BY_PIN_LOW_ENABLE;
+	else
+		chip->parallel_pin_polarity_setting =
+				chip->parallel_pin_polarity_setting ?
+				EN_BY_PIN_HIGH_ENABLE : EN_BY_PIN_LOW_ENABLE;
+
+	if (of_property_read_bool(node,
+				"qcom,parallel-external-current-sense"))
+		chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN_EXT;
+	else
+		chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN;
+
+	i2c_set_clientdata(client, chip);
+
+	chip->parallel_psy_d.name = "parallel";
+	chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_PARALLEL;
+	chip->parallel_psy_d.get_property = smb1351_parallel_get_property;
+	chip->parallel_psy_d.set_property = smb1351_parallel_set_property;
+	chip->parallel_psy_d.properties	= smb1351_parallel_properties;
+	chip->parallel_psy_d.property_is_writeable
+				= smb1351_parallel_is_writeable;
+	chip->parallel_psy_d.num_properties
+				= ARRAY_SIZE(smb1351_parallel_properties);
+
+	parallel_psy_cfg.drv_data = chip;
+	parallel_psy_cfg.num_supplicants = 0;
+	chip->parallel_psy = devm_power_supply_register(chip->dev,
+			&chip->parallel_psy_d,
+			&parallel_psy_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		pr_err("Couldn't register parallel psy rc=%ld\n",
+				PTR_ERR(chip->parallel_psy));
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	create_debugfs_entries(chip);
+
+	pr_info("smb1351 parallel successfully probed.\n");
+
+	return 0;
+}
+
+static int smb1351_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	if (is_parallel_charger(client))
+		return smb1351_parallel_charger_probe(client, id);
+	else
+		return smb1351_main_charger_probe(client, id);
+}
+
+static int smb1351_charger_remove(struct i2c_client *client)
+{
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	cancel_delayed_work_sync(&chip->chg_remove_work);
+
+	mutex_destroy(&chip->irq_complete);
+	debugfs_remove_recursive(chip->debug_root);
+	return 0;
+}
+
+static int smb1351_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = false;
+	mutex_unlock(&chip->irq_complete);
+
+	return 0;
+}
+
+static int smb1351_suspend_noirq(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	if (chip->irq_waiting) {
+		pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int smb1351_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = true;
+	if (chip->irq_waiting) {
+		mutex_unlock(&chip->irq_complete);
+		smb1351_chg_stat_handler(client->irq, chip);
+		enable_irq(client->irq);
+	} else {
+		mutex_unlock(&chip->irq_complete);
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops smb1351_pm_ops = {
+	.suspend	= smb1351_suspend,
+	.suspend_noirq	= smb1351_suspend_noirq,
+	.resume		= smb1351_resume,
+};
+
+static const struct of_device_id smb1351_match_table[] = {
+	{ .compatible = "qcom,smb1351-charger",},
+	{ },
+};
+
+static const struct i2c_device_id smb1351_charger_id[] = {
+	{"smb1351-charger", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, smb1351_charger_id);
+
+static struct i2c_driver smb1351_charger_driver = {
+	.driver		= {
+		.name		= "smb1351-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= smb1351_match_table,
+		.pm		= &smb1351_pm_ops,
+	},
+	.probe		= smb1351_charger_probe,
+	.remove		= smb1351_charger_remove,
+	.id_table	= smb1351_charger_id,
+};
+
+module_i2c_driver(smb1351_charger_driver);
+
+MODULE_DESCRIPTION("smb1351 Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:smb1351-charger");
diff --git a/drivers/power/supply/qcom/smb135x-charger.c b/drivers/power/supply/qcom/smb135x-charger.c
new file mode 100644
index 0000000..803dd6e
--- /dev/null
+++ b/drivers/power/supply/qcom/smb135x-charger.c
@@ -0,0 +1,4578 @@
+/* Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/i2c.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/pinctrl/consumer.h>
+
+#define SMB135X_BITS_PER_REG	8
+
+/* Mask/Bit helpers */
+#define _SMB135X_MASK(BITS, POS) \
+	((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB135X_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+		_SMB135X_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+				(RIGHT_BIT_POS))
+
+/* Config registers */
+#define CFG_3_REG			0x03
+#define CHG_ITERM_50MA			0x08
+#define CHG_ITERM_100MA			0x10
+#define CHG_ITERM_150MA			0x18
+#define CHG_ITERM_200MA			0x20
+#define CHG_ITERM_250MA			0x28
+#define CHG_ITERM_300MA			0x00
+#define CHG_ITERM_500MA			0x30
+#define CHG_ITERM_600MA			0x38
+#define CHG_ITERM_MASK			SMB135X_MASK(5, 3)
+
+#define CFG_4_REG			0x04
+#define CHG_INHIBIT_MASK		SMB135X_MASK(7, 6)
+#define CHG_INHIBIT_50MV_VAL		0x00
+#define CHG_INHIBIT_100MV_VAL		0x40
+#define CHG_INHIBIT_200MV_VAL		0x80
+#define CHG_INHIBIT_300MV_VAL		0xC0
+
+#define CFG_5_REG			0x05
+#define RECHARGE_200MV_BIT		BIT(2)
+#define USB_2_3_BIT			BIT(5)
+
+#define CFG_A_REG			0x0A
+#define DCIN_INPUT_MASK			SMB135X_MASK(4, 0)
+
+#define CFG_C_REG			0x0C
+#define USBIN_INPUT_MASK		SMB135X_MASK(4, 0)
+#define USBIN_ADAPTER_ALLOWANCE_MASK	SMB135X_MASK(7, 5)
+#define ALLOW_5V_ONLY			0x00
+#define ALLOW_5V_OR_9V			0x20
+#define ALLOW_5V_TO_9V			0x40
+#define ALLOW_9V_ONLY			0x60
+
+#define CFG_D_REG			0x0D
+
+#define CFG_E_REG			0x0E
+#define POLARITY_100_500_BIT		BIT(2)
+#define USB_CTRL_BY_PIN_BIT		BIT(1)
+#define HVDCP_5_9_BIT			BIT(4)
+
+#define CFG_11_REG			0x11
+#define PRIORITY_BIT			BIT(7)
+#define AUTO_SRC_DET_EN_BIT			BIT(0)
+
+#define USBIN_DCIN_CFG_REG		0x12
+#define USBIN_SUSPEND_VIA_COMMAND_BIT	BIT(6)
+
+#define CFG_14_REG			0x14
+#define CHG_EN_BY_PIN_BIT		BIT(7)
+#define CHG_EN_ACTIVE_LOW_BIT		BIT(6)
+#define CHG_EN_ACTIVE_HIGH_BIT		0x0
+#define PRE_TO_FAST_REQ_CMD_BIT		BIT(5)
+#define DISABLE_CURRENT_TERM_BIT	BIT(3)
+#define DISABLE_AUTO_RECHARGE_BIT	BIT(2)
+#define EN_CHG_INHIBIT_BIT		BIT(0)
+
+#define CFG_16_REG			0x16
+#define SAFETY_TIME_EN_BIT		BIT(5)
+#define SAFETY_TIME_EN_SHIFT		5
+#define SAFETY_TIME_MINUTES_MASK	SMB135X_MASK(3, 2)
+#define SAFETY_TIME_MINUTES_SHIFT	2
+
+#define CFG_17_REG			0x17
+#define CHG_STAT_DISABLE_BIT		BIT(0)
+#define CHG_STAT_ACTIVE_HIGH_BIT	BIT(1)
+#define CHG_STAT_IRQ_ONLY_BIT		BIT(4)
+
+#define CFG_19_REG			0x19
+#define BATT_MISSING_ALGO_BIT		BIT(2)
+#define BATT_MISSING_THERM_BIT		BIT(1)
+
+#define CFG_1A_REG			0x1A
+#define HOT_SOFT_VFLOAT_COMP_EN_BIT	BIT(3)
+#define COLD_SOFT_VFLOAT_COMP_EN_BIT	BIT(2)
+#define HOT_SOFT_CURRENT_COMP_EN_BIT	BIT(1)
+#define COLD_SOFT_CURRENT_COMP_EN_BIT	BIT(0)
+
+#define CFG_1B_REG			0x1B
+#define COLD_HARD_MASK			SMB135X_MASK(7, 6)
+#define COLD_HARD_SHIFT			6
+#define HOT_HARD_MASK			SMB135X_MASK(5, 4)
+#define HOT_HARD_SHIFT			4
+#define COLD_SOFT_MASK			SMB135X_MASK(3, 2)
+#define COLD_SOFT_SHIFT			2
+#define HOT_SOFT_MASK			SMB135X_MASK(1, 0)
+#define HOT_SOFT_SHIFT			0
+
+#define VFLOAT_REG			0x1E
+
+#define VERSION1_REG			0x2A
+#define VERSION1_MASK			SMB135X_MASK(7,	6)
+#define VERSION1_SHIFT			6
+#define VERSION2_REG			0x32
+#define VERSION2_MASK			SMB135X_MASK(1,	0)
+#define VERSION3_REG			0x34
+
+/* Irq Config registers */
+#define IRQ_CFG_REG			0x07
+#define IRQ_BAT_HOT_COLD_HARD_BIT	BIT(7)
+#define IRQ_BAT_HOT_COLD_SOFT_BIT	BIT(6)
+#define IRQ_OTG_OVER_CURRENT_BIT	BIT(4)
+#define IRQ_USBIN_UV_BIT		BIT(2)
+#define IRQ_INTERNAL_TEMPERATURE_BIT	BIT(0)
+
+#define IRQ2_CFG_REG			0x08
+#define IRQ2_SAFETY_TIMER_BIT		BIT(7)
+#define IRQ2_CHG_ERR_BIT		BIT(6)
+#define IRQ2_CHG_PHASE_CHANGE_BIT	BIT(4)
+#define IRQ2_CHG_INHIBIT_BIT		BIT(3)
+#define IRQ2_POWER_OK_BIT		BIT(2)
+#define IRQ2_BATT_MISSING_BIT		BIT(1)
+#define IRQ2_VBAT_LOW_BIT		BIT(0)
+
+#define IRQ3_CFG_REG			0x09
+#define IRQ3_RID_DETECT_BIT		BIT(4)
+#define IRQ3_SRC_DETECT_BIT		BIT(2)
+#define IRQ3_DCIN_UV_BIT		BIT(0)
+
+#define USBIN_OTG_REG			0x0F
+#define OTG_CNFG_MASK			SMB135X_MASK(3,	2)
+#define OTG_CNFG_PIN_CTRL		0x04
+#define OTG_CNFG_COMMAND_CTRL		0x08
+#define OTG_CNFG_AUTO_CTRL		0x0C
+
+/* Command Registers */
+#define CMD_I2C_REG			0x40
+#define ALLOW_VOLATILE_BIT		BIT(6)
+
+#define CMD_INPUT_LIMIT			0x41
+#define USB_SHUTDOWN_BIT		BIT(6)
+#define DC_SHUTDOWN_BIT			BIT(5)
+#define USE_REGISTER_FOR_CURRENT	BIT(2)
+#define USB_100_500_AC_MASK		SMB135X_MASK(1, 0)
+#define USB_100_VAL			0x02
+#define USB_500_VAL			0x00
+#define USB_AC_VAL			0x01
+
+#define CMD_CHG_REG			0x42
+#define CMD_CHG_EN			BIT(1)
+#define OTG_EN				BIT(0)
+
+/* Status registers */
+#define STATUS_1_REG			0x47
+#define USING_USB_BIT			BIT(1)
+#define USING_DC_BIT			BIT(0)
+
+#define STATUS_2_REG			0x48
+#define HARD_LIMIT_STS_BIT		BIT(6)
+
+#define STATUS_4_REG			0x4A
+#define BATT_NET_CHG_CURRENT_BIT	BIT(7)
+#define BATT_LESS_THAN_2V		BIT(4)
+#define CHG_HOLD_OFF_BIT		BIT(3)
+#define CHG_TYPE_MASK			SMB135X_MASK(2, 1)
+#define CHG_TYPE_SHIFT			1
+#define BATT_NOT_CHG_VAL		0x0
+#define BATT_PRE_CHG_VAL		0x1
+#define BATT_FAST_CHG_VAL		0x2
+#define BATT_TAPER_CHG_VAL		0x3
+#define CHG_EN_BIT			BIT(0)
+
+#define STATUS_5_REG			0x4B
+#define CDP_BIT				BIT(7)
+#define DCP_BIT				BIT(6)
+#define OTHER_BIT			BIT(5)
+#define SDP_BIT				BIT(4)
+#define ACA_A_BIT			BIT(3)
+#define ACA_B_BIT			BIT(2)
+#define ACA_C_BIT			BIT(1)
+#define ACA_DOCK_BIT			BIT(0)
+
+#define STATUS_6_REG			0x4C
+#define RID_FLOAT_BIT			BIT(3)
+#define RID_A_BIT			BIT(2)
+#define RID_B_BIT			BIT(1)
+#define RID_C_BIT			BIT(0)
+
+#define STATUS_7_REG			0x4D
+
+#define STATUS_8_REG			0x4E
+#define USBIN_9V			BIT(5)
+#define USBIN_UNREG			BIT(4)
+#define USBIN_LV			BIT(3)
+#define DCIN_9V				BIT(2)
+#define DCIN_UNREG			BIT(1)
+#define DCIN_LV				BIT(0)
+
+#define STATUS_9_REG			0x4F
+#define REV_MASK			SMB135X_MASK(3, 0)
+
+/* Irq Status registers */
+#define IRQ_A_REG			0x50
+#define IRQ_A_HOT_HARD_BIT		BIT(6)
+#define IRQ_A_COLD_HARD_BIT		BIT(4)
+#define IRQ_A_HOT_SOFT_BIT		BIT(2)
+#define IRQ_A_COLD_SOFT_BIT		BIT(0)
+
+#define IRQ_B_REG			0x51
+#define IRQ_B_BATT_TERMINAL_BIT		BIT(6)
+#define IRQ_B_BATT_MISSING_BIT		BIT(4)
+#define IRQ_B_VBAT_LOW_BIT		BIT(2)
+#define IRQ_B_TEMPERATURE_BIT		BIT(0)
+
+#define IRQ_C_REG			0x52
+#define IRQ_C_TERM_BIT			BIT(0)
+#define IRQ_C_FASTCHG_BIT		BIT(6)
+
+#define IRQ_D_REG			0x53
+#define IRQ_D_TIMEOUT_BIT		BIT(2)
+
+#define IRQ_E_REG			0x54
+#define IRQ_E_DC_OV_BIT			BIT(6)
+#define IRQ_E_DC_UV_BIT			BIT(4)
+#define IRQ_E_USB_OV_BIT		BIT(2)
+#define IRQ_E_USB_UV_BIT		BIT(0)
+
+#define IRQ_F_REG			0x55
+#define IRQ_F_POWER_OK_BIT		BIT(0)
+
+#define IRQ_G_REG			0x56
+#define IRQ_G_SRC_DETECT_BIT		BIT(6)
+
+enum {
+	WRKARND_USB100_BIT = BIT(0),
+	WRKARND_APSD_FAIL = BIT(1),
+};
+
+enum {
+	REV_1 = 1,	/* Rev 1.0 */
+	REV_1_1 = 2,	/* Rev 1.1 */
+	REV_2 = 3,		/* Rev 2 */
+	REV_2_1 = 5,	/* Rev 2.1 */
+	REV_MAX,
+};
+
+static char *revision_str[] = {
+	[REV_1] = "rev1",
+	[REV_1_1] = "rev1.1",
+	[REV_2] = "rev2",
+	[REV_2_1] = "rev2.1",
+};
+
+enum {
+	V_SMB1356,
+	V_SMB1357,
+	V_SMB1358,
+	V_SMB1359,
+	V_MAX,
+};
+
+static int version_data[] = {
+	[V_SMB1356] = V_SMB1356,
+	[V_SMB1357] = V_SMB1357,
+	[V_SMB1358] = V_SMB1358,
+	[V_SMB1359] = V_SMB1359,
+};
+
+static char *version_str[] = {
+	[V_SMB1356] = "smb1356",
+	[V_SMB1357] = "smb1357",
+	[V_SMB1358] = "smb1358",
+	[V_SMB1359] = "smb1359",
+};
+
+enum {
+	USER = BIT(0),
+	THERMAL = BIT(1),
+	CURRENT = BIT(2),
+};
+
+enum path_type {
+	USB,
+	DC,
+};
+
+static int chg_time[] = {
+	192,
+	384,
+	768,
+	1536,
+};
+
+static char *pm_batt_supplied_to[] = {
+	"bms",
+};
+
+struct smb135x_regulator {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+};
+
+struct smb135x_chg {
+	struct i2c_client		*client;
+	struct device			*dev;
+	struct mutex			read_write_lock;
+
+	u8				revision;
+	int				version;
+
+	bool				chg_enabled;
+	bool				chg_disabled_permanently;
+
+	bool				usb_present;
+	bool				dc_present;
+	bool				usb_slave_present;
+	bool				dc_ov;
+
+	bool				bmd_algo_disabled;
+	bool				iterm_disabled;
+	int				iterm_ma;
+	int				vfloat_mv;
+	int				safety_time;
+	int				resume_delta_mv;
+	int				fake_battery_soc;
+	struct dentry			*debug_root;
+	int				usb_current_arr_size;
+	int				*usb_current_table;
+	int				dc_current_arr_size;
+	int				*dc_current_table;
+	bool				inhibit_disabled;
+	int				fastchg_current_arr_size;
+	int				*fastchg_current_table;
+	int				fastchg_ma;
+	u8				irq_cfg_mask[3];
+	int				otg_oc_count;
+	struct delayed_work		reset_otg_oc_count_work;
+	struct mutex			otg_oc_count_lock;
+	struct delayed_work		hvdcp_det_work;
+
+	bool				parallel_charger;
+	bool				parallel_charger_present;
+	bool				bms_controlled_charging;
+	u32				parallel_pin_polarity_setting;
+
+	/* psy */
+	struct power_supply		*usb_psy;
+	int				usb_psy_ma;
+	int				real_usb_psy_ma;
+	struct power_supply_desc	batt_psy_d;
+	struct power_supply		*batt_psy;
+	struct power_supply_desc	dc_psy_d;
+	struct power_supply		*dc_psy;
+	struct power_supply_desc	parallel_psy_d;
+	struct power_supply		*parallel_psy;
+	struct power_supply		*bms_psy;
+	int				dc_psy_type;
+	int				dc_psy_ma;
+	const char			*bms_psy_name;
+
+	/* status tracking */
+	bool				chg_done_batt_full;
+	bool				batt_present;
+	bool				batt_hot;
+	bool				batt_cold;
+	bool				batt_warm;
+	bool				batt_cool;
+
+	bool				resume_completed;
+	bool				irq_waiting;
+	u32				usb_suspended;
+	u32				dc_suspended;
+	struct mutex			path_suspend_lock;
+
+	u32				peek_poke_address;
+	struct smb135x_regulator	otg_vreg;
+	int				skip_writes;
+	int				skip_reads;
+	u32				workaround_flags;
+	bool				soft_vfloat_comp_disabled;
+	bool				soft_current_comp_disabled;
+	struct mutex			irq_complete;
+	struct regulator		*therm_bias_vreg;
+	struct regulator		*usb_pullup_vreg;
+	struct delayed_work		wireless_insertion_work;
+
+	unsigned int			thermal_levels;
+	unsigned int			therm_lvl_sel;
+	unsigned int			*thermal_mitigation;
+	unsigned int			gamma_setting_num;
+	unsigned int			*gamma_setting;
+	struct mutex			current_change_lock;
+
+	const char			*pinctrl_state_name;
+	struct pinctrl			*smb_pinctrl;
+
+	bool				apsd_rerun;
+	bool				id_line_not_connected;
+};
+
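+/* retry failed I2C accesses a few times with increasing delays */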
+#define RETRY_COUNT 5
+static int retry_sleep_ms[RETRY_COUNT] = {
+	10, 20, 30, 40, 50
+};
+
+static int __smb135x_read(struct smb135x_chg *chip, int reg,
+				u8 *val)
+{
+	s32 ret;
+	int retry_count = 0;
+
+retry:
+	ret = i2c_smbus_read_byte_data(chip->client, reg);
+	if (ret < 0 && retry_count < RETRY_COUNT) {
+		/* sleep for a few ms before retrying */
+		msleep(retry_sleep_ms[retry_count++]);
+		goto retry;
+	}
+	if (ret < 0) {
+		dev_err(chip->dev,
+			"i2c read fail: can't read from %02x: %d\n", reg, ret);
+		return ret;
+	}
+	*val = ret;
+
+	return 0;
+}
+
+static int __smb135x_write(struct smb135x_chg *chip, int reg,
+						u8 val)
+{
+	s32 ret;
+	int retry_count = 0;
+
+retry:
+	ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+	if (ret < 0 && retry_count < RETRY_COUNT) {
+		/* sleep for a few ms before retrying */
+		msleep(retry_sleep_ms[retry_count++]);
+		goto retry;
+	}
+	if (ret < 0) {
+		dev_err(chip->dev,
+			"i2c write fail: can't write %02x to %02x: %d\n",
+			val, reg, ret);
+		return ret;
+	}
+	pr_debug("Writing 0x%02x=0x%02x\n", reg, val);
+	return 0;
+}
+
+static int smb135x_read(struct smb135x_chg *chip, int reg,
+				u8 *val)
+{
+	int rc;
+
+	if (chip->skip_reads) {
+		*val = 0;
+		return 0;
+	}
+	mutex_lock(&chip->read_write_lock);
+	pm_stay_awake(chip->dev);
+	rc = __smb135x_read(chip, reg, val);
+	pm_relax(chip->dev);
+	mutex_unlock(&chip->read_write_lock);
+
+	return rc;
+}
+
+static int smb135x_write(struct smb135x_chg *chip, int reg,
+						u8 val)
+{
+	int rc;
+
+	if (chip->skip_writes)
+		return 0;
+
+	mutex_lock(&chip->read_write_lock);
+	pm_stay_awake(chip->dev);
+	rc = __smb135x_write(chip, reg, val);
+	pm_relax(chip->dev);
+	mutex_unlock(&chip->read_write_lock);
+
+	return rc;
+}
+
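+/* read-modify-write of a register, serialised by read_write_lock */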
+static int smb135x_masked_write(struct smb135x_chg *chip, int reg,
+						u8 mask, u8 val)
+{
+	s32 rc;
+	u8 temp;
+
+	if (chip->skip_writes || chip->skip_reads)
+		return 0;
+
+	mutex_lock(&chip->read_write_lock);
+	rc = __smb135x_read(chip, reg, &temp);
+	if (rc < 0) {
+		dev_err(chip->dev, "read failed: reg=%03X, rc=%d\n", reg, rc);
+		goto out;
+	}
+	temp &= ~mask;
+	temp |= val & mask;
+	rc = __smb135x_write(chip, reg, temp);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"write failed: reg=%03X, rc=%d\n", reg, rc);
+	}
+out:
+	mutex_unlock(&chip->read_write_lock);
+	return rc;
+}
+
+static int read_revision(struct smb135x_chg *chip, u8 *revision)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, STATUS_9_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read status 9 rc = %d\n", rc);
+		return rc;
+	}
+	*revision = (reg & REV_MASK);
+	return 0;
+}
+
+static int read_version1(struct smb135x_chg *chip, u8 *version)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, VERSION1_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version 1 rc = %d\n", rc);
+		return rc;
+	}
+	*version = (reg & VERSION1_MASK) >> VERSION1_SHIFT;
+	return 0;
+}
+
+static int read_version2(struct smb135x_chg *chip, u8 *version)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, VERSION2_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version 2 rc = %d\n", rc);
+		return rc;
+	}
+	*version = (reg & VERSION2_MASK);
+	return 0;
+}
+
+static int read_version3(struct smb135x_chg *chip, u8 *version)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, VERSION3_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version 3 rc = %d\n", rc);
+		return rc;
+	}
+	*version = reg;
+	return 0;
+}
+
+#define TRIM_23_REG		0x23
+#define CHECK_USB100_GOOD_BIT	BIT(1)
+static bool is_usb100_broken(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, TRIM_23_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read trim 23 rc = %d\n", rc);
+		return false;
+	}
+	return !!(reg & CHECK_USB100_GOOD_BIT);
+}
+
+static bool is_usb_slave_present(struct smb135x_chg *chip)
+{
+	bool usb_slave_present;
+	u8 reg;
+	int rc;
+
+	if (chip->id_line_not_connected)
+		return false;
+
+	rc = smb135x_read(chip, STATUS_6_REG, &reg);
+	if (rc < 0) {
+		pr_err("Couldn't read stat 6 rc = %d\n", rc);
+		return false;
+	}
+
+	if ((reg & (RID_FLOAT_BIT | RID_A_BIT | RID_B_BIT | RID_C_BIT)) == 0)
+		usb_slave_present = 1;
+	else
+		usb_slave_present = 0;
+
+	pr_debug("stat6= 0x%02x slave_present = %d\n", reg, usb_slave_present);
+	return usb_slave_present;
+}
+
+static char *usb_type_str[] = {
+	"ACA_DOCK",	/* bit 0 */
+	"ACA_C",	/* bit 1 */
+	"ACA_B",	/* bit 2 */
+	"ACA_A",	/* bit 3 */
+	"SDP",		/* bit 4 */
+	"OTHER",	/* bit 5 */
+	"DCP",		/* bit 6 */
+	"CDP",		/* bit 7 */
+	"NONE",		/* bit 8  error case */
+};
+
+/* helper to return the string of USB type */
+static char *get_usb_type_name(u8 stat_5)
+{
+	unsigned long stat = stat_5;
+
+	return usb_type_str[find_first_bit(&stat, SMB135X_BITS_PER_REG)];
+}
+
+static enum power_supply_type usb_type_enum[] = {
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 0 */
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 1 */
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 2 */
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 3 */
+	POWER_SUPPLY_TYPE_USB,		/* bit 4 */
+	POWER_SUPPLY_TYPE_UNKNOWN,	/* bit 5 */
+	POWER_SUPPLY_TYPE_USB_DCP,	/* bit 6 */
+	POWER_SUPPLY_TYPE_USB_CDP,	/* bit 7 */
+	POWER_SUPPLY_TYPE_UNKNOWN,	/* bit 8 error case, report UNKNOWN */
+};
+
+/* helper to return enum power_supply_type of USB type */
+static enum power_supply_type get_usb_supply_type(u8 stat_5)
+{
+	unsigned long stat = stat_5;
+
+	return usb_type_enum[find_first_bit(&stat, SMB135X_BITS_PER_REG)];
+}
+
+static enum power_supply_property smb135x_battery_properties[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+};
+
+static int smb135x_get_prop_batt_status(struct smb135x_chg *chip)
+{
+	int rc;
+	int status = POWER_SUPPLY_STATUS_DISCHARGING;
+	u8 reg = 0;
+	u8 chg_type;
+
+	if (chip->chg_done_batt_full)
+		return POWER_SUPPLY_STATUS_FULL;
+
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Unable to read STATUS_4_REG rc = %d\n", rc);
+		return POWER_SUPPLY_STATUS_UNKNOWN;
+	}
+
+	if (reg & CHG_HOLD_OFF_BIT) {
+		/*
+		 * when chg hold off happens the battery is
+		 * not charging
+		 */
+		status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+		goto out;
+	}
+
+	chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+
+	if (chg_type == BATT_NOT_CHG_VAL)
+		status = POWER_SUPPLY_STATUS_DISCHARGING;
+	else
+		status = POWER_SUPPLY_STATUS_CHARGING;
+out:
+	pr_debug("STATUS_4_REG=%x\n", reg);
+	return status;
+}
+
+static int smb135x_get_prop_batt_present(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0)
+		return 0;
+
+	/* treat battery gone if less than 2V */
+	if (reg & BATT_LESS_THAN_2V)
+		return 0;
+
+	return chip->batt_present;
+}
+
+static int smb135x_get_prop_charge_type(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+	u8 chg_type;
+
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0)
+		return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+
+	chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+	if (chg_type == BATT_NOT_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_NONE;
+	else if (chg_type == BATT_FAST_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_FAST;
+	else if (chg_type == BATT_PRE_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+	else if (chg_type == BATT_TAPER_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+
+	return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+#define DEFAULT_BATT_CAPACITY	50
+static int smb135x_get_prop_batt_capacity(struct smb135x_chg *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->fake_battery_soc >= 0)
+		return chip->fake_battery_soc;
+	if (chip->bms_psy) {
+		power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		return ret.intval;
+	}
+
+	return DEFAULT_BATT_CAPACITY;
+}
+
+static int smb135x_get_prop_batt_health(struct smb135x_chg *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->batt_hot)
+		ret.intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (chip->batt_cold)
+		ret.intval = POWER_SUPPLY_HEALTH_COLD;
+	else if (chip->batt_warm)
+		ret.intval = POWER_SUPPLY_HEALTH_WARM;
+	else if (chip->batt_cool)
+		ret.intval = POWER_SUPPLY_HEALTH_COOL;
+	else
+		ret.intval = POWER_SUPPLY_HEALTH_GOOD;
+
+	return ret.intval;
+}
+
+static int smb135x_enable_volatile_writes(struct smb135x_chg *chip)
+{
+	int rc;
+
+	rc = smb135x_masked_write(chip, CMD_I2C_REG,
+			ALLOW_VOLATILE_BIT, ALLOW_VOLATILE_BIT);
+	if (rc < 0)
+		dev_err(chip->dev,
+			"Couldn't set VOLATILE_W_PERM_BIT rc=%d\n", rc);
+
+	return rc;
+}
+
+static int usb_current_table_smb1356[] = {
+	180,
+	240,
+	270,
+	285,
+	300,
+	330,
+	360,
+	390,
+	420,
+	540,
+	570,
+	600,
+	660,
+	720,
+	840,
+	900,
+	960,
+	1080,
+	1110,
+	1128,
+	1146,
+	1170,
+	1182,
+	1200,
+	1230,
+	1260,
+	1380,
+	1440,
+	1560,
+	1620,
+	1680,
+	1800
+};
+
+static int fastchg_current_table[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	2700,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	2800,
+	1950,
+	1970,
+	2000,
+	2050,
+	2100,
+	2300,
+	2400,
+	2500,
+	3000
+};
+
+static int usb_current_table_smb1357_smb1358[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	1450,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	1930,
+	1950,
+	1970,
+	2000,
+	2050,
+	2100,
+	2300,
+	2400,
+	2500,
+	3000
+};
+
+static int usb_current_table_smb1359[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	1450,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	1930,
+	1950,
+	1970,
+	2000,
+	2050,
+	2100,
+	2300,
+	2400,
+	2500
+};
+
+static int dc_current_table_smb1356[] = {
+	180,
+	240,
+	270,
+	285,
+	300,
+	330,
+	360,
+	390,
+	420,
+	540,
+	570,
+	600,
+	660,
+	720,
+	840,
+	870,
+	900,
+	960,
+	1080,
+	1110,
+	1128,
+	1146,
+	1158,
+	1170,
+	1182,
+	1200,
+};
+
+static int dc_current_table[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	1450,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	1930,
+	1950,
+	1970,
+	2000,
+};
+
+#define CURRENT_100_MA		100
+#define CURRENT_150_MA		150
+#define CURRENT_500_MA		500
+#define CURRENT_900_MA		900
+#define SUSPEND_CURRENT_MA	2
+
+static int __smb135x_usb_suspend(struct smb135x_chg *chip, bool suspend)
+{
+	int rc;
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+			USB_SHUTDOWN_BIT, suspend ? USB_SHUTDOWN_BIT : 0);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set cfg 11 rc = %d\n", rc);
+	return rc;
+}
+
+static int __smb135x_dc_suspend(struct smb135x_chg *chip, bool suspend)
+{
+	int rc = 0;
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+			DC_SHUTDOWN_BIT, suspend ? DC_SHUTDOWN_BIT : 0);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set cfg 11 rc = %d\n", rc);
+	return rc;
+}
+
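+/*
+ * Suspend or resume the USB/DC input path for the given reason
+ * (USER/THERMAL/CURRENT). Reasons are tracked as a bitmask and the
+ * path is only resumed once all reasons have been cleared.
+ */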
+static int smb135x_path_suspend(struct smb135x_chg *chip, enum path_type path,
+						int reason, bool suspend)
+{
+	int rc = 0;
+	int suspended;
+	int *path_suspended;
+	int (*func)(struct smb135x_chg *chip, bool suspend);
+
+	mutex_lock(&chip->path_suspend_lock);
+	if (path == USB) {
+		suspended = chip->usb_suspended;
+		path_suspended = &chip->usb_suspended;
+		func = __smb135x_usb_suspend;
+	} else {
+		suspended = chip->dc_suspended;
+		path_suspended = &chip->dc_suspended;
+		func = __smb135x_dc_suspend;
+	}
+
+	if (suspend == false)
+		suspended &= ~reason;
+	else
+		suspended |= reason;
+
+	if (*path_suspended && !suspended)
+		rc = func(chip, 0);
+	if (!(*path_suspended) && suspended)
+		rc = func(chip, 1);
+
+	if (rc)
+		dev_err(chip->dev, "Couldn't set/unset suspend for %s path rc = %d\n",
+					path == USB ? "usb" : "dc",
+					rc);
+	else
+		*path_suspended = suspended;
+
+	mutex_unlock(&chip->path_suspend_lock);
+	return rc;
+}
+
+static int smb135x_get_usb_chg_current(struct smb135x_chg *chip)
+{
+	if (chip->usb_suspended)
+		return SUSPEND_CURRENT_MA;
+	else
+		return chip->real_usb_psy_ma;
+}
+#define FCC_MASK			SMB135X_MASK(4, 0)
+#define CFG_1C_REG			0x1C
+static int smb135x_get_fastchg_current(struct smb135x_chg *chip)
+{
+	u8 reg;
+	int rc;
+
+	rc = smb135x_read(chip, CFG_1C_REG, &reg);
+	if (rc < 0) {
+		pr_debug("cannot read 1c rc = %d\n", rc);
+		return 0;
+	}
+	reg &= FCC_MASK;
+	if (chip->fastchg_current_arr_size == 0
+			|| reg >= chip->fastchg_current_arr_size) {
+		dev_err(chip->dev, "Current table out of range\n");
+		return -EINVAL;
+	}
+	return chip->fastchg_current_table[reg];
+}
+
+static int smb135x_set_fastchg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int i, rc, diff, best, best_diff;
+	u8 reg;
+
+	/*
+	 * if there is no array loaded or if the smallest current limit is
+	 * above the requested current, then do nothing
+	 */
+	if (chip->fastchg_current_arr_size == 0) {
+		dev_err(chip->dev, "no table loaded\n");
+		return -EINVAL;
+	} else if ((current_ma - chip->fastchg_current_table[0]) < 0) {
+		dev_err(chip->dev, "invalid current requested\n");
+		return -EINVAL;
+	}
+
+	/* use the closest setting under the requested current */
+	best = 0;
+	best_diff = current_ma - chip->fastchg_current_table[best];
+
+	for (i = 1; i < chip->fastchg_current_arr_size; i++) {
+		diff = current_ma - chip->fastchg_current_table[i];
+		if (diff >= 0 && diff < best_diff) {
+			best_diff = diff;
+			best = i;
+		}
+	}
+	i = best;
+
+	reg = i & FCC_MASK;
+	rc = smb135x_masked_write(chip, CFG_1C_REG, FCC_MASK, reg);
+	if (rc < 0)
+		dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+	pr_debug("fastchg current set to %dma\n",
+			chip->fastchg_current_table[i]);
+	return rc;
+}
+
+static int smb135x_set_high_usb_chg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int i, rc;
+	u8 usb_cur_val;
+
+	for (i = chip->usb_current_arr_size - 1; i >= 0; i--) {
+		if (current_ma >= chip->usb_current_table[i])
+			break;
+	}
+	if (i < 0) {
+		dev_err(chip->dev,
+			"Cannot find %dmA in current table, using %dmA\n",
+			current_ma, CURRENT_150_MA);
+		rc = smb135x_masked_write(chip, CFG_5_REG,
+						USB_2_3_BIT, USB_2_3_BIT);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_100_VAL);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+					CURRENT_150_MA, rc);
+		else
+			chip->real_usb_psy_ma = CURRENT_150_MA;
+		return rc;
+	}
+
+	usb_cur_val = i & USBIN_INPUT_MASK;
+	rc = smb135x_masked_write(chip, CFG_C_REG,
+				USBIN_INPUT_MASK, usb_cur_val);
+	if (rc < 0) {
+		dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+					USB_100_500_AC_MASK, USB_AC_VAL);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc);
+	else
+		chip->real_usb_psy_ma = chip->usb_current_table[i];
+	return rc;
+}
+
+#define MAX_VERSION			0xF
+#define USB_100_PROBLEM_VERSION		0x2
+/*
+ * If APSD results are used:
+ *	if SDP is detected, the 500mA setting is consulted:
+ *		if set, 500mA is drawn
+ *		if unset, 100mA is drawn
+ *	if CDP/DCP is detected, the 0x0C setting is used,
+ *		i.e. the values in 0x41[1, 0] do not matter
+ */
+static int smb135x_set_usb_chg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int rc;
+
+	pr_debug("USB current_ma = %d\n", current_ma);
+
+	if (chip->workaround_flags & WRKARND_USB100_BIT) {
+		pr_info("USB requested = %dmA using %dmA\n", current_ma,
+						CURRENT_500_MA);
+		current_ma = CURRENT_500_MA;
+	}
+
+	if (current_ma == 0)
+		/* choose the lowest available value of 100mA */
+		current_ma = CURRENT_100_MA;
+
+	if (current_ma == SUSPEND_CURRENT_MA) {
+		/* force suspend bit */
+		rc = smb135x_path_suspend(chip, USB, CURRENT, true);
+		chip->real_usb_psy_ma = SUSPEND_CURRENT_MA;
+		goto out;
+	}
+	if (current_ma < CURRENT_150_MA) {
+		/* force 100mA */
+		rc = smb135x_masked_write(chip, CFG_5_REG, USB_2_3_BIT, 0);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_100_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_100_MA;
+		goto out;
+	}
+	/* specific current values */
+	if (current_ma == CURRENT_150_MA) {
+		rc = smb135x_masked_write(chip, CFG_5_REG,
+						USB_2_3_BIT, USB_2_3_BIT);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_100_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_150_MA;
+		goto out;
+	}
+	if (current_ma == CURRENT_500_MA) {
+		rc = smb135x_masked_write(chip, CFG_5_REG, USB_2_3_BIT, 0);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_500_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_500_MA;
+		goto out;
+	}
+	if (current_ma == CURRENT_900_MA) {
+		rc = smb135x_masked_write(chip, CFG_5_REG,
+						USB_2_3_BIT, USB_2_3_BIT);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_500_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_900_MA;
+		goto out;
+	}
+
+	rc = smb135x_set_high_usb_chg_current(chip, current_ma);
+	rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+out:
+	if (rc < 0)
+		dev_err(chip->dev,
+			"Couldn't set %dmA rc = %d\n", current_ma, rc);
+	return rc;
+}
+
+static int smb135x_set_dc_chg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int i, rc;
+	u8 dc_cur_val;
+
+	for (i = chip->dc_current_arr_size - 1; i >= 0; i--) {
+		if (chip->dc_psy_ma >= chip->dc_current_table[i])
+			break;
+	}
+	dc_cur_val = i & DCIN_INPUT_MASK;
+	rc = smb135x_masked_write(chip, CFG_A_REG,
+				DCIN_INPUT_MASK, dc_cur_val);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set dc charge current rc = %d\n",
+				rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int smb135x_set_appropriate_current(struct smb135x_chg *chip,
+						enum path_type path)
+{
+	int therm_ma, current_ma;
+	int path_current = (path == USB) ? chip->usb_psy_ma : chip->dc_psy_ma;
+	int (*func)(struct smb135x_chg *chip, int current_ma);
+	int rc = 0;
+
+	if (!chip->usb_psy && path == USB)
+		return 0;
+
+	/*
+	 * If the battery is absent do not modify the current at all; the
+	 * existing values were set by the bootloader or the default
+	 * configuration, and since the input is the only source of power we
+	 * should not change them
+	 */
+	if (!chip->batt_present) {
+		pr_debug("ignoring current request since battery is absent\n");
+		return 0;
+	}
+
+	if (path == USB) {
+		path_current = chip->usb_psy_ma;
+		func = smb135x_set_usb_chg_current;
+	} else {
+		path_current = chip->dc_psy_ma;
+		func = smb135x_set_dc_chg_current;
+		if (chip->dc_psy_type == -EINVAL)
+			func = NULL;
+	}
+
+	if (chip->therm_lvl_sel > 0
+			&& chip->therm_lvl_sel < (chip->thermal_levels - 1))
+		/*
+		 * consider thermal limit only when it is active and not at
+		 * the highest level
+		 */
+		therm_ma = chip->thermal_mitigation[chip->therm_lvl_sel];
+	else
+		therm_ma = path_current;
+
+	current_ma = min(therm_ma, path_current);
+	if (func != NULL)
+		rc = func(chip, current_ma);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set %s current to min(%d, %d) rc = %d\n",
+				path == USB ? "usb" : "dc",
+				therm_ma, path_current,
+				rc);
+	return rc;
+}
+
+static int smb135x_charging_enable(struct smb135x_chg *chip, int enable)
+{
+	int rc;
+
+	rc = smb135x_masked_write(chip, CMD_CHG_REG,
+				CMD_CHG_EN, enable ? CMD_CHG_EN : 0);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set CHG_ENABLE_BIT enable = %d rc = %d\n",
+			enable, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int __smb135x_charging(struct smb135x_chg *chip, int enable)
+{
+	int rc = 0;
+
+	pr_debug("charging enable = %d\n", enable);
+
+	if (chip->chg_disabled_permanently) {
+		pr_debug("charging is disabled permanently\n");
+		return -EINVAL;
+	}
+
+	rc = smb135x_charging_enable(chip, enable);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't %s charging  rc = %d\n",
+			enable ? "enable" : "disable", rc);
+		return rc;
+	}
+	chip->chg_enabled = enable;
+
+	/* set the suspended status */
+	rc = smb135x_path_suspend(chip, DC, USER, !enable);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set dc suspend to %d rc = %d\n",
+			enable, rc);
+		return rc;
+	}
+	rc = smb135x_path_suspend(chip, USB, USER, !enable);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set usb suspend to %d rc = %d\n",
+			enable, rc);
+		return rc;
+	}
+
+	pr_debug("charging %s\n",
+			enable ? "enabled" : "disabled, running from batt");
+	return rc;
+}
+
+static int smb135x_charging(struct smb135x_chg *chip, int enable)
+{
+	int rc = 0;
+
+	pr_debug("charging enable = %d\n", enable);
+
+	__smb135x_charging(chip, enable);
+
+	if (chip->usb_psy) {
+		pr_debug("usb psy changed\n");
+		power_supply_changed(chip->usb_psy);
+	}
+	if (chip->dc_psy_type != -EINVAL) {
+		pr_debug("dc psy changed\n");
+		power_supply_changed(chip->dc_psy);
+	}
+	pr_debug("charging %s\n",
+			enable ? "enabled" : "disabled, running from batt");
+	return rc;
+}
+
+static int smb135x_system_temp_level_set(struct smb135x_chg *chip,
+								int lvl_sel)
+{
+	int rc = 0;
+	int prev_therm_lvl;
+
+	if (!chip->thermal_mitigation) {
+		pr_err("Thermal mitigation not supported\n");
+		return -EINVAL;
+	}
+
+	if (lvl_sel < 0) {
+		pr_err("Unsupported level selected %d\n", lvl_sel);
+		return -EINVAL;
+	}
+
+	if (lvl_sel >= chip->thermal_levels) {
+		pr_err("Unsupported level selected %d forcing %d\n", lvl_sel,
+				chip->thermal_levels - 1);
+		lvl_sel = chip->thermal_levels - 1;
+	}
+
+	if (lvl_sel == chip->therm_lvl_sel)
+		return 0;
+
+	mutex_lock(&chip->current_change_lock);
+	prev_therm_lvl = chip->therm_lvl_sel;
+	chip->therm_lvl_sel = lvl_sel;
+	if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
+		/*
+		 * Disable charging if highest value selected by
+		 * setting the DC and USB path in suspend
+		 */
+		rc = smb135x_path_suspend(chip, DC, THERMAL, true);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set dc suspend rc %d\n", rc);
+			goto out;
+		}
+		rc = smb135x_path_suspend(chip, USB, THERMAL, true);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set usb suspend rc %d\n", rc);
+			goto out;
+		}
+		goto out;
+	}
+
+	smb135x_set_appropriate_current(chip, USB);
+	smb135x_set_appropriate_current(chip, DC);
+
+	if (prev_therm_lvl == chip->thermal_levels - 1) {
+		/*
+		 * If previously highest value was selected charging must have
+		 * been disabled. Enable charging by taking the DC and USB path
+		 * out of suspend.
+		 */
+		rc = smb135x_path_suspend(chip, DC, THERMAL, false);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set dc suspend rc %d\n", rc);
+			goto out;
+		}
+		rc = smb135x_path_suspend(chip, USB, THERMAL, false);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set usb suspend rc %d\n", rc);
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&chip->current_change_lock);
+	return rc;
+}
+
+static int smb135x_battery_set_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       const union power_supply_propval *val)
+{
+	int rc = 0, update_psy = 0;
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!chip->bms_controlled_charging) {
+			rc = -EINVAL;
+			break;
+		}
+		switch (val->intval) {
+		case POWER_SUPPLY_STATUS_FULL:
+			rc = smb135x_charging_enable(chip, false);
+			if (rc < 0) {
+				dev_err(chip->dev, "Couldn't disable charging rc = %d\n",
+						rc);
+			} else {
+				chip->chg_done_batt_full = true;
+				update_psy = 1;
+				dev_dbg(chip->dev, "status = FULL chg_done_batt_full = %d",
+						chip->chg_done_batt_full);
+			}
+			break;
+		case POWER_SUPPLY_STATUS_DISCHARGING:
+			chip->chg_done_batt_full = false;
+			update_psy = 1;
+			dev_dbg(chip->dev, "status = DISCHARGING chg_done_batt_full = %d",
+					chip->chg_done_batt_full);
+			break;
+		case POWER_SUPPLY_STATUS_CHARGING:
+			rc = smb135x_charging_enable(chip, true);
+			if (rc < 0) {
+				dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+						rc);
+			} else {
+				chip->chg_done_batt_full = false;
+				dev_dbg(chip->dev, "status = CHARGING chg_done_batt_full = %d",
+						chip->chg_done_batt_full);
+			}
+			break;
+		default:
+			update_psy = 0;
+			rc = -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		smb135x_charging(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		chip->fake_battery_soc = val->intval;
+		update_psy = 1;
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		smb135x_system_temp_level_set(chip, val->intval);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	if (!rc && update_psy)
+		power_supply_changed(chip->batt_psy);
+	return rc;
+}
+
+static int smb135x_battery_is_writeable(struct power_supply *psy,
+				       enum power_supply_property prop)
+{
+	int rc;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+	case POWER_SUPPLY_PROP_CAPACITY:
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static int smb135x_battery_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = smb135x_get_prop_batt_status(chip);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = smb135x_get_prop_batt_present(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = chip->chg_enabled;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = smb135x_get_prop_charge_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = smb135x_get_prop_batt_capacity(chip);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = smb135x_get_prop_batt_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		val->intval = chip->therm_lvl_sel;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static enum power_supply_property smb135x_dc_properties[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_HEALTH,
+};
+
+static int smb135x_dc_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = chip->dc_present;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = chip->chg_enabled ? chip->dc_present : 0;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = chip->dc_present;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define MIN_FLOAT_MV	3600
+#define MAX_FLOAT_MV	4500
+
+#define MID_RANGE_FLOAT_MV_MIN		3600
+#define MID_RANGE_FLOAT_MIN_VAL		0x05
+#define MID_RANGE_FLOAT_STEP_MV		20
+
+#define HIGH_RANGE_FLOAT_MIN_MV		4340
+#define HIGH_RANGE_FLOAT_MIN_VAL	0x2A
+#define HIGH_RANGE_FLOAT_STEP_MV	10
+
+#define VHIGH_RANGE_FLOAT_MIN_MV	4400
+#define VHIGH_RANGE_FLOAT_MIN_VAL	0x2E
+#define VHIGH_RANGE_FLOAT_STEP_MV	20
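+/*
+ * Worked examples of the encoding implemented below: 4000mV falls in the
+ * mid range, so temp = 0x05 + (4000 - 3600) / 20 = 0x19; 4350mV falls in
+ * the high range, so temp = 0x2A + (4350 - 4340) / 10 = 0x2B.
+ */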
+static int smb135x_float_voltage_set(struct smb135x_chg *chip, int vfloat_mv)
+{
+	u8 temp;
+
+	if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+		dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
+					vfloat_mv);
+		return -EINVAL;
+	}
+
+	if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
+		/* mid range */
+		temp = MID_RANGE_FLOAT_MIN_VAL
+			+ (vfloat_mv - MID_RANGE_FLOAT_MV_MIN)
+				/ MID_RANGE_FLOAT_STEP_MV;
+	} else if (vfloat_mv < VHIGH_RANGE_FLOAT_MIN_MV) {
+		/* high range */
+		temp = HIGH_RANGE_FLOAT_MIN_VAL
+			+ (vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV)
+				/ HIGH_RANGE_FLOAT_STEP_MV;
+	} else {
+		/* very high range */
+		temp = VHIGH_RANGE_FLOAT_MIN_VAL
+			+ (vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV)
+				/ VHIGH_RANGE_FLOAT_STEP_MV;
+	}
+
+	return smb135x_write(chip, VFLOAT_REG, temp);
+}
+
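+/*
+ * Threshold mapping used below: resume_delta_mv < 100 -> 50mV inhibit,
+ * < 200 -> 100mV, < 300 -> 200mV, otherwise 300mV. The recharge threshold
+ * switches to the 200mV setting once resume_delta_mv reaches 200mV.
+ */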
+static int smb135x_set_resume_threshold(struct smb135x_chg *chip,
+		int resume_delta_mv)
+{
+	int rc;
+	u8 reg;
+
+	if (!chip->inhibit_disabled) {
+		if (resume_delta_mv < 100)
+			reg = CHG_INHIBIT_50MV_VAL;
+		else if (resume_delta_mv < 200)
+			reg = CHG_INHIBIT_100MV_VAL;
+		else if (resume_delta_mv < 300)
+			reg = CHG_INHIBIT_200MV_VAL;
+		else
+			reg = CHG_INHIBIT_300MV_VAL;
+
+		rc = smb135x_masked_write(chip, CFG_4_REG, CHG_INHIBIT_MASK,
+						reg);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	if (resume_delta_mv < 200)
+		reg = 0;
+	else
+		reg = RECHARGE_200MV_BIT;
+
+	rc = smb135x_masked_write(chip, CFG_5_REG, RECHARGE_200MV_BIT, reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set recharge rc = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static enum power_supply_property smb135x_parallel_properties[] = {
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+};
+
+static bool smb135x_is_input_current_limited(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, STATUS_2_REG, &reg);
+	if (rc) {
+		pr_debug("Couldn't read STATUS_2_REG for ICL status rc = %d\n", rc);
+		return false;
+	}
+
+	return !!(reg & HARD_LIMIT_STS_BIT);
+}
+
+static int smb135x_parallel_set_chg_present(struct smb135x_chg *chip,
+						int present)
+{
+	u8 val;
+	int rc;
+
+	if (present == chip->parallel_charger_present) {
+		pr_debug("present %d -> %d, skipping\n",
+				chip->parallel_charger_present, present);
+		return 0;
+	}
+
+	if (present) {
+		/* Check if SMB135x is present */
+		rc = smb135x_read(chip, VERSION1_REG, &val);
+		if (rc) {
+			pr_debug("Failed to detect smb135x; parallel charger may be absent\n");
+			return -ENODEV;
+		}
+
+		rc = smb135x_enable_volatile_writes(chip);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't configure for volatile rc = %d\n",
+				rc);
+			return rc;
+		}
+
+		/* set the float voltage */
+		if (chip->vfloat_mv != -EINVAL) {
+			rc = smb135x_float_voltage_set(chip, chip->vfloat_mv);
+			if (rc < 0) {
+				dev_err(chip->dev,
+					"Couldn't set float voltage rc = %d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		/* resume threshold */
+		if (chip->resume_delta_mv != -EINVAL) {
+			smb135x_set_resume_threshold(chip,
+					chip->resume_delta_mv);
+		}
+
+		rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+					USE_REGISTER_FOR_CURRENT,
+					USE_REGISTER_FOR_CURRENT);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set input limit cmd rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* set chg en by pin active low and enable auto recharge */
+		rc = smb135x_masked_write(chip, CFG_14_REG,
+				CHG_EN_BY_PIN_BIT | CHG_EN_ACTIVE_LOW_BIT
+				| DISABLE_AUTO_RECHARGE_BIT,
+				CHG_EN_BY_PIN_BIT |
+				chip->parallel_pin_polarity_setting);
+
+		/* set bit 0 = 100mA bit 1 = 500mA and set register control */
+		rc = smb135x_masked_write(chip, CFG_E_REG,
+				POLARITY_100_500_BIT | USB_CTRL_BY_PIN_BIT,
+				POLARITY_100_500_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set usbin cfg rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* control USB suspend via command bits */
+		rc = smb135x_masked_write(chip, USBIN_DCIN_CFG_REG,
+			USBIN_SUSPEND_VIA_COMMAND_BIT,
+			USBIN_SUSPEND_VIA_COMMAND_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set cfg rc=%d\n", rc);
+			return rc;
+		}
+
+		/* set the fastchg_current to the lowest setting */
+		if (chip->fastchg_current_arr_size > 0)
+			rc = smb135x_set_fastchg_current(chip,
+					chip->fastchg_current_table[0]);
+
+		/*
+		 * enforce chip->chg_enabled since this could be the first
+		 * time we have i2c access to the charger after
+		 * chip->chg_enabled has been modified
+		 */
+		smb135x_charging(chip, chip->chg_enabled);
+	}
+
+	chip->parallel_charger_present = present;
+	/*
+	 * When present is being set force USB suspend, start charging
+	 * only when CURRENT_MAX is set.
+	 *
+	 * Usually the chip will be shut down (no i2c access to the chip)
+	 * when USB is removed; however, there could be situations when
+	 * it is not. To cover USB reinsertions in such situations,
+	 * force USB suspend when present is being unset.
+	 * It is likely that i2c access could fail here - do not return error.
+	 * (It is not possible to detect whether the chip is in shutdown state
+	 * or not except for the i2c error).
+	 */
+	chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+	rc = smb135x_path_suspend(chip, USB, CURRENT, true);
+
+	if (present) {
+		if (rc) {
+			dev_err(chip->dev,
+				"Couldn't set usb suspend to true rc = %d\n",
+				rc);
+			return rc;
+		}
+		/* Check if the USB is configured for suspend. If not, do it */
+		mutex_lock(&chip->path_suspend_lock);
+		rc = smb135x_read(chip, CMD_INPUT_LIMIT, &val);
+		if (rc) {
+			dev_err(chip->dev,
+				"Couldn't read 0x%02x rc:%d\n", CMD_INPUT_LIMIT,
+				rc);
+			mutex_unlock(&chip->path_suspend_lock);
+			return rc;
+		} else if (!(val & BIT(6))) {
+			rc = __smb135x_usb_suspend(chip, 1);
+		}
+		mutex_unlock(&chip->path_suspend_lock);
+		if (rc) {
+			dev_err(chip->dev,
+				"Couldn't set usb to suspend rc:%d\n", rc);
+			return rc;
+		}
+	} else {
+		chip->real_usb_psy_ma = SUSPEND_CURRENT_MA;
+	}
+	return 0;
+}
+
+static int smb135x_parallel_set_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       const union power_supply_propval *val)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		if (chip->parallel_charger_present)
+			smb135x_charging(chip, val->intval);
+		else
+			chip->chg_enabled = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smb135x_parallel_set_chg_present(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		if (chip->parallel_charger_present) {
+			rc = smb135x_set_fastchg_current(chip,
+						val->intval / 1000);
+		}
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if (chip->parallel_charger_present) {
+			chip->usb_psy_ma = val->intval / 1000;
+			rc = smb135x_set_usb_chg_current(chip,
+							chip->usb_psy_ma);
+		}
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		if (chip->parallel_charger_present &&
+			(chip->vfloat_mv != val->intval)) {
+			rc = smb135x_float_voltage_set(chip, val->intval);
+			if (!rc)
+				chip->vfloat_mv = val->intval;
+		} else {
+			chip->vfloat_mv = val->intval;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return rc;
+}
+
+static int smb135x_parallel_is_writeable(struct power_supply *psy,
+				       enum power_supply_property prop)
+{
+	int rc;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static int smb135x_parallel_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = chip->chg_enabled;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_get_usb_chg_current(chip) * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chip->vfloat_mv;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = chip->parallel_charger_present;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_get_fastchg_current(chip) * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_STATUS:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_get_prop_batt_status(chip);
+		else
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_is_input_current_limited(chip);
+		else
+			val->intval = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void smb135x_external_power_changed(struct power_supply *psy)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+	union power_supply_propval prop = {0,};
+	int rc, current_limit = 0;
+
+	if (!chip->usb_psy)
+		return;
+
+	if (chip->bms_psy_name)
+		chip->bms_psy =
+			power_supply_get_by_name((char *)chip->bms_psy_name);
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+	if (rc < 0)
+		dev_err(chip->dev,
+			"could not read USB current_max property, rc=%d\n", rc);
+	else
+		current_limit = prop.intval / 1000;
+
+	pr_debug("current_limit = %d\n", current_limit);
+
+	if (chip->usb_psy_ma != current_limit) {
+		mutex_lock(&chip->current_change_lock);
+		chip->usb_psy_ma = current_limit;
+		rc = smb135x_set_appropriate_current(chip, USB);
+		mutex_unlock(&chip->current_change_lock);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't set usb current rc = %d\n",
+					rc);
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_ONLINE, &prop);
+	if (rc < 0)
+		dev_err(chip->dev,
+			"could not read USB ONLINE property, rc=%d\n", rc);
+
+	/* update online property */
+	rc = 0;
+	if (chip->usb_present && chip->chg_enabled && chip->usb_psy_ma != 0) {
+		if (prop.intval == 0) {
+			prop.intval = 1;
+			rc = power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &prop);
+		}
+	} else {
+		if (prop.intval == 1) {
+			prop.intval = 0;
+			rc = power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &prop);
+		}
+	}
+	if (rc < 0)
+		dev_err(chip->dev, "could not set usb online, rc=%d\n", rc);
+}
+
+static bool elapsed_msec_greater(struct timeval *start_time,
+				struct timeval *end_time, int ms)
+{
+	int msec_elapsed;
+
+	msec_elapsed = (end_time->tv_sec - start_time->tv_sec) * 1000 +
+		DIV_ROUND_UP(end_time->tv_usec - start_time->tv_usec, 1000);
+
+	return (msec_elapsed > ms);
+}
+
+#define MAX_STEP_MS		10
+static int smb135x_chg_otg_enable(struct smb135x_chg *chip)
+{
+	int rc = 0;
+	int restart_count = 0;
+	struct timeval time_a, time_b, time_c, time_d;
+	u8 reg;
+
+	if (chip->revision == REV_2) {
+		/*
+		 * Workaround for a hardware bug where the OTG needs to be
+		 * enabled, disabled, and enabled again for it to actually be
+		 * enabled. The time between each step should be at most
+		 * MAX_STEP_MS.
+		 *
+		 * Note that if enable-disable executes within the timeframe
+		 * but the final enable takes more than MAX_STEP_MS, we treat
+		 * it as the first enable and try disabling again. We don't
+		 * want to issue enable back to back.
+		 *
+		 * Notice the instances when time is captured and the
+		 * successive steps.
+		 * timeA-enable-timeC-disable-timeB-enable-timeD.
+		 * When
+		 * (timeB - timeA) < MAX_STEP_MS AND
+		 *			(timeD - timeC) < MAX_STEP_MS
+		 * then it is guaranteed that the successive steps
+		 * must have executed within MAX_STEP_MS
+		 */
+		do_gettimeofday(&time_a);
+restart_from_enable:
+		/* first step - enable otg */
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+
+restart_from_disable:
+		/* second step - disable otg */
+		do_gettimeofday(&time_c);
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+		do_gettimeofday(&time_b);
+
+		if (elapsed_msec_greater(&time_a, &time_b, MAX_STEP_MS)) {
+			restart_count++;
+			if (restart_count > 10) {
+				dev_err(chip->dev,
+						"Couldn't enable OTG restart_count=%d\n",
+						restart_count);
+				return -EAGAIN;
+			}
+			time_a = time_b;
+			pr_debug("restarting from first enable\n");
+			goto restart_from_enable;
+		}
+
+		/* third step (first step in case of a failure) - enable otg */
+		time_a = time_b;
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+		do_gettimeofday(&time_d);
+
+		if (elapsed_msec_greater(&time_c, &time_d, MAX_STEP_MS)) {
+			restart_count++;
+			if (restart_count > 10) {
+				dev_err(chip->dev,
+						"Couldn't enable OTG restart_count=%d\n",
+						restart_count);
+				return -EAGAIN;
+			}
+			pr_debug("restarting from disable\n");
+			goto restart_from_disable;
+		}
+	} else {
+		rc = smb135x_read(chip, CMD_CHG_REG, &reg);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't read cmd reg rc=%d\n",
+					rc);
+			return rc;
+		}
+		if (reg & OTG_EN) {
+			/* if it is set, disable it before re-enabling it */
+			rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb135x_chg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+	chip->otg_oc_count = 0;
+	rc = smb135x_chg_otg_enable(chip);
+	if (rc)
+		dev_err(chip->dev, "Couldn't enable otg regulator rc=%d\n", rc);
+
+	return rc;
+}
+
+static int smb135x_chg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+	mutex_lock(&chip->otg_oc_count_lock);
+	cancel_delayed_work_sync(&chip->reset_otg_oc_count_work);
+	mutex_unlock(&chip->otg_oc_count_lock);
+	rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n", rc);
+	return rc;
+}
+
+static int smb135x_chg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	u8 reg = 0;
+	struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+	rc = smb135x_read(chip, CMD_CHG_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev,
+				"Couldn't read OTG enable bit rc=%d\n", rc);
+		return rc;
+	}
+
+	return  (reg & OTG_EN) ? 1 : 0;
+}
+
+struct regulator_ops smb135x_chg_otg_reg_ops = {
+	.enable		= smb135x_chg_otg_regulator_enable,
+	.disable	= smb135x_chg_otg_regulator_disable,
+	.is_enabled	= smb135x_chg_otg_regulator_is_enable,
+};
+
+static int smb135x_set_current_tables(struct smb135x_chg *chip)
+{
+	switch (chip->version) {
+	case V_SMB1356:
+		chip->usb_current_table = usb_current_table_smb1356;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1356);
+		chip->dc_current_table = dc_current_table_smb1356;
+		chip->dc_current_arr_size
+			= ARRAY_SIZE(dc_current_table_smb1356);
+		chip->fastchg_current_table = NULL;
+		chip->fastchg_current_arr_size = 0;
+		break;
+	case V_SMB1357:
+		chip->usb_current_table = usb_current_table_smb1357_smb1358;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1357_smb1358);
+		chip->dc_current_table = dc_current_table;
+		chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+		chip->fastchg_current_table = fastchg_current_table;
+		chip->fastchg_current_arr_size
+			= ARRAY_SIZE(fastchg_current_table);
+		break;
+	case V_SMB1358:
+		chip->usb_current_table = usb_current_table_smb1357_smb1358;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1357_smb1358);
+		chip->dc_current_table = dc_current_table;
+		chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+		chip->fastchg_current_table = fastchg_current_table;
+		chip->fastchg_current_arr_size
+			= ARRAY_SIZE(fastchg_current_table);
+		break;
+	case V_SMB1359:
+		chip->usb_current_table = usb_current_table_smb1359;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1359);
+		chip->dc_current_table = dc_current_table;
+		chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+		chip->fastchg_current_table = NULL;
+		chip->fastchg_current_arr_size = 0;
+		break;
+	}
+	return 0;
+}
+
+#define SMB1356_VERSION3_BIT	BIT(7)
+#define SMB1357_VERSION1_VAL	0x01
+#define SMB1358_VERSION1_VAL	0x02
+#define SMB1359_VERSION1_VAL	0x00
+#define SMB1357_VERSION2_VAL	0x01
+#define SMB1358_VERSION2_VAL	0x02
+#define SMB1359_VERSION2_VAL	0x00
+static int smb135x_chip_version_and_revision(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 version1, version2, version3;
+
+	/* read the revision */
+	rc = read_revision(chip, &chip->revision);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read revision rc = %d\n", rc);
+		return rc;
+	}
+
+	if (chip->revision >= REV_MAX || revision_str[chip->revision] == NULL) {
+		dev_err(chip->dev, "Bad revision found = %d\n", chip->revision);
+		return -EINVAL;
+	}
+
+	/* check if it is smb1356 */
+	rc = read_version3(chip, &version3);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version3 rc = %d\n", rc);
+		return rc;
+	}
+
+	if (version3 & SMB1356_VERSION3_BIT) {
+		chip->version = V_SMB1356;
+		goto wrkarnd_and_input_current_values;
+	}
+
+	/* check if it is smb1357, smb1358 or smb1359 based on revision */
+	if (chip->revision <= REV_1_1) {
+		rc = read_version1(chip, &version1);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't read version 1 rc = %d\n", rc);
+			return rc;
+		}
+		switch (version1) {
+		case SMB1357_VERSION1_VAL:
+			chip->version = V_SMB1357;
+			break;
+		case SMB1358_VERSION1_VAL:
+			chip->version = V_SMB1358;
+			break;
+		case SMB1359_VERSION1_VAL:
+			chip->version = V_SMB1359;
+			break;
+		default:
+			dev_err(chip->dev,
+				"Unknown version 1 = 0x%02x\n", version1);
+			return -EINVAL;
+		}
+	} else {
+		rc = read_version2(chip, &version2);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't read version 2 rc = %d\n", rc);
+			return rc;
+		}
+		switch (version2) {
+		case SMB1357_VERSION2_VAL:
+			chip->version = V_SMB1357;
+			break;
+		case SMB1358_VERSION2_VAL:
+			chip->version = V_SMB1358;
+			break;
+		case SMB1359_VERSION2_VAL:
+			chip->version = V_SMB1359;
+			break;
+		default:
+			dev_err(chip->dev,
+					"Unknown version 2 = 0x%02x\n",
+					version2);
+			return -EINVAL;
+		}
+	}
+
+wrkarnd_and_input_current_values:
+	if (is_usb100_broken(chip))
+		chip->workaround_flags |= WRKARND_USB100_BIT;
+	/*
+	 * Rev v1.0 and v1.1 of SMB135x fail charger type detection
+	 * (apsd) due to interference on the D+/- lines by the USB phy.
+	 * Set the workaround flag to disable charger type reporting
+	 * for this revision.
+	 */
+	if (chip->revision <= REV_1_1)
+		chip->workaround_flags |= WRKARND_APSD_FAIL;
+
+	pr_debug("workaround_flags = %x\n", chip->workaround_flags);
+
+	return smb135x_set_current_tables(chip);
+}
+
+static int smb135x_regulator_init(struct smb135x_chg *chip)
+{
+	int rc = 0;
+	struct regulator_config cfg = {};
+
+	chip->otg_vreg.rdesc.owner = THIS_MODULE;
+	chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+	chip->otg_vreg.rdesc.ops = &smb135x_chg_otg_reg_ops;
+	chip->otg_vreg.rdesc.name = chip->dev->of_node->name;
+	chip->otg_vreg.rdesc.of_match = chip->dev->of_node->name;
+	cfg.dev = chip->dev;
+	cfg.driver_data = chip;
+
+	chip->otg_vreg.rdev = regulator_register(&chip->otg_vreg.rdesc, &cfg);
+	if (IS_ERR(chip->otg_vreg.rdev)) {
+		rc = PTR_ERR(chip->otg_vreg.rdev);
+		chip->otg_vreg.rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			dev_err(chip->dev,
+				"OTG reg failed, rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+static void smb135x_regulator_deinit(struct smb135x_chg *chip)
+{
+	if (chip->otg_vreg.rdev)
+		regulator_unregister(chip->otg_vreg.rdev);
+}
+
+static void wireless_insertion_work(struct work_struct *work)
+{
+	struct smb135x_chg *chip =
+		container_of(work, struct smb135x_chg,
+				wireless_insertion_work.work);
+
+	/* unsuspend dc */
+	smb135x_path_suspend(chip, DC, CURRENT, false);
+}
+
+static int hot_hard_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_hot = !!rt_stat;
+	return 0;
+}
+static int cold_hard_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_cold = !!rt_stat;
+	return 0;
+}
+static int hot_soft_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_warm = !!rt_stat;
+	return 0;
+}
+static int cold_soft_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_cool = !!rt_stat;
+	return 0;
+}
+static int battery_missing_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_present = !rt_stat;
+	return 0;
+}
+static int vbat_low_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_warn("vbat low\n");
+	return 0;
+}
+static int chg_hot_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_warn("chg hot\n");
+	return 0;
+}
+static int chg_term_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	/*
+	 * This handler gets called even when the charger based termination
+	 * is disabled (due to change in RT status). However, in a bms
+	 * controlled design the battery status should not be updated.
+	 */
+	if (!chip->iterm_disabled)
+		chip->chg_done_batt_full = !!rt_stat;
+	return 0;
+}
+
+static int taper_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	return 0;
+}
+
+static int fast_chg_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	if (rt_stat & IRQ_C_FASTCHG_BIT)
+		chip->chg_done_batt_full = false;
+
+	return 0;
+}
+
+static int recharge_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	int rc;
+
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	if (chip->bms_controlled_charging) {
+		rc = smb135x_charging_enable(chip, true);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+					rc);
+	}
+
+	return 0;
+}
+
+static int safety_timeout_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_warn("safety timeout rt_stat = 0x%02x\n", rt_stat);
+	return 0;
+}
+
+/**
+ * power_ok_handler() - called when the switcher turns on or turns off
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating switcher turning on or off
+ */
+static int power_ok_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	return 0;
+}
+
+static int rid_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	bool usb_slave_present;
+	union power_supply_propval pval = {0, };
+
+	usb_slave_present = is_usb_slave_present(chip);
+
+	if (chip->usb_slave_present ^ usb_slave_present) {
+		chip->usb_slave_present = usb_slave_present;
+		if (chip->usb_psy) {
+			pr_debug("setting usb psy usb_otg = %d\n",
+					chip->usb_slave_present);
+			pval.intval = chip->usb_slave_present;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_USB_OTG, &pval);
+		}
+	}
+	return 0;
+}
+
+#define RESET_OTG_OC_COUNT_MS	100
+static void reset_otg_oc_count_work(struct work_struct *work)
+{
+	struct smb135x_chg *chip =
+		container_of(work, struct smb135x_chg,
+				reset_otg_oc_count_work.work);
+
+	mutex_lock(&chip->otg_oc_count_lock);
+	pr_debug("It has been %dms since the overcurrent interrupt, resetting the count\n",
+			RESET_OTG_OC_COUNT_MS);
+	chip->otg_oc_count = 0;
+	mutex_unlock(&chip->otg_oc_count_lock);
+}
+
+#define MAX_OTG_RETRY	3
+static int otg_oc_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	int rc;
+
+	mutex_lock(&chip->otg_oc_count_lock);
+	cancel_delayed_work_sync(&chip->reset_otg_oc_count_work);
+	++chip->otg_oc_count;
+	if (chip->otg_oc_count < MAX_OTG_RETRY) {
+		rc = smb135x_chg_otg_enable(chip);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+				rc);
+	} else {
+		pr_warn_ratelimited("Tried enabling OTG %d times, the USB slave is nonconformant.\n",
+			chip->otg_oc_count);
+	}
+
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	schedule_delayed_work(&chip->reset_otg_oc_count_work,
+			msecs_to_jiffies(RESET_OTG_OC_COUNT_MS));
+	mutex_unlock(&chip->otg_oc_count_lock);
+	return 0;
+}
+
+static int handle_dc_removal(struct smb135x_chg *chip)
+{
+	union power_supply_propval prop;
+
+	if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS) {
+		cancel_delayed_work_sync(&chip->wireless_insertion_work);
+		smb135x_path_suspend(chip, DC, CURRENT, true);
+	}
+	if (chip->dc_psy_type != -EINVAL) {
+		prop.intval = chip->dc_present;
+		power_supply_set_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_ONLINE, &prop);
+	}
+	return 0;
+}
+
+#define DCIN_UNSUSPEND_DELAY_MS		1000
+static int handle_dc_insertion(struct smb135x_chg *chip)
+{
+	union power_supply_propval prop;
+
+	if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS)
+		schedule_delayed_work(&chip->wireless_insertion_work,
+			msecs_to_jiffies(DCIN_UNSUSPEND_DELAY_MS));
+	if (chip->dc_psy_type != -EINVAL) {
+		prop.intval = chip->dc_present;
+		power_supply_set_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_ONLINE, &prop);
+	}
+	return 0;
+}
+/**
+ * dcin_uv_handler() - called when the dc voltage crosses the uv threshold
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating whether dc voltage is uv
+ */
+static int dcin_uv_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * rt_stat indicates if dc is undervolted. If so dc_present
+	 * should be marked removed
+	 */
+	bool dc_present = !rt_stat;
+
+	pr_debug("chip->dc_present = %d dc_present = %d\n",
+			chip->dc_present, dc_present);
+
+	if (chip->dc_present && !dc_present) {
+		/* dc removed */
+		chip->dc_present = dc_present;
+		handle_dc_removal(chip);
+	}
+
+	if (!chip->dc_present && dc_present) {
+		/* dc inserted */
+		chip->dc_present = dc_present;
+		handle_dc_insertion(chip);
+	}
+
+	return 0;
+}
+
+static int dcin_ov_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * rt_stat indicates if dc is overvolted. If so dc_present
+	 * should be marked removed
+	 */
+	bool dc_present = !rt_stat;
+
+	pr_debug("chip->dc_present = %d dc_present = %d\n",
+			chip->dc_present, dc_present);
+
+	chip->dc_ov = !!rt_stat;
+
+	if (chip->dc_present && !dc_present) {
+		/* dc removed */
+		chip->dc_present = dc_present;
+		handle_dc_removal(chip);
+	}
+
+	if (!chip->dc_present && dc_present) {
+		/* dc inserted */
+		chip->dc_present = dc_present;
+		handle_dc_insertion(chip);
+	}
+	return 0;
+}
+
+static int handle_usb_removal(struct smb135x_chg *chip)
+{
+	union power_supply_propval pval = {0,};
+
+	if (chip->usb_psy) {
+		cancel_delayed_work_sync(&chip->hvdcp_det_work);
+		pm_relax(chip->dev);
+		pr_debug("setting usb psy type = %d\n",
+				POWER_SUPPLY_TYPE_UNKNOWN);
+		pval.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+
+		pr_debug("setting usb psy present = %d\n", chip->usb_present);
+		pval.intval = chip->usb_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&pval);
+
+		pr_debug("Setting usb psy dp=r dm=r\n");
+		pval.intval = POWER_SUPPLY_DP_DM_DPR_DMR;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_DP_DM,
+				&pval);
+	}
+	return 0;
+}
+
+static int rerun_apsd(struct smb135x_chg *chip)
+{
+	int rc;
+
+	pr_debug("Rerunning APSD\nDisabling APSD\n");
+	rc = smb135x_masked_write(chip, CFG_11_REG, AUTO_SRC_DET_EN_BIT, 0);
+	if (rc) {
+		dev_err(chip->dev, "Couldn't Disable APSD rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("Allow only 9V chargers\n");
+	rc = smb135x_masked_write(chip, CFG_C_REG,
+			USBIN_ADAPTER_ALLOWANCE_MASK, ALLOW_9V_ONLY);
+	if (rc)
+		dev_err(chip->dev, "Couldn't Allow 9V rc=%d\n", rc);
+	pr_debug("Enabling APSD\n");
+	rc = smb135x_masked_write(chip, CFG_11_REG, AUTO_SRC_DET_EN_BIT,
+				AUTO_SRC_DET_EN_BIT);
+	if (rc)
+		dev_err(chip->dev, "Couldn't Enable APSD rc=%d\n", rc);
+	pr_debug("Allow 5V-9V\n");
+	rc = smb135x_masked_write(chip, CFG_C_REG,
+			USBIN_ADAPTER_ALLOWANCE_MASK, ALLOW_5V_TO_9V);
+	if (rc)
+		dev_err(chip->dev, "Couldn't Allow 5V-9V rc=%d\n", rc);
+	return rc;
+}
+
+static void smb135x_hvdcp_det_work(struct work_struct *work)
+{
+	int rc;
+	u8 reg;
+	struct smb135x_chg *chip = container_of(work, struct smb135x_chg,
+							hvdcp_det_work.work);
+	union power_supply_propval pval = {0,};
+
+	rc = smb135x_read(chip, STATUS_7_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_7_REG rc = %d\n", rc);
+		goto end;
+	}
+	pr_debug("STATUS_7_REG = 0x%02X\n", reg);
+
+	if (reg) {
+		pr_debug("HVDCP detected; notifying USB PSY\n");
+		pval.intval = POWER_SUPPLY_TYPE_USB_HVDCP;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+	}
+end:
+	pm_relax(chip->dev);
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static int handle_usb_insertion(struct smb135x_chg *chip)
+{
+	u8 reg;
+	int rc;
+	char *usb_type_name = "null";
+	enum power_supply_type usb_supply_type;
+	union power_supply_propval pval = {0,};
+
+	/* usb inserted */
+	rc = smb135x_read(chip, STATUS_5_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+		return rc;
+	}
+	/*
+	 * Report the charger type as UNKNOWN if the
+	 * apsd-fail flag is set. This notifies the USB driver
+	 * to initiate a s/w based charger type detection.
+	 */
+	if (chip->workaround_flags & WRKARND_APSD_FAIL)
+		reg = 0;
+
+	usb_type_name = get_usb_type_name(reg);
+	usb_supply_type = get_usb_supply_type(reg);
+	pr_debug("inserted %s, usb psy type = %d stat_5 = 0x%02x apsd_rerun = %d\n",
+			usb_type_name, usb_supply_type, reg, chip->apsd_rerun);
+
+	if (chip->batt_present && !chip->apsd_rerun && chip->usb_psy) {
+		if (usb_supply_type == POWER_SUPPLY_TYPE_USB) {
+			pr_debug("Setting usb psy dp=f dm=f SDP and rerun\n");
+			pval.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_DP_DM,
+					&pval);
+			chip->apsd_rerun = true;
+			rerun_apsd(chip);
+			/* rising edge of src detect will happen in a few ms */
+			return 0;
+		}
+
+		pr_debug("Set usb psy dp=f dm=f DCP and no rerun\n");
+		pval.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_DP_DM,
+				&pval);
+	}
+
+	if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP) {
+		pr_debug("schedule hvdcp detection worker\n");
+		pm_stay_awake(chip->dev);
+		schedule_delayed_work(&chip->hvdcp_det_work,
+					msecs_to_jiffies(HVDCP_NOTIFY_MS));
+	}
+
+	if (chip->usb_psy) {
+		if (chip->bms_controlled_charging) {
+			/* enable charging on USB insertion */
+			rc = smb135x_charging_enable(chip, true);
+			if (rc < 0)
+				dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+						rc);
+		}
+		pr_debug("setting usb psy type = %d\n", usb_supply_type);
+		pval.intval = usb_supply_type;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+
+		pr_debug("setting usb psy present = %d\n", chip->usb_present);
+		pval.intval = chip->usb_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&pval);
+	}
+	chip->apsd_rerun = false;
+	return 0;
+}
+
+/**
+ * usbin_uv_handler()
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+static int usbin_uv_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * rt_stat indicates if usb is undervolted
+	 */
+	bool usb_present = !rt_stat;
+
+	pr_debug("chip->usb_present = %d usb_present = %d\n",
+			chip->usb_present, usb_present);
+
+	return 0;
+}
+
+static int usbin_ov_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	union power_supply_propval pval = {0, };
+	/*
+	 * rt_stat indicates if usb is overvolted. If so usb_present
+	 * should be marked removed
+	 */
+	bool usb_present = !rt_stat;
+
+	pr_debug("chip->usb_present = %d usb_present = %d\n",
+			chip->usb_present, usb_present);
+	if (chip->usb_present && !usb_present) {
+		/* USB removed */
+		chip->usb_present = usb_present;
+		handle_usb_removal(chip);
+	} else if (!chip->usb_present && usb_present) {
+		/* USB inserted */
+		chip->usb_present = usb_present;
+		handle_usb_insertion(chip);
+	}
+
+	if (chip->usb_psy) {
+		pval.intval = rt_stat ? POWER_SUPPLY_HEALTH_OVERVOLTAGE
+					: POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_HEALTH, &pval);
+	}
+
+	return 0;
+}
+
+/**
+ * src_detect_handler() - this is called on rising edge when USB
+ *			charger type is detected and on falling edge when
+ *			USB voltage falls below the coarse detect voltage
+ *			(1V), use it for handling USB charger insertion
+ *			and removal.
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+static int src_detect_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	bool usb_present = !!rt_stat;
+
+	pr_debug("chip->usb_present = %d usb_present = %d\n",
+			chip->usb_present, usb_present);
+
+	if (!chip->usb_present && usb_present) {
+		/* USB inserted */
+		chip->usb_present = usb_present;
+		handle_usb_insertion(chip);
+	} else if (usb_present && chip->apsd_rerun) {
+		handle_usb_insertion(chip);
+	} else if (chip->usb_present && !usb_present) {
+		chip->usb_present = !chip->usb_present;
+		handle_usb_removal(chip);
+	}
+
+	return 0;
+}
+
+static int chg_inhibit_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * charger is inserted when the battery voltage is high
+	 * so h/w won't start charging just yet. Treat this as
+	 * battery full
+	 */
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	if (!chip->inhibit_disabled)
+		chip->chg_done_batt_full = !!rt_stat;
+	return 0;
+}
+
+struct smb_irq_info {
+	const char		*name;
+	int			(*smb_irq)(struct smb135x_chg *chip,
+							u8 rt_stat);
+	int			high;
+	int			low;
+};
+
+struct irq_handler_info {
+	u8			stat_reg;
+	u8			val;
+	u8			prev_val;
+	struct smb_irq_info	irq_info[4];
+};
+
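+/*
+ * irq_info[0] maps to the lowest two bits of stat_reg and irq_info[3] to the
+ * highest, matching the bit shifts applied in smb135x_chg_stat_handler().
+ */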
+static struct irq_handler_info handlers[] = {
+	{IRQ_A_REG, 0, 0,
+		{
+			{
+				.name		= "cold_soft",
+				.smb_irq	= cold_soft_handler,
+			},
+			{
+				.name		= "hot_soft",
+				.smb_irq	= hot_soft_handler,
+			},
+			{
+				.name		= "cold_hard",
+				.smb_irq	= cold_hard_handler,
+			},
+			{
+				.name		= "hot_hard",
+				.smb_irq	= hot_hard_handler,
+			},
+		},
+	},
+	{IRQ_B_REG, 0, 0,
+		{
+			{
+				.name		= "chg_hot",
+				.smb_irq	= chg_hot_handler,
+			},
+			{
+				.name		= "vbat_low",
+				.smb_irq	= vbat_low_handler,
+			},
+			{
+				.name		= "battery_missing",
+				.smb_irq	= battery_missing_handler,
+			},
+			{
+				.name		= "battery_missing",
+				.smb_irq	= battery_missing_handler,
+			},
+		},
+	},
+	{IRQ_C_REG, 0, 0,
+		{
+			{
+				.name		= "chg_term",
+				.smb_irq	= chg_term_handler,
+			},
+			{
+				.name		= "taper",
+				.smb_irq	= taper_handler,
+			},
+			{
+				.name		= "recharge",
+				.smb_irq	= recharge_handler,
+			},
+			{
+				.name		= "fast_chg",
+				.smb_irq	= fast_chg_handler,
+			},
+		},
+	},
+	{IRQ_D_REG, 0, 0,
+		{
+			{
+				.name		= "prechg_timeout",
+			},
+			{
+				.name		= "safety_timeout",
+				.smb_irq	= safety_timeout_handler,
+			},
+			{
+				.name		= "aicl_done",
+			},
+			{
+				.name		= "battery_ov",
+			},
+		},
+	},
+	{IRQ_E_REG, 0, 0,
+		{
+			{
+				.name		= "usbin_uv",
+				.smb_irq	= usbin_uv_handler,
+			},
+			{
+				.name		= "usbin_ov",
+				.smb_irq	= usbin_ov_handler,
+			},
+			{
+				.name		= "dcin_uv",
+				.smb_irq	= dcin_uv_handler,
+			},
+			{
+				.name		= "dcin_ov",
+				.smb_irq	= dcin_ov_handler,
+			},
+		},
+	},
+	{IRQ_F_REG, 0, 0,
+		{
+			{
+				.name		= "power_ok",
+				.smb_irq	= power_ok_handler,
+			},
+			{
+				.name		= "rid",
+				.smb_irq	= rid_handler,
+			},
+			{
+				.name		= "otg_fail",
+			},
+			{
+				.name		= "otg_oc",
+				.smb_irq	= otg_oc_handler,
+			},
+		},
+	},
+	{IRQ_G_REG, 0, 0,
+		{
+			{
+				.name		= "chg_inhibit",
+				.smb_irq	= chg_inhibit_handler,
+			},
+			{
+				.name		= "chg_error",
+			},
+			{
+				.name		= "wd_timeout",
+			},
+			{
+				.name		= "src_detect",
+				.smb_irq	= src_detect_handler,
+			},
+		},
+	},
+};
+
+static int smb135x_irq_read(struct smb135x_chg *chip)
+{
+	int rc, i;
+
+	/*
+	 * When dcin path is suspended the irq triggered status is not cleared
+	 * causing a storm. To prevent this situation unsuspend dcin path while
+	 * reading interrupts and restore its status back.
+	 */
+	mutex_lock(&chip->path_suspend_lock);
+
+	if (chip->dc_suspended)
+		__smb135x_dc_suspend(chip, false);
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+		rc = smb135x_read(chip, handlers[i].stat_reg,
+						&handlers[i].val);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+					handlers[i].stat_reg, rc);
+			handlers[i].val = 0;
+			continue;
+		}
+	}
+
+	if (chip->dc_suspended)
+		__smb135x_dc_suspend(chip, true);
+
+	mutex_unlock(&chip->path_suspend_lock);
+
+	return rc;
+}
+#define IRQ_LATCHED_MASK	0x02
+#define IRQ_STATUS_MASK		0x01
+#define BITS_PER_IRQ		2
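+/*
+ * Each IRQ status register packs four interrupt sources, two bits per
+ * source: bit (2 * j) is the real-time status and bit (2 * j + 1) is the
+ * latched "triggered" flag, hence the j * BITS_PER_IRQ shifts below.
+ */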
+static irqreturn_t smb135x_chg_stat_handler(int irq, void *dev_id)
+{
+	struct smb135x_chg *chip = dev_id;
+	int i, j;
+	u8 triggered;
+	u8 changed;
+	u8 rt_stat, prev_rt_stat;
+	int rc;
+	int handler_count = 0;
+
+	mutex_lock(&chip->irq_complete);
+	chip->irq_waiting = true;
+	if (!chip->resume_completed) {
+		dev_dbg(chip->dev, "IRQ triggered before device-resume\n");
+		disable_irq_nosync(irq);
+		mutex_unlock(&chip->irq_complete);
+		return IRQ_HANDLED;
+	}
+	chip->irq_waiting = false;
+
+	smb135x_irq_read(chip);
+	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+		for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) {
+			triggered = handlers[i].val
+			       & (IRQ_LATCHED_MASK << (j * BITS_PER_IRQ));
+			rt_stat = handlers[i].val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			prev_rt_stat = handlers[i].prev_val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			changed = prev_rt_stat ^ rt_stat;
+
+			if (triggered || changed)
+				rt_stat ? handlers[i].irq_info[j].high++ :
+						handlers[i].irq_info[j].low++;
+
+			if ((triggered || changed)
+				&& handlers[i].irq_info[j].smb_irq != NULL) {
+				handler_count++;
+				rc = handlers[i].irq_info[j].smb_irq(chip,
+								rt_stat);
+				if (rc < 0)
+					dev_err(chip->dev,
+						"Couldn't handle %d irq for reg 0x%02x rc = %d\n",
+						j, handlers[i].stat_reg, rc);
+			}
+		}
+		handlers[i].prev_val = handlers[i].val;
+	}
+
+	pr_debug("handler count = %d\n", handler_count);
+	if (handler_count) {
+		pr_debug("batt psy changed\n");
+		power_supply_changed(chip->batt_psy);
+		if (chip->usb_psy) {
+			pr_debug("usb psy changed\n");
+			power_supply_changed(chip->usb_psy);
+		}
+		if (chip->dc_psy_type != -EINVAL) {
+			pr_debug("dc psy changed\n");
+			power_supply_changed(chip->dc_psy);
+		}
+	}
+
+	mutex_unlock(&chip->irq_complete);
+
+	return IRQ_HANDLED;
+}
+
+#define LAST_CNFG_REG	0x1F
+static int show_cnfg_regs(struct seq_file *m, void *data)
+{
+	struct smb135x_chg *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cnfg_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_cnfg_regs, chip);
+}
+
+static const struct file_operations cnfg_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cnfg_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_CMD_REG	0x40
+#define LAST_CMD_REG	0x42
+static int show_cmd_regs(struct seq_file *m, void *data)
+{
+	struct smb135x_chg *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cmd_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_cmd_regs, chip);
+}
+
+static const struct file_operations cmd_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cmd_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_STATUS_REG	0x46
+#define LAST_STATUS_REG		0x56
+static int show_status_regs(struct seq_file *m, void *data)
+{
+	struct smb135x_chg *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int status_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_status_regs, chip);
+}
+
+static const struct file_operations status_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= status_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_irq_count(struct seq_file *m, void *data)
+{
+	int i, j, total = 0;
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++)
+		for (j = 0; j < 4; j++) {
+			seq_printf(m, "%s=%d\t(high=%d low=%d)\n",
+						handlers[i].irq_info[j].name,
+						handlers[i].irq_info[j].high
+						+ handlers[i].irq_info[j].low,
+						handlers[i].irq_info[j].high,
+						handlers[i].irq_info[j].low);
+			total += (handlers[i].irq_info[j].high
+					+ handlers[i].irq_info[j].low);
+		}
+
+	seq_printf(m, "\n\tTotal = %d\n", total);
+
+	return 0;
+}
+
+static int irq_count_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_irq_count, chip);
+}
+
+static const struct file_operations irq_count_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= irq_count_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
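+/*
+ * get_reg()/set_reg() read and write the register selected by
+ * chip->peek_poke_address; the debugfs files wiring them up are presumably
+ * created elsewhere in the probe path (not shown here).
+ */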
+static int get_reg(void *data, u64 *val)
+{
+	struct smb135x_chg *chip = data;
+	int rc;
+	u8 temp;
+
+	rc = smb135x_read(chip, chip->peek_poke_address, &temp);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't read reg %x rc = %d\n",
+			chip->peek_poke_address, rc);
+		return -EAGAIN;
+	}
+	*val = temp;
+	return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+	struct smb135x_chg *chip = data;
+	int rc;
+	u8 temp;
+
+	temp = (u8) val;
+	rc = smb135x_write(chip, chip->peek_poke_address, temp);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't write 0x%02x to 0x%02x rc= %d\n",
+			chip->peek_poke_address, temp, rc);
+		return -EAGAIN;
+	}
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n");
+
+static int force_irq_set(void *data, u64 val)
+{
+	struct smb135x_chg *chip = data;
+
+	smb135x_chg_stat_handler(chip->client->irq, data);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_irq_ops, NULL, force_irq_set, "0x%02llx\n");
+
+static int force_rechg_set(void *data, u64 val)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = data;
+
+		pr_debug("Charging disabled; force recharge not allowed\n");
+		pr_debug("Charging Disabled force recharge not allowed\n");
+		return -EINVAL;
+	}
+
+	if (!chip->inhibit_disabled) {
+		rc = smb135x_masked_write(chip, CFG_14_REG, EN_CHG_INHIBIT_BIT,
+					0);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't disable charge-inhibit rc=%d\n", rc);
+
+		/* delay for charge-inhibit to take effect */
+		msleep(500);
+	}
+
+	rc |= smb135x_charging(chip, false);
+	rc |= smb135x_charging(chip, true);
+
+	if (!chip->inhibit_disabled) {
+		rc |= smb135x_masked_write(chip, CFG_14_REG,
+				EN_CHG_INHIBIT_BIT, EN_CHG_INHIBIT_BIT);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't enable charge-inhibit rc=%d\n", rc);
+	}
+
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_rechg_ops, NULL, force_rechg_set, "0x%02llx\n");
+
+#ifdef DEBUG
+static void dump_regs(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+					addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+					addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+					addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+}
+#else
+static void dump_regs(struct smb135x_chg *chip)
+{
+}
+#endif
+
+static int determine_initial_status(struct smb135x_chg *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+	u8 reg;
+
+	/*
+	 * It is okay to read the interrupt status here since
+	 * interrupts aren't requested yet. Reading the interrupt
+	 * status clears the interrupt, so be careful to read
+	 * interrupt status only in interrupt handling code.
+	 */
+
+	chip->batt_present = true;
+	rc = smb135x_read(chip, IRQ_B_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq b rc = %d\n", rc);
+		return rc;
+	}
+	if (reg & IRQ_B_BATT_TERMINAL_BIT || reg & IRQ_B_BATT_MISSING_BIT)
+		chip->batt_present = false;
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read status 4 rc = %d\n", rc);
+		return rc;
+	}
+	/* treat battery gone if less than 2V */
+	if (reg & BATT_LESS_THAN_2V)
+		chip->batt_present = false;
+
+	rc = smb135x_read(chip, IRQ_A_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq A rc = %d\n", rc);
+		return rc;
+	}
+
+	if (reg & IRQ_A_HOT_HARD_BIT)
+		chip->batt_hot = true;
+	if (reg & IRQ_A_COLD_HARD_BIT)
+		chip->batt_cold = true;
+	if (reg & IRQ_A_HOT_SOFT_BIT)
+		chip->batt_warm = true;
+	if (reg & IRQ_A_COLD_SOFT_BIT)
+		chip->batt_cool = true;
+
+	rc = smb135x_read(chip, IRQ_C_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq C rc = %d\n", rc);
+		return rc;
+	}
+	if (reg & IRQ_C_TERM_BIT)
+		chip->chg_done_batt_full = true;
+
+	rc = smb135x_read(chip, IRQ_E_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq E rc = %d\n", rc);
+		return rc;
+	}
+	chip->usb_present = !(reg & IRQ_E_USB_OV_BIT)
+				&& !(reg & IRQ_E_USB_UV_BIT);
+	chip->dc_present = !(reg & IRQ_E_DC_OV_BIT) && !(reg & IRQ_E_DC_UV_BIT);
+
+	if (chip->usb_present)
+		handle_usb_insertion(chip);
+	else
+		handle_usb_removal(chip);
+
+	if (chip->dc_psy_type != -EINVAL) {
+		if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS) {
+			/*
+			 * put the dc path in suspend state if it is powered
+			 * by wireless charger
+			 */
+			if (chip->dc_present)
+				smb135x_path_suspend(chip, DC, CURRENT, false);
+			else
+				smb135x_path_suspend(chip, DC, CURRENT, true);
+		}
+	}
+
+	chip->usb_slave_present = is_usb_slave_present(chip);
+	if (chip->usb_psy && !chip->id_line_not_connected) {
+		pr_debug("setting usb psy usb_otg = %d\n",
+				chip->usb_slave_present);
+		pval.intval = chip->usb_slave_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_USB_OTG, &pval);
+	}
+	return 0;
+}
+
+static int smb135x_hw_init(struct smb135x_chg *chip)
+{
+	int rc;
+	int i;
+	u8 reg, mask;
+
+	if (chip->pinctrl_state_name) {
+		chip->smb_pinctrl = pinctrl_get_select(chip->dev,
+						chip->pinctrl_state_name);
+		if (IS_ERR(chip->smb_pinctrl)) {
+			pr_err("Could not get/set %s pinctrl state rc = %ld\n",
+						chip->pinctrl_state_name,
+						PTR_ERR(chip->smb_pinctrl));
+			return PTR_ERR(chip->smb_pinctrl);
+		}
+	}
+
+	if (chip->therm_bias_vreg) {
+		rc = regulator_enable(chip->therm_bias_vreg);
+		if (rc) {
+			pr_err("Couldn't enable therm-bias rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Enable the USB data line pull-up regulator; this is needed for
+	 * the D+ line to be at the proper voltage for HVDCP charger
+	 * detection.
+	 */
+	if (chip->usb_pullup_vreg) {
+		rc = regulator_enable(chip->usb_pullup_vreg);
+		if (rc) {
+			pr_err("Unable to enable data line pull-up regulator rc=%d\n",
+					rc);
+			if (chip->therm_bias_vreg)
+				regulator_disable(chip->therm_bias_vreg);
+			return rc;
+		}
+	}
+
+	rc = smb135x_enable_volatile_writes(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't configure for volatile rc = %d\n",
+				rc);
+		goto free_regulator;
+	}
+
+	/*
+	 * force using current from the register i.e. ignore auto
+	 * power source detect (APSD) mA ratings
+	 */
+	mask = USE_REGISTER_FOR_CURRENT;
+
+	if (chip->workaround_flags & WRKARND_USB100_BIT)
+		reg = 0;
+	else
+		/* this ignores APSD results */
+		reg = USE_REGISTER_FOR_CURRENT;
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT, mask, reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc);
+		goto free_regulator;
+	}
+
+	/* set bit 0 = 100mA bit 1 = 500mA and set register control */
+	rc = smb135x_masked_write(chip, CFG_E_REG,
+			POLARITY_100_500_BIT | USB_CTRL_BY_PIN_BIT,
+			POLARITY_100_500_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set usbin cfg rc=%d\n", rc);
+		goto free_regulator;
+	}
+
+	/*
+	 * set chg en by cmd register, set chg en by writing bit 1,
+	 * enable auto pre to fast, enable current termination, enable
+	 * auto recharge, enable chg inhibition based on the dt flag
+	 */
+	if (chip->inhibit_disabled)
+		reg = 0;
+	else
+		reg = EN_CHG_INHIBIT_BIT;
+
+	rc = smb135x_masked_write(chip, CFG_14_REG,
+			CHG_EN_BY_PIN_BIT | CHG_EN_ACTIVE_LOW_BIT
+			| PRE_TO_FAST_REQ_CMD_BIT | DISABLE_AUTO_RECHARGE_BIT
+			| EN_CHG_INHIBIT_BIT, reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set cfg 14 rc=%d\n", rc);
+		goto free_regulator;
+	}
+
+	/* control USB suspend via command bits */
+	rc = smb135x_masked_write(chip, USBIN_DCIN_CFG_REG,
+		USBIN_SUSPEND_VIA_COMMAND_BIT, USBIN_SUSPEND_VIA_COMMAND_BIT);
+
+	/* set the float voltage */
+	if (chip->vfloat_mv != -EINVAL) {
+		rc = smb135x_float_voltage_set(chip, chip->vfloat_mv);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set float voltage rc = %d\n", rc);
+			goto free_regulator;
+		}
+	}
+
+	/* set iterm */
+	if (chip->iterm_ma != -EINVAL) {
+		if (chip->iterm_disabled) {
+			dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n");
+			rc = -EINVAL;
+			goto free_regulator;
+		} else {
+			if (chip->iterm_ma <= 50)
+				reg = CHG_ITERM_50MA;
+			else if (chip->iterm_ma <= 100)
+				reg = CHG_ITERM_100MA;
+			else if (chip->iterm_ma <= 150)
+				reg = CHG_ITERM_150MA;
+			else if (chip->iterm_ma <= 200)
+				reg = CHG_ITERM_200MA;
+			else if (chip->iterm_ma <= 250)
+				reg = CHG_ITERM_250MA;
+			else if (chip->iterm_ma <= 300)
+				reg = CHG_ITERM_300MA;
+			else if (chip->iterm_ma <= 500)
+				reg = CHG_ITERM_500MA;
+			else
+				reg = CHG_ITERM_600MA;
+
+			rc = smb135x_masked_write(chip, CFG_3_REG,
+							CHG_ITERM_MASK, reg);
+			if (rc) {
+				dev_err(chip->dev,
+					"Couldn't set iterm rc = %d\n", rc);
+				goto free_regulator;
+			}
+
+			rc = smb135x_masked_write(chip, CFG_14_REG,
+						DISABLE_CURRENT_TERM_BIT, 0);
+			if (rc) {
+				dev_err(chip->dev,
+					"Couldn't enable iterm rc = %d\n", rc);
+				goto free_regulator;
+			}
+		}
+	} else  if (chip->iterm_disabled) {
+		rc = smb135x_masked_write(chip, CFG_14_REG,
+					DISABLE_CURRENT_TERM_BIT,
+					DISABLE_CURRENT_TERM_BIT);
+		if (rc) {
+			dev_err(chip->dev, "Couldn't set iterm rc = %d\n",
+								rc);
+			goto free_regulator;
+		}
+	}
+
+	/* set the safety timer */
+	if (chip->safety_time != -EINVAL) {
+		if (chip->safety_time == 0) {
+			/* safety timer disabled */
+			reg = 1 << SAFETY_TIME_EN_SHIFT;
+			rc = smb135x_masked_write(chip, CFG_16_REG,
+						SAFETY_TIME_EN_BIT, reg);
+			if (rc < 0) {
+				dev_err(chip->dev,
+				"Couldn't disable safety timer rc = %d\n",
+				rc);
+				goto free_regulator;
+			}
+		} else {
+			for (i = 0; i < ARRAY_SIZE(chg_time); i++) {
+				if (chip->safety_time <= chg_time[i]) {
+					reg = i << SAFETY_TIME_MINUTES_SHIFT;
+					break;
+				}
+			}
+			rc = smb135x_masked_write(chip, CFG_16_REG,
+				SAFETY_TIME_EN_BIT | SAFETY_TIME_MINUTES_MASK,
+				reg);
+			if (rc < 0) {
+				dev_err(chip->dev,
+					"Couldn't set safety timer rc = %d\n",
+					rc);
+				goto free_regulator;
+			}
+		}
+	}
+
+	/* battery missing detection */
+	rc = smb135x_masked_write(chip, CFG_19_REG,
+			BATT_MISSING_ALGO_BIT | BATT_MISSING_THERM_BIT,
+			chip->bmd_algo_disabled ? BATT_MISSING_THERM_BIT :
+						BATT_MISSING_ALGO_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set batt_missing config = %d\n",
+									rc);
+		goto free_regulator;
+	}
+
+	/* set maximum fastchg current */
+	if (chip->fastchg_ma != -EINVAL) {
+		rc = smb135x_set_fastchg_current(chip, chip->fastchg_ma);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set fastchg current = %d\n",
+									rc);
+			goto free_regulator;
+		}
+	}
+
+	if (chip->usb_pullup_vreg) {
+		/* enable 9V HVDCP adapter support */
+		rc = smb135x_masked_write(chip, CFG_E_REG, HVDCP_5_9_BIT,
+					HVDCP_5_9_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't request for 5 or 9V rc=%d\n", rc);
+			goto free_regulator;
+		}
+	}
+
+	if (chip->gamma_setting) {
+		rc = smb135x_masked_write(chip, CFG_1B_REG, COLD_HARD_MASK,
+				chip->gamma_setting[0] << COLD_HARD_SHIFT);
+
+		rc |= smb135x_masked_write(chip, CFG_1B_REG, HOT_HARD_MASK,
+				chip->gamma_setting[1] << HOT_HARD_SHIFT);
+
+		rc |= smb135x_masked_write(chip, CFG_1B_REG, COLD_SOFT_MASK,
+				chip->gamma_setting[2] << COLD_SOFT_SHIFT);
+
+		rc |= smb135x_masked_write(chip, CFG_1B_REG, HOT_SOFT_MASK,
+				chip->gamma_setting[3] << HOT_SOFT_SHIFT);
+		if (rc < 0)
+			goto free_regulator;
+	}
+
+	__smb135x_charging(chip, chip->chg_enabled);
+
+	/* interrupt enabling - active low */
+	if (chip->client->irq) {
+		mask = CHG_STAT_IRQ_ONLY_BIT | CHG_STAT_ACTIVE_HIGH_BIT
+			| CHG_STAT_DISABLE_BIT;
+		reg = CHG_STAT_IRQ_ONLY_BIT;
+		rc = smb135x_masked_write(chip, CFG_17_REG, mask, reg);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set irq config rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+
+		/* enabling only interesting interrupts */
+		rc = smb135x_write(chip, IRQ_CFG_REG,
+			IRQ_BAT_HOT_COLD_HARD_BIT
+			| IRQ_BAT_HOT_COLD_SOFT_BIT
+			| IRQ_OTG_OVER_CURRENT_BIT
+			| IRQ_INTERNAL_TEMPERATURE_BIT
+			| IRQ_USBIN_UV_BIT);
+
+		rc |= smb135x_write(chip, IRQ2_CFG_REG,
+			IRQ2_SAFETY_TIMER_BIT
+			| IRQ2_CHG_ERR_BIT
+			| IRQ2_CHG_PHASE_CHANGE_BIT
+			| IRQ2_POWER_OK_BIT
+			| IRQ2_BATT_MISSING_BIT
+			| IRQ2_VBAT_LOW_BIT);
+
+		rc |= smb135x_write(chip, IRQ3_CFG_REG, IRQ3_SRC_DETECT_BIT
+				| IRQ3_DCIN_UV_BIT | IRQ3_RID_DETECT_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set irq enable rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	/* resume threshold */
+	if (chip->resume_delta_mv != -EINVAL)
+		smb135x_set_resume_threshold(chip, chip->resume_delta_mv);
+
+	/* DC path current settings */
+	if (chip->dc_psy_type != -EINVAL) {
+		rc = smb135x_set_dc_chg_current(chip, chip->dc_psy_ma);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set dc charge current rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	/*
+	 * On some devices the battery is powered via external sources which
+	 * could raise its voltage above the float voltage. smb135x chips go
+	 * into reverse boost in such a situation and the workaround is to
+	 * disable float voltage compensation (note that the battery will
+	 * appear hot/cold when powered via an external source).
+	 */
+
+	if (chip->soft_vfloat_comp_disabled) {
+		mask = HOT_SOFT_VFLOAT_COMP_EN_BIT
+				| COLD_SOFT_VFLOAT_COMP_EN_BIT;
+		rc = smb135x_masked_write(chip, CFG_1A_REG, mask, 0);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	if (chip->soft_current_comp_disabled) {
+		mask = HOT_SOFT_CURRENT_COMP_EN_BIT
+				| COLD_SOFT_CURRENT_COMP_EN_BIT;
+		rc = smb135x_masked_write(chip, CFG_1A_REG, mask, 0);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't disable soft current rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	/*
+	 * Command mode for OTG control. This gives us RID interrupts while
+	 * keeping the 5V OTG enable under i2c register control.
+	 */
+	rc = smb135x_masked_write(chip, USBIN_OTG_REG, OTG_CNFG_MASK,
+			OTG_CNFG_COMMAND_CTRL);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't write to otg cfg reg rc = %d\n",
+				rc);
+		goto free_regulator;
+	}
+	return 0;
+
+free_regulator:
+	if (chip->therm_bias_vreg)
+		regulator_disable(chip->therm_bias_vreg);
+	if (chip->usb_pullup_vreg)
+		regulator_disable(chip->usb_pullup_vreg);
+	return rc;
+}
+
+static const struct of_device_id smb135x_match_table[] = {
+	{
+		.compatible	= "qcom,smb1356-charger",
+		.data		= &version_data[V_SMB1356],
+	},
+	{
+		.compatible	= "qcom,smb1357-charger",
+		.data		= &version_data[V_SMB1357],
+	},
+	{
+		.compatible	= "qcom,smb1358-charger",
+		.data		= &version_data[V_SMB1358],
+	},
+	{
+		.compatible	= "qcom,smb1359-charger",
+		.data		= &version_data[V_SMB1359],
+	},
+	{ },
+};
+
+#define DC_MA_MIN 300
+#define DC_MA_MAX 2000
+#define NUM_GAMMA_VALUES 4
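+/*
+ * The properties below are read from the charger's devicetree node; most are
+ * optional. Illustrative values only:
+ *	qcom,float-voltage-mv = <4350>;
+ *	qcom,iterm-ma = <100>;
+ *	qcom,dc-psy-type = "Mains";
+ *	qcom,dc-psy-ma = <1500>;
+ */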
+static int smb_parse_dt(struct smb135x_chg *chip)
+{
+	int rc;
+	struct device_node *node = chip->dev->of_node;
+	const char *dc_psy_type;
+
+	if (!node) {
+		dev_err(chip->dev, "device tree info missing\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc < 0)
+		chip->vfloat_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,charging-timeout",
+						&chip->safety_time);
+	if (rc < 0)
+		chip->safety_time = -EINVAL;
+
+	if (!rc &&
+		(chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) {
+		dev_err(chip->dev, "Bad charging-timeout %d\n",
+						chip->safety_time);
+		return -EINVAL;
+	}
+
+	chip->bmd_algo_disabled = of_property_read_bool(node,
+						"qcom,bmd-algo-disabled");
+
+	chip->dc_psy_type = -EINVAL;
+	dc_psy_type = of_get_property(node, "qcom,dc-psy-type", NULL);
+	if (dc_psy_type) {
+		if (strcmp(dc_psy_type, "Mains") == 0)
+			chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS;
+		else if (strcmp(dc_psy_type, "Wireless") == 0)
+			chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS;
+	}
+
+	if (chip->dc_psy_type != -EINVAL) {
+		rc = of_property_read_u32(node, "qcom,dc-psy-ma",
+							&chip->dc_psy_ma);
+		if (rc < 0) {
+			dev_err(chip->dev,
+					"no mA current for dc rc = %d\n", rc);
+			return rc;
+		}
+
+		if (chip->dc_psy_ma < DC_MA_MIN
+				|| chip->dc_psy_ma > DC_MA_MAX) {
+			dev_err(chip->dev, "Bad dc mA %d\n", chip->dc_psy_ma);
+			return -EINVAL;
+		}
+	}
+
+	rc = of_property_read_u32(node, "qcom,recharge-thresh-mv",
+						&chip->resume_delta_mv);
+	if (rc < 0)
+		chip->resume_delta_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,iterm-ma", &chip->iterm_ma);
+	if (rc < 0)
+		chip->iterm_ma = -EINVAL;
+
+	chip->iterm_disabled = of_property_read_bool(node,
+						"qcom,iterm-disabled");
+
+	chip->chg_disabled_permanently = (of_property_read_bool(node,
+						"qcom,charging-disabled"));
+	chip->chg_enabled = !chip->chg_disabled_permanently;
+
+	chip->inhibit_disabled = of_property_read_bool(node,
+						"qcom,inhibit-disabled");
+
+	chip->bms_controlled_charging = of_property_read_bool(node,
+					"qcom,bms-controlled-charging");
+
+	rc = of_property_read_string(node, "qcom,bms-psy-name",
+						&chip->bms_psy_name);
+	if (rc)
+		chip->bms_psy_name = NULL;
+
+	rc = of_property_read_u32(node, "qcom,fastchg-ma", &chip->fastchg_ma);
+	if (rc < 0)
+		chip->fastchg_ma = -EINVAL;
+
+	chip->soft_vfloat_comp_disabled = of_property_read_bool(node,
+					"qcom,soft-vfloat-comp-disabled");
+
+	chip->soft_current_comp_disabled = of_property_read_bool(node,
+					"qcom,soft-current-comp-disabled");
+
+	if (of_find_property(node, "therm-bias-supply", NULL)) {
+		/* get the thermistor bias regulator */
+		chip->therm_bias_vreg = devm_regulator_get(chip->dev,
+							"therm-bias");
+		if (IS_ERR(chip->therm_bias_vreg))
+			return PTR_ERR(chip->therm_bias_vreg);
+	}
+
+	/*
+	 * The gamma value indicates the ratio of the pull-up resistors and
+	 * the NTC resistor in the battery pack. There are 4 options; refer
+	 * to the graphical user interface and choose the right one.
+	 */
+	if (of_find_property(node, "qcom,gamma-setting",
+					&chip->gamma_setting_num)) {
+		chip->gamma_setting_num = chip->gamma_setting_num /
+					sizeof(chip->gamma_setting_num);
+		if (chip->gamma_setting_num != NUM_GAMMA_VALUES) {
+			pr_err("Gamma setting not correct!\n");
+			return -EINVAL;
+		}
+
+		chip->gamma_setting = devm_kzalloc(chip->dev,
+			chip->gamma_setting_num *
+				sizeof(chip->gamma_setting_num), GFP_KERNEL);
+		if (!chip->gamma_setting) {
+			pr_err("gamma setting kzalloc failed!\n");
+			return -ENOMEM;
+		}
+
+		rc = of_property_read_u32_array(node,
+					"qcom,gamma-setting",
+				chip->gamma_setting, chip->gamma_setting_num);
+		if (rc) {
+			pr_err("Couldn't read gamma setting, rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(node, "qcom,thermal-mitigation",
+					&chip->thermal_levels)) {
+		chip->thermal_mitigation = devm_kzalloc(chip->dev,
+			chip->thermal_levels,
+			GFP_KERNEL);
+
+		if (chip->thermal_mitigation == NULL) {
+			pr_err("thermal mitigation kzalloc() failed.\n");
+			return -ENOMEM;
+		}
+
+		chip->thermal_levels /= sizeof(int);
+		rc = of_property_read_u32_array(node,
+				"qcom,thermal-mitigation",
+				chip->thermal_mitigation, chip->thermal_levels);
+		if (rc) {
+			pr_err("Couldn't read therm limits rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(node, "usb-pullup-supply", NULL)) {
+		/* get the data line pull-up regulator */
+		chip->usb_pullup_vreg = devm_regulator_get(chip->dev,
+							"usb-pullup");
+		if (IS_ERR(chip->usb_pullup_vreg))
+			return PTR_ERR(chip->usb_pullup_vreg);
+	}
+
+	chip->pinctrl_state_name = of_get_property(node, "pinctrl-names", NULL);
+
+	chip->id_line_not_connected = of_property_read_bool(node,
+						"qcom,id-line-not-connected");
+	return 0;
+}
+
+static int create_debugfs_entries(struct smb135x_chg *chip)
+{
+	chip->debug_root = debugfs_create_dir("smb135x", NULL);
+	if (!chip->debug_root)
+		dev_err(chip->dev, "Couldn't create debug dir\n");
+
+	if (chip->debug_root) {
+		struct dentry *ent;
+
+		ent = debugfs_create_file("config_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cnfg_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create cnfg debug file\n");
+
+		ent = debugfs_create_file("status_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &status_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create status debug file\n");
+
+		ent = debugfs_create_file("cmd_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cmd_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create cmd debug file\n");
+
+		ent = debugfs_create_x32("address", S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->peek_poke_address));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create address debug file\n");
+
+		ent = debugfs_create_file("data", S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &poke_poke_debug_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create data debug file\n");
+
+		ent = debugfs_create_file("force_irq",
+					  S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &force_irq_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create force_irq debug file\n");
+
+		ent = debugfs_create_x32("skip_writes",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->skip_writes));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create skip writes debug file\n");
+
+		ent = debugfs_create_x32("skip_reads",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->skip_reads));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create skip reads debug file\n");
+
+		ent = debugfs_create_file("irq_count", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &irq_count_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create irq_count debug file\n");
+
+		ent = debugfs_create_file("force_recharge",
+					  S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &force_rechg_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create force recharge debug file\n");
+
+		ent = debugfs_create_x32("usb_suspend_votes",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->usb_suspended));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create usb_suspend_votes file\n");
+
+		ent = debugfs_create_x32("dc_suspend_votes",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->dc_suspended));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create dc_suspend_votes file\n");
+	}
+	return 0;
+}
+
+static int is_parallel_charger(struct i2c_client *client)
+{
+	struct device_node *node = client->dev.of_node;
+
+	return of_property_read_bool(node, "qcom,parallel-charger");
+}
+
+static int smb135x_main_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb135x_chg *chip;
+	struct power_supply *usb_psy;
+	struct power_supply_config batt_psy_cfg = {};
+	struct power_supply_config dc_psy_cfg = {};
+	u8 reg = 0;
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+
+	rc = smb_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(&client->dev, "Unable to parse DT nodes\n");
+		return rc;
+	}
+
+	usb_psy = power_supply_get_by_name("usb");
+	if (!usb_psy && chip->chg_enabled) {
+		dev_dbg(&client->dev, "USB supply not found; defer probe\n");
+		return -EPROBE_DEFER;
+	}
+	chip->usb_psy = usb_psy;
+
+	chip->fake_battery_soc = -EINVAL;
+
+	INIT_DELAYED_WORK(&chip->wireless_insertion_work,
+					wireless_insertion_work);
+
+	INIT_DELAYED_WORK(&chip->reset_otg_oc_count_work,
+					reset_otg_oc_count_work);
+	INIT_DELAYED_WORK(&chip->hvdcp_det_work, smb135x_hvdcp_det_work);
+	mutex_init(&chip->path_suspend_lock);
+	mutex_init(&chip->current_change_lock);
+	mutex_init(&chip->read_write_lock);
+	mutex_init(&chip->otg_oc_count_lock);
+	device_init_wakeup(chip->dev, true);
+	/* probe the device to check if it's actually connected */
+	rc = smb135x_read(chip, CFG_4_REG, &reg);
+	if (rc) {
+		pr_err("Failed to detect SMB135x, device may be absent\n");
+		return -ENODEV;
+	}
+
+	i2c_set_clientdata(client, chip);
+
+	rc = smb135x_chip_version_and_revision(chip);
+	if (rc) {
+		dev_err(&client->dev,
+			"Couldn't detect version/revision rc=%d\n", rc);
+		return rc;
+	}
+
+	dump_regs(chip);
+
+	rc = smb135x_regulator_init(chip);
+	if (rc) {
+		dev_err(&client->dev,
+			"Couldn't initialize regulator rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb135x_hw_init(chip);
+	if (rc < 0) {
+		dev_err(&client->dev,
+			"Unable to initialize hardware rc = %d\n", rc);
+		goto free_regulator;
+	}
+
+	rc = determine_initial_status(chip);
+	if (rc < 0) {
+		dev_err(&client->dev,
+			"Unable to determine init status rc = %d\n", rc);
+		goto free_regulator;
+	}
+
+	chip->batt_psy_d.name = "battery";
+	chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY;
+	chip->batt_psy_d.get_property = smb135x_battery_get_property;
+	chip->batt_psy_d.set_property = smb135x_battery_set_property;
+	chip->batt_psy_d.properties = smb135x_battery_properties;
+	chip->batt_psy_d.num_properties
+		= ARRAY_SIZE(smb135x_battery_properties);
+	chip->batt_psy_d.external_power_changed
+		= smb135x_external_power_changed;
+	chip->batt_psy_d.property_is_writeable = smb135x_battery_is_writeable;
+
+	batt_psy_cfg.drv_data = chip;
+	batt_psy_cfg.num_supplicants = 0;
+	if (chip->bms_controlled_charging) {
+		batt_psy_cfg.supplied_to = pm_batt_supplied_to;
+		batt_psy_cfg.num_supplicants
+					= ARRAY_SIZE(pm_batt_supplied_to);
+	}
+	chip->batt_psy = devm_power_supply_register(chip->dev,
+				&chip->batt_psy_d, &batt_psy_cfg);
+	if (IS_ERR(chip->batt_psy)) {
+		dev_err(&client->dev, "Unable to register batt_psy rc = %ld\n",
+				PTR_ERR(chip->batt_psy));
+		rc = PTR_ERR(chip->batt_psy);
+		goto free_regulator;
+	}
+
+	if (chip->dc_psy_type != -EINVAL) {
+		chip->dc_psy_d.name = "dc";
+		chip->dc_psy_d.type = chip->dc_psy_type;
+		chip->dc_psy_d.get_property = smb135x_dc_get_property;
+		chip->dc_psy_d.properties = smb135x_dc_properties;
+		chip->dc_psy_d.num_properties
+			= ARRAY_SIZE(smb135x_dc_properties);
+
+		dc_psy_cfg.drv_data = chip;
+		dc_psy_cfg.num_supplicants = 0;
+		chip->dc_psy = devm_power_supply_register(chip->dev,
+				&chip->dc_psy_d,
+				&dc_psy_cfg);
+
+		if (IS_ERR(chip->dc_psy)) {
+			dev_err(&client->dev,
+				"Unable to register dc_psy rc = %ld\n",
+				PTR_ERR(chip->dc_psy));
+			rc = PTR_ERR(chip->dc_psy);
+			goto free_regulator;
+		}
+	}
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	/* STAT irq configuration */
+	if (client->irq) {
+		rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+				smb135x_chg_stat_handler,
+				IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				"smb135x_chg_stat_irq", chip);
+		if (rc < 0) {
+			dev_err(&client->dev,
+				"request_irq for irq=%d  failed rc = %d\n",
+				client->irq, rc);
+			goto free_regulator;
+		}
+		enable_irq_wake(client->irq);
+	}
+
+	create_debugfs_entries(chip);
+	dev_info(chip->dev, "SMB135X version = %s revision = %s successfully probed batt=%d dc = %d usb = %d\n",
+			version_str[chip->version],
+			revision_str[chip->revision],
+			smb135x_get_prop_batt_present(chip),
+			chip->dc_present, chip->usb_present);
+	return 0;
+
+free_regulator:
+	smb135x_regulator_deinit(chip);
+	return rc;
+}
+
+static int smb135x_parallel_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb135x_chg *chip;
+	const struct of_device_id *match;
+	struct device_node *node = client->dev.of_node;
+	struct power_supply_config parallel_psy_cfg = {};
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+	chip->parallel_charger = true;
+	chip->dc_psy_type = -EINVAL;
+
+	chip->chg_enabled = !(of_property_read_bool(node,
+						"qcom,charging-disabled"));
+
+	rc = of_property_read_u32(node, "qcom,recharge-thresh-mv",
+						&chip->resume_delta_mv);
+	if (rc < 0)
+		chip->resume_delta_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc < 0)
+		chip->vfloat_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,parallel-en-pin-polarity",
+					&chip->parallel_pin_polarity_setting);
+	if (rc)
+		chip->parallel_pin_polarity_setting = CHG_EN_ACTIVE_LOW_BIT;
+	else
+		chip->parallel_pin_polarity_setting =
+				chip->parallel_pin_polarity_setting ?
+				CHG_EN_ACTIVE_HIGH_BIT : CHG_EN_ACTIVE_LOW_BIT;
+
+	mutex_init(&chip->path_suspend_lock);
+	mutex_init(&chip->current_change_lock);
+	mutex_init(&chip->read_write_lock);
+
+	match = of_match_node(smb135x_match_table, node);
+	if (match == NULL) {
+		dev_err(chip->dev, "device tree match not found\n");
+		return -EINVAL;
+	}
+
+	chip->version = *(int *)match->data;
+	smb135x_set_current_tables(chip);
+
+	i2c_set_clientdata(client, chip);
+
+	chip->parallel_psy_d.name = "parallel";
+	chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_PARALLEL;
+	chip->parallel_psy_d.get_property = smb135x_parallel_get_property;
+	chip->parallel_psy_d.set_property = smb135x_parallel_set_property;
+	chip->parallel_psy_d.properties	= smb135x_parallel_properties;
+	chip->parallel_psy_d.property_is_writeable
+				= smb135x_parallel_is_writeable;
+	chip->parallel_psy_d.num_properties
+				= ARRAY_SIZE(smb135x_parallel_properties);
+
+	parallel_psy_cfg.drv_data = chip;
+	parallel_psy_cfg.num_supplicants = 0;
+	chip->parallel_psy = devm_power_supply_register(chip->dev,
+			&chip->parallel_psy_d,
+			&parallel_psy_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		dev_err(&client->dev,
+			"Unable to register parallel_psy rc = %ld\n",
+			PTR_ERR(chip->parallel_psy));
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	create_debugfs_entries(chip);
+
+	dev_info(chip->dev, "SMB135X USB PARALLEL CHARGER version = %s successfully probed\n",
+			version_str[chip->version]);
+	return 0;
+}
+
+static int smb135x_chg_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	if (is_parallel_charger(client))
+		return smb135x_parallel_charger_probe(client, id);
+	else
+		return smb135x_main_charger_probe(client, id);
+}
+
+static int smb135x_chg_remove(struct i2c_client *client)
+{
+	int rc;
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+	debugfs_remove_recursive(chip->debug_root);
+
+	if (chip->parallel_charger)
+		goto mutex_destroy;
+
+	if (chip->therm_bias_vreg) {
+		rc = regulator_disable(chip->therm_bias_vreg);
+		if (rc)
+			pr_err("Couldn't disable therm-bias rc = %d\n", rc);
+	}
+
+	if (chip->usb_pullup_vreg) {
+		rc = regulator_disable(chip->usb_pullup_vreg);
+		if (rc)
+			pr_err("Couldn't disable data-pullup rc = %d\n", rc);
+	}
+
+	smb135x_regulator_deinit(chip);
+
+mutex_destroy:
+	mutex_destroy(&chip->irq_complete);
+	return 0;
+}
+
+static int smb135x_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+	int i, rc;
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	/* Save the current IRQ config */
+	for (i = 0; i < 3; i++) {
+		rc = smb135x_read(chip, IRQ_CFG_REG + i,
+					&chip->irq_cfg_mask[i]);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't save irq cfg regs rc=%d\n", rc);
+	}
+
+	/* enable only important IRQs */
+	rc = smb135x_write(chip, IRQ_CFG_REG, IRQ_USBIN_UV_BIT);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set irq_cfg rc = %d\n", rc);
+
+	rc = smb135x_write(chip, IRQ2_CFG_REG, IRQ2_BATT_MISSING_BIT
+						| IRQ2_VBAT_LOW_BIT
+						| IRQ2_POWER_OK_BIT);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set irq2_cfg rc = %d\n", rc);
+
+	rc = smb135x_write(chip, IRQ3_CFG_REG, IRQ3_SRC_DETECT_BIT
+			| IRQ3_DCIN_UV_BIT | IRQ3_RID_DETECT_BIT);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set irq3_cfg rc = %d\n", rc);
+
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = false;
+	mutex_unlock(&chip->irq_complete);
+
+	return 0;
+}
+
+static int smb135x_suspend_noirq(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	if (chip->irq_waiting) {
+		pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int smb135x_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+	int i, rc;
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+	/* Restore the IRQ config */
+	for (i = 0; i < 3; i++) {
+		rc = smb135x_write(chip, IRQ_CFG_REG + i,
+					chip->irq_cfg_mask[i]);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't restore irq cfg regs rc=%d\n", rc);
+	}
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = true;
+	if (chip->irq_waiting) {
+		mutex_unlock(&chip->irq_complete);
+		smb135x_chg_stat_handler(client->irq, chip);
+		enable_irq(client->irq);
+	} else {
+		mutex_unlock(&chip->irq_complete);
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops smb135x_pm_ops = {
+	.resume		= smb135x_resume,
+	.suspend_noirq	= smb135x_suspend_noirq,
+	.suspend	= smb135x_suspend,
+};
+
+static const struct i2c_device_id smb135x_chg_id[] = {
+	{"smb135x-charger", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, smb135x_chg_id);
+
+static void smb135x_shutdown(struct i2c_client *client)
+{
+	int rc;
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+	if (chip->usb_pullup_vreg) {
+		/*
+		 * Switch to the 5V adapter to prevent any erroneous request
+		 * of 12V when the USB D+ line pull-up regulator turns off.
+		 */
+		rc = smb135x_masked_write(chip, CFG_E_REG, HVDCP_5_9_BIT, 0);
+		if (rc < 0)
+			dev_err(chip->dev,
+				"Couldn't request for 5V rc=%d\n", rc);
+	}
+}
+
+static struct i2c_driver smb135x_chg_driver = {
+	.driver		= {
+		.name		= "smb135x-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= smb135x_match_table,
+		.pm		= &smb135x_pm_ops,
+	},
+	.probe		= smb135x_chg_probe,
+	.remove		= smb135x_chg_remove,
+	.id_table	= smb135x_chg_id,
+	.shutdown	= smb135x_shutdown,
+};
+
+module_i2c_driver(smb135x_chg_driver);
+
+MODULE_DESCRIPTION("SMB135x Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:smb135x-charger");
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
new file mode 100644
index 0000000..1e89a721
--- /dev/null
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -0,0 +1,1573 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "SMB138X: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/iio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "smb-reg.h"
+#include "smb-lib.h"
+#include "storm-watch.h"
+#include "pmic-voter.h"
+
+#define SMB138X_DEFAULT_FCC_UA 1000000
+#define SMB138X_DEFAULT_ICL_UA 1500000
+
+/* Registers that are not common enough to be declared in smb-reg.h */
+#define SMB2CHG_MISC_ENG_SDCDC_CFG2	(MISC_BASE + 0xC1)
+#define ENG_SDCDC_SEL_OOB_VTH_BIT	BIT(0)
+
+#define SMB2CHG_MISC_ENG_SDCDC_CFG6	(MISC_BASE + 0xC5)
+#define DEAD_TIME_MASK			GENMASK(7, 4)
+#define HIGH_DEAD_TIME_MASK		GENMASK(7, 4)
+
+#define SMB2CHG_DC_TM_SREFGEN		(DCIN_BASE + 0xE2)
+#define STACKED_DIODE_EN_BIT		BIT(2)
+
+#define TDIE_AVG_COUNT	10
+
+enum {
+	OOB_COMP_WA_BIT = BIT(0),
+};
+
+static struct smb_params v1_params = {
+	.fcc		= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.fv		= {
+		.name	= "float voltage",
+		.reg	= FLOAT_VOLTAGE_CFG_REG,
+		.min_u	= 2450000,
+		.max_u	= 4950000,
+		.step_u	= 10000,
+	},
+	.usb_icl	= {
+		.name	= "usb input current limit",
+		.reg	= USBIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.dc_icl		= {
+		.name	= "dc input current limit",
+		.reg	= DCIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.freq_buck	= {
+		.name	= "buck switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+		.min_u	= 500,
+		.max_u	= 2000,
+		.step_u	= 100,
+	},
+};
+
+struct smb_dt_props {
+	bool	suspend_input;
+	int	fcc_ua;
+	int	usb_icl_ua;
+	int	dc_icl_ua;
+	int	chg_temp_max_mdegc;
+	int	connector_temp_max_mdegc;
+};
+
+struct smb138x {
+	struct smb_charger	chg;
+	struct smb_dt_props	dt;
+	struct power_supply	*parallel_psy;
+	u32			wa_flags;
+};
+
+static int __debug_mask;
+module_param_named(
+	debug_mask, __debug_mask, int, 0600
+);
+
+irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb138x *chip = irq_data->parent_data;
+
+	if (chip->parallel_psy)
+		power_supply_changed(chip->parallel_psy);
+
+	return IRQ_HANDLED;
+}
+
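+/* Average TDIE_AVG_COUNT consecutive charger die-temperature readings */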
+static int smb138x_get_prop_charger_temp(struct smb138x *chip,
+				 union power_supply_propval *val)
+{
+	union power_supply_propval pval;
+	int rc = 0, avg = 0, i;
+	struct smb_charger *chg = &chip->chg;
+
+	for (i = 0; i < TDIE_AVG_COUNT; i++) {
+		pval.intval = 0;
+		rc = smblib_get_prop_charger_temp(chg, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't read chg temp at %dth iteration rc = %d\n",
+					i + 1, rc);
+			return rc;
+		}
+		avg += pval.intval;
+	}
+	val->intval = avg / TDIE_AVG_COUNT;
+	return rc;
+}
+
+static int smb138x_parse_dt(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	int rc;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	chip->dt.suspend_input = of_property_read_bool(node,
+				"qcom,suspend-input");
+
+	rc = of_property_read_u32(node,
+				"qcom,fcc-max-ua", &chip->dt.fcc_ua);
+	if (rc < 0)
+		chip->dt.fcc_ua = SMB138X_DEFAULT_FCC_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+	if (rc < 0)
+		chip->dt.usb_icl_ua = SMB138X_DEFAULT_ICL_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
+	if (rc < 0)
+		chip->dt.dc_icl_ua = SMB138X_DEFAULT_ICL_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,charger-temp-max-mdegc",
+				&chip->dt.chg_temp_max_mdegc);
+	if (rc < 0)
+		chip->dt.chg_temp_max_mdegc = 80000;
+
+	rc = of_property_read_u32(node,
+				"qcom,connector-temp-max-mdegc",
+				&chip->dt.connector_temp_max_mdegc);
+	if (rc < 0)
+		chip->dt.connector_temp_max_mdegc = 105000;
+
+	return 0;
+}
+
+/************************
+ * USB PSY REGISTRATION *
+ ************************/
+
+static enum power_supply_property smb138x_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+};
+
+static int smb138x_usb_get_prop(struct power_supply *psy,
+				enum power_supply_property prop,
+				union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_usb_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		val->intval = chg->voltage_min_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chg->voltage_max_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_usb_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = chg->usb_psy_desc.type;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		rc = smblib_get_prop_typec_mode(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_get_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
+		rc = smblib_get_prop_typec_cc_orientation(chg, val);
+		break;
+	default:
+		pr_err("get prop %d is not supported\n", prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_usb_set_prop(struct power_supply *psy,
+				enum power_supply_property prop,
+				const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		rc = smblib_set_prop_usb_voltage_min(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_prop_usb_voltage_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_set_prop_typec_power_role(chg, val);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_usb_prop_is_writeable(struct power_supply *psy,
+					 enum power_supply_property prop)
+{
+	switch (prop) {
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int smb138x_init_usb_psy(struct smb138x *chip)
+{
+	struct power_supply_config usb_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	chg->usb_psy_desc.name = "usb";
+	chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+	chg->usb_psy_desc.properties = smb138x_usb_props;
+	chg->usb_psy_desc.num_properties = ARRAY_SIZE(smb138x_usb_props);
+	chg->usb_psy_desc.get_property = smb138x_usb_get_prop;
+	chg->usb_psy_desc.set_property = smb138x_usb_set_prop;
+	chg->usb_psy_desc.property_is_writeable = smb138x_usb_prop_is_writeable;
+
+	usb_cfg.drv_data = chip;
+	usb_cfg.of_node = chg->dev->of_node;
+	chg->usb_psy = devm_power_supply_register(chg->dev,
+						  &chg->usb_psy_desc,
+						  &usb_cfg);
+	if (IS_ERR(chg->usb_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->usb_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * BATT PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb138x_batt_props[] = {
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+};
+
+static int smb138x_batt_get_prop(struct power_supply *psy,
+				 enum power_supply_property prop,
+				 union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		rc = smblib_get_prop_batt_status(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		rc = smblib_get_prop_batt_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_batt_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_get_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb138x_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	default:
+		pr_err("batt power supply get prop %d not supported\n", prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_batt_set_prop(struct power_supply *psy,
+				 enum power_supply_property prop,
+				 const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_set_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_set_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	default:
+		pr_err("batt power supply set prop %d not supported\n", prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_batt_prop_is_writeable(struct power_supply *psy,
+					  enum power_supply_property prop)
+{
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+	case POWER_SUPPLY_PROP_CAPACITY:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct power_supply_desc batt_psy_desc = {
+	.name			= "battery",
+	.type			= POWER_SUPPLY_TYPE_BATTERY,
+	.properties		= smb138x_batt_props,
+	.num_properties		= ARRAY_SIZE(smb138x_batt_props),
+	.get_property		= smb138x_batt_get_prop,
+	.set_property		= smb138x_batt_set_prop,
+	.property_is_writeable	= smb138x_batt_prop_is_writeable,
+};
+
+static int smb138x_init_batt_psy(struct smb138x *chip)
+{
+	struct power_supply_config batt_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	batt_cfg.drv_data = chip;
+	batt_cfg.of_node = chg->dev->of_node;
+	chg->batt_psy = devm_power_supply_register(chg->dev,
+						   &batt_psy_desc,
+						   &batt_cfg);
+	if (IS_ERR(chg->batt_psy)) {
+		pr_err("Couldn't register battery power supply\n");
+		return PTR_ERR(chg->batt_psy);
+	}
+
+	return rc;
+}
+
+/*****************************
+ * PARALLEL PSY REGISTRATION *
+ *****************************/
+
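+/*
+ * Map the connector temperature against the three skin-temp thresholds:
+ * below thr1 is COOL, below thr2 is WARM, below thr3 is HOT, else OVERHEAT.
+ */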
+static int smb138x_get_prop_connector_health(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc, lb_mdegc, ub_mdegc, rst_mdegc, connector_mdegc;
+
+	if (!chg->iio.connector_temp_chan ||
+		PTR_ERR(chg->iio.connector_temp_chan) == -EPROBE_DEFER)
+		chg->iio.connector_temp_chan = iio_channel_get(chg->dev,
+							"connector_temp");
+
+	if (IS_ERR(chg->iio.connector_temp_chan))
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr1_chan,
+							&lb_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector lower bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr2_chan,
+							&ub_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector upper bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr3_chan,
+							&rst_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector reset bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_chan,
+							&connector_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector temperature rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	if (connector_mdegc < lb_mdegc)
+		return POWER_SUPPLY_HEALTH_COOL;
+	else if (connector_mdegc < ub_mdegc)
+		return POWER_SUPPLY_HEALTH_WARM;
+	else if (connector_mdegc < rst_mdegc)
+		return POWER_SUPPLY_HEALTH_HOT;
+
+	return POWER_SUPPLY_HEALTH_OVERHEAT;
+}
+
+static enum power_supply_property smb138x_parallel_props[] = {
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+};
+
+static int smb138x_parallel_get_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+	u8 temp;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG,
+				 &temp);
+		if (rc >= 0)
+			val->intval = (bool)(temp & CHARGING_ENABLE_BIT);
+		break;
+	case POWER_SUPPLY_PROP_PIN_ENABLED:
+		rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG,
+				 &temp);
+		if (rc >= 0)
+			val->intval = !(temp & DISABLE_CHARGING_BIT);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_usb_suspend(chg, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+					     &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_slave_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb138x_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = "smb138x";
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+		break;
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		val->intval = smb138x_get_prop_connector_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	default:
+		pr_err("parallel power supply get prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
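+/* The charger watchdog runs only while the parallel input path is active */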
+static int smb138x_set_parallel_suspend(struct smb138x *chip, bool suspend)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT,
+				 suspend ? 0 : WDOG_TIMER_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't %s watchdog rc=%d\n",
+		       suspend ? "disable" : "enable", rc);
+		suspend = true;
+	}
+
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+				 suspend ? USBIN_SUSPEND_BIT : 0);
+	if (rc < 0) {
+		pr_err("Couldn't %s parallel charger rc=%d\n",
+		       suspend ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb138x_parallel_set_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	default:
+		pr_debug("parallel power supply set prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
+					      enum power_supply_property prop)
+{
+	return 0;
+}
+
+static const struct power_supply_desc parallel_psy_desc = {
+	.name			= "parallel",
+	.type			= POWER_SUPPLY_TYPE_PARALLEL,
+	.properties		= smb138x_parallel_props,
+	.num_properties		= ARRAY_SIZE(smb138x_parallel_props),
+	.get_property		= smb138x_parallel_get_prop,
+	.set_property		= smb138x_parallel_set_prop,
+	.property_is_writeable	= smb138x_parallel_prop_is_writeable,
+};
+
+static int smb138x_init_parallel_psy(struct smb138x *chip)
+{
+	struct power_supply_config parallel_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	parallel_cfg.drv_data = chip;
+	parallel_cfg.of_node = chg->dev->of_node;
+	chip->parallel_psy = devm_power_supply_register(chg->dev,
+						   &parallel_psy_desc,
+						   &parallel_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		pr_err("Couldn't register parallel power supply\n");
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	return 0;
+}
+
+/******************************
+ * VBUS REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb138x_vbus_reg_ops = {
+	.enable		= smblib_vbus_regulator_enable,
+	.disable	= smblib_vbus_regulator_disable,
+	.is_enabled	= smblib_vbus_regulator_is_enabled,
+};
+
+static int smb138x_init_vbus_regulator(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vbus_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vbus_vreg),
+				      GFP_KERNEL);
+	if (!chg->vbus_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vbus_vreg->rdesc.owner = THIS_MODULE;
+	chg->vbus_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vbus_vreg->rdesc.ops = &smb138x_vbus_reg_ops;
+	chg->vbus_vreg->rdesc.of_match = "qcom,smb138x-vbus";
+	chg->vbus_vreg->rdesc.name = "qcom,smb138x-vbus";
+
+	chg->vbus_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vbus_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vbus_vreg->rdev)) {
+		rc = PTR_ERR(chg->vbus_vreg->rdev);
+		chg->vbus_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VBUS regulator rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/******************************
+ * VCONN REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb138x_vconn_reg_ops = {
+	.enable		= smblib_vconn_regulator_enable,
+	.disable	= smblib_vconn_regulator_disable,
+	.is_enabled	= smblib_vconn_regulator_is_enabled,
+};
+
+static int smb138x_init_vconn_regulator(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
+				      GFP_KERNEL);
+	if (!chg->vconn_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vconn_vreg->rdesc.owner = THIS_MODULE;
+	chg->vconn_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vconn_vreg->rdesc.ops = &smb138x_vconn_reg_ops;
+	chg->vconn_vreg->rdesc.of_match = "qcom,smb138x-vconn";
+	chg->vconn_vreg->rdesc.name = "qcom,smb138x-vconn";
+
+	chg->vconn_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vconn_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vconn_vreg->rdev)) {
+		rc = PTR_ERR(chg->vconn_vreg->rdev);
+		chg->vconn_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VCONN regulator rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+
+#define MDEGC_3		3000
+#define MDEGC_15	15000
+static int smb138x_init_slave_hw(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+
+	if (chip->wa_flags & OOB_COMP_WA_BIT) {
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG2,
+					ENG_SDCDC_SEL_OOB_VTH_BIT,
+					ENG_SDCDC_SEL_OOB_VTH_BIT);
+		if (rc < 0) {
+			pr_err("Couldn't configure the OOB comp threshold rc = %d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG6,
+				DEAD_TIME_MASK, HIGH_DEAD_TIME_MASK);
+		if (rc < 0) {
+			pr_err("Couldn't configure the sdcdc cfg 6 reg rc = %d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	/* enable watchdog bark and bite interrupts, and disable the watchdog */
+	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
+			| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
+			| BARK_WDOG_INT_EN_BIT,
+			BITE_WDOG_INT_EN_BIT | BARK_WDOG_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable charging when watchdog bites */
+	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog bite rc=%d\n", rc);
+		return rc;
+	}
+
+	/* suspend parallel charging */
+	rc = smb138x_set_parallel_suspend(chip, true);
+	if (rc < 0) {
+		pr_err("Couldn't suspend parallel charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize FCC to 0 */
+	rc = smblib_set_charge_param(chg, &chg->param.fcc, 0);
+	if (rc < 0) {
+		pr_err("Couldn't set 0 FCC rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+				 CHARGING_ENABLE_CMD_BIT,
+				 CHARGING_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* enable parallel current sensing */
+	rc = smblib_masked_write(chg, CFG_REG,
+				 VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable parallel current sensing rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* enable stacked diode */
+	rc = smblib_write(chg, SMB2CHG_DC_TM_SREFGEN, STACKED_DIODE_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable stacked diode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize charger temperature threshold */
+	rc = iio_write_channel_processed(chg->iio.temp_max_chan,
+					chip->dt.chg_temp_max_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't set charger temp threshold rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr1_chan,
+				chip->dt.connector_temp_max_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold1 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr2_chan,
+				chip->dt.connector_temp_max_mdegc + MDEGC_3);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold2 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr3_chan,
+				chip->dt.connector_temp_max_mdegc + MDEGC_15);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold3 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_write(chg, THERMREG_SRC_CFG_REG,
+						THERMREG_SKIN_ADC_SRC_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable connector thermreg source rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb138x_init_hw(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	/* votes must be cast before configuring software control */
+	vote(chg->dc_suspend_votable,
+		DEFAULT_VOTER, chip->dt.suspend_input, 0);
+	vote(chg->fcc_votable,
+		DEFAULT_VOTER, true, chip->dt.fcc_ua);
+	vote(chg->usb_icl_votable,
+		DCP_VOTER, true, chip->dt.usb_icl_ua);
+	vote(chg->dc_icl_votable,
+		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
+
+	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+	/* configure a fixed 700 kHz switching frequency to avoid tdie errors */
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+	if (rc < 0) {
+		pr_err("Couldn't configure 700 kHz switch freq rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+	if (rc < 0) {
+		pr_err("Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * trigger the usb-typec-change interrupt only when the CC state
+	 * changes, or there was a VBUS error
+	 */
+	rc = smblib_write(chg, TYPE_C_INTRPT_ENB_REG,
+			    TYPEC_CCSTATE_CHANGE_INT_EN_BIT
+			  | TYPEC_VBUS_ERROR_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VCONN for software control */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure VCONN for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VBUS for software control */
+	rc = smblib_masked_write(chg, OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure VBUS for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure power role for dual-role */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure power role for DRP rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & OOB_COMP_WA_BIT) {
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG2,
+					ENG_SDCDC_SEL_OOB_VTH_BIT,
+					ENG_SDCDC_SEL_OOB_VTH_BIT);
+		if (rc < 0) {
+			pr_err("Couldn't configure the OOB comp threshold rc = %d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG6,
+				DEAD_TIME_MASK, HIGH_DEAD_TIME_MASK);
+		if (rc < 0) {
+			pr_err("Couldn't configure the sdcdc cfg 6 reg rc = %d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb138x_setup_wa_flags(struct smb138x *chip)
+{
+	struct pmic_revid_data *pmic_rev_id;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+					"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
+		/*
+		 * The revid peripheral must be registered, so any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	switch (pmic_rev_id->pmic_subtype) {
+	case SMB1381_SUBTYPE:
+		if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
+			chip->wa_flags |= OOB_COMP_WA_BIT;
+		break;
+	default:
+		pr_err("PMIC subtype %d not supported\n",
+				pmic_rev_id->pmic_subtype);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/****************************
+ * DETERMINE INITIAL STATUS *
+ ****************************/
+
+static irqreturn_t smb138x_handle_temperature_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb138x *chip = irq_data->parent_data;
+
+	power_supply_changed(chip->parallel_psy);
+	return IRQ_HANDLED;
+}
+
+static int smb138x_determine_initial_slave_status(struct smb138x *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+
+	smb138x_handle_temperature_change(0, &irq_data);
+	return 0;
+}
+
+static int smb138x_determine_initial_status(struct smb138x *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+
+	smblib_handle_usb_plugin(0, &irq_data);
+	smblib_handle_usb_typec_change(0, &irq_data);
+	smblib_handle_usb_source_change(0, &irq_data);
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+
+static struct smb_irq_info smb138x_irqs[] = {
+/* CHARGER IRQs */
+	[CHG_ERROR_IRQ] = {
+		.name		= "chg-error",
+		.handler	= smblib_handle_debug,
+	},
+	[CHG_STATE_CHANGE_IRQ] = {
+		.name		= "chg-state-change",
+		.handler	= smb138x_handle_slave_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_STATE_CHANGE_IRQ] = {
+		.name		= "step-chg-state-change",
+		.handler	= smblib_handle_debug,
+	},
+	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
+		.name		= "step-chg-soc-update-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
+		.name		= "step-chg-soc-update-request",
+		.handler	= smblib_handle_debug,
+	},
+/* OTG IRQs */
+	[OTG_FAIL_IRQ] = {
+		.name		= "otg-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OVERCURRENT_IRQ] = {
+		.name		= "otg-overcurrent",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OC_DIS_SW_STS_IRQ] = {
+		.name		= "otg-oc-dis-sw-sts",
+		.handler	= smblib_handle_debug,
+	},
+	[TESTMODE_CHANGE_DET_IRQ] = {
+		.name		= "testmode-change-detect",
+		.handler	= smblib_handle_debug,
+	},
+/* BATTERY IRQs */
+	[BATT_TEMP_IRQ] = {
+		.name		= "bat-temp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OCP_IRQ] = {
+		.name		= "bat-ocp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OV_IRQ] = {
+		.name		= "bat-ov",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_LOW_IRQ] = {
+		.name		= "bat-low",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_THERM_ID_MISS_IRQ] = {
+		.name		= "bat-therm-or-id-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_TERM_MISS_IRQ] = {
+		.name		= "bat-terminal-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+/* USB INPUT IRQs */
+	[USBIN_COLLAPSE_IRQ] = {
+		.name		= "usbin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_LT_3P6V_IRQ] = {
+		.name		= "usbin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_UV_IRQ] = {
+		.name		= "usbin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_OV_IRQ] = {
+		.name		= "usbin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_PLUGIN_IRQ] = {
+		.name		= "usbin-plugin",
+		.handler	= smblib_handle_usb_plugin,
+	},
+	[USBIN_SRC_CHANGE_IRQ] = {
+		.name		= "usbin-src-change",
+		.handler	= smblib_handle_usb_source_change,
+	},
+	[USBIN_ICL_CHANGE_IRQ] = {
+		.name		= "usbin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+	[TYPE_C_CHANGE_IRQ] = {
+		.name		= "type-c-change",
+		.handler	= smblib_handle_usb_typec_change,
+	},
+/* DC INPUT IRQs */
+	[DCIN_COLLAPSE_IRQ] = {
+		.name		= "dcin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_LT_3P6V_IRQ] = {
+		.name		= "dcin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_UV_IRQ] = {
+		.name		= "dcin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_OV_IRQ] = {
+		.name		= "dcin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_PLUGIN_IRQ] = {
+		.name		= "dcin-plugin",
+		.handler	= smblib_handle_debug,
+	},
+	[DIV2_EN_DG_IRQ] = {
+		.name		= "div2-en-dg",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_ICL_CHANGE_IRQ] = {
+		.name		= "dcin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+/* MISCELLANEOUS IRQs */
+	[WDOG_SNARL_IRQ] = {
+		.name		= "wdog-snarl",
+		.handler	= smblib_handle_debug,
+	},
+	[WDOG_BARK_IRQ] = {
+		.name		= "wdog-bark",
+		.handler	= smblib_handle_wdog_bark,
+		.wake		= true,
+	},
+	[AICL_FAIL_IRQ] = {
+		.name		= "aicl-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[AICL_DONE_IRQ] = {
+		.name		= "aicl-done",
+		.handler	= smblib_handle_debug,
+	},
+	[HIGH_DUTY_CYCLE_IRQ] = {
+		.name		= "high-duty-cycle",
+		.handler	= smblib_handle_debug,
+	},
+	[INPUT_CURRENT_LIMIT_IRQ] = {
+		.name		= "input-current-limiting",
+		.handler	= smblib_handle_debug,
+	},
+	[TEMPERATURE_CHANGE_IRQ] = {
+		.name		= "temperature-change",
+		.handler	= smb138x_handle_temperature_change,
+	},
+	[SWITCH_POWER_OK_IRQ] = {
+		.name		= "switcher-power-ok",
+		.handler	= smblib_handle_debug,
+	},
+};
+
+static int smb138x_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb138x_irqs); i++) {
+		if (strcmp(smb138x_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb138x_request_interrupt(struct smb138x *chip,
+				     struct device_node *node,
+				     const char *irq_name)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0, irq, irq_index;
+	struct smb_irq_data *irq_data;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb138x_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
+	if (!smb138x_irqs[irq_index].handler)
+		return 0;
+
+	irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	irq_data->parent_data = chip;
+	irq_data->name = irq_name;
+	irq_data->storm_data = smb138x_irqs[irq_index].storm_data;
+	mutex_init(&irq_data->storm_data.storm_lock);
+
+	rc = devm_request_threaded_irq(chg->dev, irq, NULL,
+					smb138x_irqs[irq_index].handler,
+					IRQF_ONESHOT, irq_name, irq_data);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d\n", irq);
+		return rc;
+	}
+
+	if (smb138x_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb138x_request_interrupts(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					    prop, name) {
+			rc = smb138x_request_interrupt(chip, child, name);
+			if (rc < 0) {
+				pr_err("Couldn't request interrupt %s rc=%d\n",
+				       name, rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*********
+ * PROBE *
+ *********/
+
+static int smb138x_master_probe(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	chg->param = v1_params;
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Couldn't initialize smblib rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_vbus_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vbus regulator rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_init_vconn_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vconn regulator rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_init_usb_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_batt_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize batt psy rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb138x_slave_probe(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	chg->param = v1_params;
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Couldn't initialize smblib rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max");
+	if (IS_ERR(chg->iio.temp_max_chan)) {
+		rc = PTR_ERR(chg->iio.temp_max_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr1_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr1");
+	if (IS_ERR(chg->iio.connector_temp_thr1_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr1_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr2_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr2");
+	if (IS_ERR(chg->iio.connector_temp_thr2_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr2_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr3_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr3");
+	if (IS_ERR(chg->iio.connector_temp_thr3_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr3_chan);
+		goto cleanup;
+	}
+
+	rc = smb138x_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_init_slave_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_init_parallel_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_determine_initial_slave_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	return rc;
+
+cleanup:
+	smblib_deinit(chg);
+	if (chip->parallel_psy)
+		power_supply_unregister(chip->parallel_psy);
+	return rc;
+}
+
+static const struct of_device_id match_table[] = {
+	{
+		.compatible = "qcom,smb138x-charger",
+		.data = (void *) PARALLEL_MASTER
+	},
+	{
+		.compatible = "qcom,smb138x-parallel-slave",
+		.data = (void *) PARALLEL_SLAVE
+	},
+	{ },
+};
+
+static int smb138x_probe(struct platform_device *pdev)
+{
+	struct smb138x *chip;
+	const struct of_device_id *id;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->chg.dev = &pdev->dev;
+	chip->chg.debug_mask = &__debug_mask;
+	chip->chg.irq_info = smb138x_irqs;
+	chip->chg.name = "SMB";
+
+	chip->chg.regmap = dev_get_regmap(chip->chg.dev->parent, NULL);
+	if (!chip->chg.regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_device(of_match_ptr(match_table), chip->chg.dev);
+	if (!id) {
+		pr_err("Couldn't find a matching device\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb138x_setup_wa_flags(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't setup wa flags rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->chg.mode = (enum smb_mode) id->data;
+	switch (chip->chg.mode) {
+	case PARALLEL_MASTER:
+		rc = smb138x_master_probe(chip);
+		break;
+	case PARALLEL_SLAVE:
+		rc = smb138x_slave_probe(chip);
+		break;
+	default:
+		pr_err("Couldn't find a matching mode %d\n", chip->chg.mode);
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't probe SMB138X rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
+	return rc;
+
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb138x_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct platform_driver smb138x_driver = {
+	.driver	= {
+		.name		= "qcom,smb138x-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe	= smb138x_probe,
+	.remove	= smb138x_remove,
+};
+module_platform_driver(smb138x_driver);
+
+MODULE_DESCRIPTION("QPNP SMB138X Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/storm-watch.c b/drivers/power/supply/qcom/storm-watch.c
new file mode 100644
index 0000000..5275079
--- /dev/null
+++ b/drivers/power/supply/qcom/storm-watch.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "storm-watch.h"
+
+/**
+ * is_storming(): Check whether an event storm is in progress
+ *
+ * @data: Data for tracking an event storm
+ *
+ * Return: true if an event storm has been detected, false otherwise.
+ */
+bool is_storming(struct storm_watch *data)
+{
+	ktime_t curr_kt, delta_kt;
+	bool is_storming = false;
+
+	if (!data)
+		return false;
+
+	if (!data->enabled)
+		return false;
+
+	/* max storm count must be greater than 0 */
+	if (data->max_storm_count <= 0)
+		return false;
+
+	/* the period threshold must be greater than 0ms */
+	if (data->storm_period_ms <= 0)
+		return false;
+
+	mutex_lock(&data->storm_lock);
+	curr_kt = ktime_get_boottime();
+	delta_kt = ktime_sub(curr_kt, data->last_kt);
+
+	if (ktime_to_ms(delta_kt) < data->storm_period_ms)
+		data->storm_count++;
+	else
+		data->storm_count = 0;
+
+	if (data->storm_count > data->max_storm_count) {
+		is_storming = true;
+		data->storm_count = 0;
+	}
+
+	data->last_kt = curr_kt;
+	mutex_unlock(&data->storm_lock);
+	return is_storming;
+}
+
+void reset_storm_count(struct storm_watch *data)
+{
+	mutex_lock(&data->storm_lock);
+	data->storm_count = 0;
+	mutex_unlock(&data->storm_lock);
+}
diff --git a/drivers/power/supply/qcom/storm-watch.h b/drivers/power/supply/qcom/storm-watch.h
new file mode 100644
index 0000000..ff05c4a
--- /dev/null
+++ b/drivers/power/supply/qcom/storm-watch.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STORM_WATCH_H
+#define __STORM_WATCH_H
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+
+/**
+ * Data used to track an event storm.
+ *
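+ * @enabled:         Flag indicating whether storm detection is active; when
+ *                   false, is_storming() always returns false.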
+ * @storm_period_ms: The maximum time interval between two events. If this
+ *                   limit is exceeded, the event chain is broken and removed
+ *                   from consideration for a storm.
+ * @max_storm_count: The number of chained events required to trigger a storm.
+ * @storm_count:     The current number of chained events.
+ * @last_kt:         Kernel time of the last event seen.
+ * @storm_lock:      Mutex lock to protect storm_watch data.
+ */
+struct storm_watch {
+	bool		enabled;
+	int		storm_period_ms;
+	int		max_storm_count;
+	int		storm_count;
+	ktime_t		last_kt;
+	struct mutex	storm_lock;
+};
+
+bool is_storming(struct storm_watch *data);
+void reset_storm_count(struct storm_watch *data);
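+
+/*
+ * Illustrative usage sketch (not taken from any driver in this change; the
+ * names example_sw and example_irq_handler are hypothetical, and the IRQ
+ * types assume <linux/interrupt.h>).  A client embeds a struct storm_watch,
+ * initializes the lock and thresholds, and consults is_storming() from its
+ * event handler:
+ *
+ *	static struct storm_watch example_sw;
+ *
+ *	static void example_storm_watch_setup(void)
+ *	{
+ *		mutex_init(&example_sw.storm_lock);
+ *		example_sw.enabled = true;
+ *		example_sw.storm_period_ms = 100;
+ *		example_sw.max_storm_count = 5;
+ *	}
+ *
+ *	static irqreturn_t example_irq_handler(int irq, void *data)
+ *	{
+ *		if (is_storming(&example_sw))
+ *			disable_irq_nosync(irq);
+ *		return IRQ_HANDLED;
+ *	}
+ */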
+#endif
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 4752653..07a0aef 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -280,6 +280,18 @@
 	((vband) == 0 ? CPR4_REG_MARGIN_TEMP_CORE(core) \
 			: 0x3AB0 + 0x40 * ((vband) - 1) + 0x4 * (core))
 
+#define CPRH_REG_MISC_REG2	0x3AAC
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_MASK	GENMASK(31, 29)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_SHIFT	29
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_MASK	GENMASK(28, 24)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_SHIFT	24
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_MASK	GENMASK(23, 22)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT	22
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK	GENMASK(21, 20)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT	20
+#define CPRH_MISC_REG2_ACD_AVG_EN_MASK	BIT(12)
+#define CPRH_MISC_REG2_ACD_AVG_ENABLE	BIT(12)
+
 /* SAW module registers */
 #define SAW_REG_AVS_CTL				0x904
 #define SAW_REG_AVS_LIMIT			0x908
@@ -1399,6 +1411,33 @@
 	}
 
 	/*
+	 * Configure CPRh ACD AVG registers on controllers
+	 * that support this feature.
+	 */
+	if (ctrl->cpr_hw_version >= CPRH_CPR_VERSION_4P5
+	    && ctrl->acd_avg_enabled) {
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_MASK,
+				  ctrl->acd_adj_up_step_limit <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_MASK,
+				  ctrl->acd_adj_down_step_limit <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_MASK,
+				  ctrl->acd_adj_up_step_size <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK,
+				  ctrl->acd_adj_down_step_size <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_AVG_EN_MASK,
+				  CPRH_MISC_REG2_ACD_AVG_ENABLE);
+	}
+
+	/*
 	 * Program base voltage and voltage multiplier values which
 	 * are used for floor and initial voltage calculations by the
 	 * CPRh controller.
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 31d737ca..570ddfc 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -649,6 +649,20 @@
  *			defines the maximum number of VDD supply regulator steps
  *			that the voltage may be increased as the result of a
  *			single CPR measurement.
+ * @acd_adj_down_step_limit: Limits the number of PMIC steps to go down within
+ *			a given corner due to all ACD adjustments on some CPRh
+ *			controllers.
+ * @acd_adj_up_step_limit: Limits the number of PMIC steps to go up within a
+ *			given corner due to all ACD adjustments on some CPRh
+ *			controllers.
+ * @acd_adj_down_step_size: ACD step size in units of PMIC steps used for
+ *			target quotient adjustment due to an ACD down
+ *			recommendation.
+ * @acd_adj_up_step_size: ACD step size in units of PMIC steps used for
+ *			target quotient adjustment due to an ACD up
+ *			recommendation.
+ * @acd_avg_enabled:	Boolean defining the enable state of the ACD AVG
+ *			feature.
  * @count_mode:		CPR controller count mode
  * @count_repeat:	Number of times to perform consecutive sensor
  *			measurements when using all-at-once count modes.
@@ -804,6 +818,11 @@
 	int			step_volt;
 	u32			down_error_step_limit;
 	u32			up_error_step_limit;
+	u32			acd_adj_down_step_limit;
+	u32			acd_adj_up_step_limit;
+	u32			acd_adj_down_step_size;
+	u32			acd_adj_up_step_size;
+	bool			acd_avg_enabled;
 	enum cpr3_count_mode	count_mode;
 	u32			count_repeat;
 	u32			proc_clock_throttle;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 84fc703..a93e7d8 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -2221,6 +2221,46 @@
 		return rc;
 	}
 
+	ctrl->acd_avg_enabled = of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-avg-enable");
+	if (ctrl->acd_avg_enabled) {
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-down-step-limit",
+					  &ctrl->acd_adj_down_step_limit);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-down-step-limit, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-up-step-limit",
+					  &ctrl->acd_adj_up_step_limit);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-up-step-limit, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-down-step-size",
+					  &ctrl->acd_adj_down_step_size);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-down-step-size, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-up-step-size",
+					  &ctrl->acd_adj_up_step_size);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-up-step-size, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
 	rc = of_property_read_u32(ctrl->dev->of_node,
 				  "qcom,voltage-base",
 				  &ctrl->base_volt);
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index cf8f000..dbe2a08 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -19,16 +19,19 @@
 #include <linux/kernel.h>
 #include <linux/regmap.h>
 #include <linux/module.h>
+#include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/spmi.h>
 #include <linux/platform_device.h>
 #include <linux/string.h>
+#include <linux/workqueue.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
 
 #define QPNP_LABIBB_REGULATOR_DRIVER_NAME	"qcom,qpnp-labibb-regulator"
 
@@ -594,6 +597,7 @@
 	const struct lab_ver_ops	*lab_ver_ops;
 	struct mutex			bus_mutex;
 	enum qpnp_labibb_mode		mode;
+	struct work_struct		lab_vreg_ok_work;
 	bool				standalone;
 	bool				ttw_en;
 	bool				in_ttw_mode;
@@ -603,10 +607,13 @@
 	bool				ttw_force_lab_on;
 	bool				skip_2nd_swire_cmd;
 	bool				pfm_enable;
+	bool				notify_lab_vreg_ok_sts;
 	u32				swire_2nd_cmd_delay;
 	u32				swire_ibb_ps_enable_delay;
 };
 
+static RAW_NOTIFIER_HEAD(labibb_notifier);
+
 struct ibb_ver_ops {
 	int (*set_default_voltage)(struct qpnp_labibb *labibb,
 			bool use_default);
@@ -2124,6 +2131,36 @@
 	return rc;
 }
 
+static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
+{
+	int rc = 0;
+	u16 retries = 1000, dly = 5000;
+	u8 val;
+	struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
+							lab_vreg_ok_work);
+
+	while (retries--) {
+		rc = qpnp_labibb_read(labibb, labibb->lab_base +
+					REG_LAB_STATUS1, &val, 1);
+		if (rc < 0) {
+			pr_err("read register %x failed rc = %d\n",
+				REG_LAB_STATUS1, rc);
+			return;
+		}
+
+		if (val & LAB_STATUS1_VREG_OK) {
+			raw_notifier_call_chain(&labibb_notifier,
+						LAB_VREG_OK, NULL);
+			return;
+		}
+
+		usleep_range(dly, dly + 100);
+	}
+
+	/* retries exhausted without LAB_VREG_OK being set */
+	pr_err("LAB_VREG_OK not set, failed to notify\n");
+}
+
 static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
 {
 	int rc;
@@ -2326,6 +2363,9 @@
 		labibb->lab_vreg.vreg_enabled = 1;
 	}
 
+	if (labibb->notify_lab_vreg_ok_sts)
+		schedule_work(&labibb->lab_vreg_ok_work);
+
 	return 0;
 }
 
@@ -2578,6 +2618,9 @@
 		return rc;
 	}
 
+	labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
+					"qcom,notify-lab-vreg-ok-sts");
+
 	rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
 					&(labibb->lab_vreg.soft_start));
 	if (!rc) {
@@ -3817,6 +3860,8 @@
 			goto fail_registration;
 		}
 	}
+
+	INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
 	dev_set_drvdata(&pdev->dev, labibb);
 	pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
 						labibb->lab_vreg.vreg_enabled,
@@ -3834,6 +3879,18 @@
 	return rc;
 }
 
+int qpnp_labibb_notifier_register(struct notifier_block *nb)
+{
+	return raw_notifier_chain_register(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_register);
+
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb)
+{
+	return raw_notifier_chain_unregister(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_unregister);
+
 static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
 {
 	struct qpnp_labibb *labibb = dev_get_drvdata(&pdev->dev);
@@ -3843,6 +3900,8 @@
 			regulator_unregister(labibb->lab_vreg.rdev);
 		if (labibb->ibb_vreg.rdev)
 			regulator_unregister(labibb->ibb_vreg.rdev);
+
+		cancel_work_sync(&labibb->lab_vreg_ok_work);
 	}
 	return 0;
 }
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index a08ade6..aef28db 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
@@ -31,6 +32,13 @@
 
 #define INT_RT_STATUS_REG		0x10
 #define VREG_OK_RT_STS_BIT		BIT(0)
+#define SC_ERROR_RT_STS_BIT		BIT(1)
+
+#define LCDB_STS3_REG			0x0A
+#define LDO_VREG_OK_BIT			BIT(7)
+
+#define LCDB_STS4_REG			0x0B
+#define NCP_VREG_OK_BIT			BIT(7)
 
 #define LCDB_AUTO_TOUCH_WAKE_CTL_REG	0x40
 #define EN_AUTO_TOUCH_WAKE_BIT		BIT(7)
@@ -185,14 +193,21 @@
 	struct platform_device		*pdev;
 	struct regmap			*regmap;
 	u32				base;
+	int				sc_irq;
 
 	/* TTW params */
 	bool				ttw_enable;
 	bool				ttw_mode_sw;
 
+	/* top level DT params */
+	bool				force_module_reenable;
+
 	/* status parameters */
 	bool				lcdb_enabled;
 	bool				settings_saved;
+	bool				lcdb_sc_disable;
+	int				sc_count;
+	ktime_t				sc_module_enable_time;
 
 	struct mutex			lcdb_mutex;
 	struct mutex			read_write_mutex;
@@ -569,8 +584,11 @@
 	int rc = 0, timeout, delay;
 	u8 val = 0;
 
-	if (lcdb->lcdb_enabled)
+	if (lcdb->lcdb_enabled || lcdb->lcdb_sc_disable) {
+		pr_debug("lcdb_enabled=%d lcdb_sc_disable=%d\n",
+			lcdb->lcdb_enabled, lcdb->lcdb_sc_disable);
 		return 0;
+	}
 
 	if (lcdb->ttw_enable) {
 		rc = qpnp_lcdb_ttw_exit(lcdb);
@@ -588,6 +606,23 @@
 		goto fail_enable;
 	}
 
+	if (lcdb->force_module_reenable) {
+		val = 0;
+		rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+								&val, 1);
+		if (rc < 0) {
+			pr_err("Failed to disable lcdb rc=%d\n", rc);
+			goto fail_enable;
+		}
+		val = MODULE_EN_BIT;
+		rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+								&val, 1);
+		if (rc < 0) {
+			pr_err("Failed to re-enable lcdb rc=%d\n", rc);
+			goto fail_enable;
+		}
+	}
+
 	/* poll for vreg_ok */
 	timeout = 10;
 	delay = lcdb->bst.soft_start_us + lcdb->ldo.soft_start_us +
@@ -656,6 +691,111 @@
 	return rc;
 }
 
+#define LCDB_SC_RESET_CNT_DLY_US	1000000
+#define LCDB_SC_CNT_MAX			10
+static int qpnp_lcdb_handle_sc_event(struct qpnp_lcdb *lcdb)
+{
+	int rc = 0;
+	s64 elapsed_time_us;
+
+	mutex_lock(&lcdb->lcdb_mutex);
+	rc = qpnp_lcdb_disable(lcdb);
+	if (rc < 0) {
+		pr_err("Failed to disable lcdb rc=%d\n", rc);
+		goto unlock_mutex;
+	}
+
+	/* Check if the SC re-occurred immediately */
+	elapsed_time_us = ktime_us_delta(ktime_get(),
+			lcdb->sc_module_enable_time);
+	if (elapsed_time_us > LCDB_SC_RESET_CNT_DLY_US) {
+		lcdb->sc_count = 0;
+	} else if (lcdb->sc_count > LCDB_SC_CNT_MAX) {
+		pr_err("SC triggered %d times, disabling LCDB forever!\n",
+						lcdb->sc_count);
+		lcdb->lcdb_sc_disable = true;
+		goto unlock_mutex;
+	}
+	lcdb->sc_count++;
+	lcdb->sc_module_enable_time = ktime_get();
+
+	/* delay for SC to clear */
+	usleep_range(10000, 10100);
+
+	rc = qpnp_lcdb_enable(lcdb);
+	if (rc < 0)
+		pr_err("Failed to enable lcdb rc=%d\n", rc);
+
+unlock_mutex:
+	mutex_unlock(&lcdb->lcdb_mutex);
+	return rc;
+}
+
+static irqreturn_t qpnp_lcdb_sc_irq_handler(int irq, void *data)
+{
+	struct qpnp_lcdb *lcdb = data;
+	int rc;
+	u8 val, val2[2] = {0};
+
+	rc = qpnp_lcdb_read(lcdb, lcdb->base + INT_RT_STATUS_REG, &val, 1);
+	if (rc < 0)
+		goto irq_handled;
+
+	if (val & SC_ERROR_RT_STS_BIT) {
+		rc = qpnp_lcdb_read(lcdb,
+			lcdb->base + LCDB_MISC_CTL_REG, &val, 1);
+		if (rc < 0)
+			goto irq_handled;
+
+		if (val & EN_TOUCH_WAKE_BIT) {
+			/* blanking time */
+			usleep_range(300, 310);
+			/*
+			 * The status registers need to be written with any
+			 * value before reading them.
+			 */
+			rc = qpnp_lcdb_write(lcdb,
+				lcdb->base + LCDB_STS3_REG, val2, 2);
+			if (rc < 0)
+				goto irq_handled;
+
+			rc = qpnp_lcdb_read(lcdb,
+				lcdb->base + LCDB_STS3_REG, val2, 2);
+			if (rc < 0)
+				goto irq_handled;
+
+			if (!(val2[0] & LDO_VREG_OK_BIT) ||
+					!(val2[1] & NCP_VREG_OK_BIT)) {
+				rc = qpnp_lcdb_handle_sc_event(lcdb);
+				if (rc < 0) {
+					pr_err("Failed to handle SC rc=%d\n",
+								rc);
+					goto irq_handled;
+				}
+			}
+		} else {
+			/* blanking time */
+			usleep_range(2000, 2100);
+			/* Read the SC status again to confirm true SC */
+			rc = qpnp_lcdb_read(lcdb,
+				lcdb->base + INT_RT_STATUS_REG, &val, 1);
+			if (rc < 0)
+				goto irq_handled;
+
+			if (val & SC_ERROR_RT_STS_BIT) {
+				rc = qpnp_lcdb_handle_sc_event(lcdb);
+				if (rc < 0) {
+					pr_err("Failed to handle SC rc=%d\n",
+								rc);
+					goto irq_handled;
+				}
+			}
+		}
+	}
+irq_handled:
+	return IRQ_HANDLED;
+}
+
 #define MIN_BST_VOLTAGE_MV			4700
 #define MAX_BST_VOLTAGE_MV			6250
 #define MIN_VOLTAGE_MV				4000
@@ -1534,6 +1674,18 @@
 		return rc;
 	}
 
+	if (lcdb->sc_irq >= 0) {
+		lcdb->sc_count = 0;
+		rc = devm_request_threaded_irq(lcdb->dev, lcdb->sc_irq,
+				NULL, qpnp_lcdb_sc_irq_handler, IRQF_ONESHOT,
+				"qpnp_lcdb_sc_irq", lcdb);
+		if (rc < 0) {
+			pr_err("Unable to request sc(%d) irq rc=%d\n",
+						lcdb->sc_irq, rc);
+			return rc;
+		}
+	}
+
 	if (!is_lcdb_enabled(lcdb)) {
 		rc = qpnp_lcdb_read(lcdb, lcdb->base +
 				LCDB_MODULE_RDY_REG, &val, 1);
@@ -1590,6 +1742,9 @@
 		}
 	}
 
+	lcdb->force_module_reenable = of_property_read_bool(node,
+					"qcom,force-module-reenable");
+
 	if (of_property_read_bool(node, "qcom,ttw-enable")) {
 		rc = qpnp_lcdb_parse_ttw(lcdb);
 		if (rc < 0) {
@@ -1599,6 +1754,10 @@
 		lcdb->ttw_enable = true;
 	}
 
+	lcdb->sc_irq = platform_get_irq_byname(lcdb->pdev, "sc-irq");
+	if (lcdb->sc_irq < 0)
+		pr_debug("sc irq is not defined\n");
+
 	return rc;
 }
 
diff --git a/drivers/regulator/qpnp-oledb-regulator.c b/drivers/regulator/qpnp-oledb-regulator.c
index 8d017fb..c012f37 100644
--- a/drivers/regulator/qpnp-oledb-regulator.c
+++ b/drivers/regulator/qpnp-oledb-regulator.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/spmi.h>
@@ -24,6 +25,8 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
+#include <linux/qpnp/qpnp-pbs.h>
 
 #define QPNP_OLEDB_REGULATOR_DRIVER_NAME	"qcom,qpnp-oledb-regulator"
 #define OLEDB_VOUT_STEP_MV				100
@@ -91,6 +94,12 @@
 #define OLEDB_ENABLE_NLIMIT_BIT_SHIFT			7
 #define OLEDB_NLIMIT_PGM_MASK				GENMASK(1, 0)
 
+#define OLEDB_SPARE_CTL					0xE9
+#define OLEDB_FORCE_PD_CTL_SPARE_BIT			BIT(7)
+
+#define OLEDB_PD_PBS_TRIGGER_BIT			BIT(0)
+
+#define OLEDB_SEC_UNLOCK_CODE				0xA5
 #define OLEDB_PSM_HYS_CTRL_MIN				13
 #define OLEDB_PSM_HYS_CTRL_MAX				26
 
@@ -150,6 +159,9 @@
 	struct qpnp_oledb_psm_ctl		psm_ctl;
 	struct qpnp_oledb_pfm_ctl		pfm_ctl;
 	struct qpnp_oledb_fast_precharge_ctl	fast_prechg_ctl;
+	struct notifier_block			oledb_nb;
+	struct mutex				bus_lock;
+	struct device_node			*pbs_dev_node;
 
 	u32					base;
 	u8					mod_enable;
@@ -168,6 +180,7 @@
 	bool					ext_pin_control;
 	bool					dynamic_ext_pinctl_config;
 	bool					pbs_control;
+	bool					force_pd_control;
 };
 
 static const u16 oledb_warmup_dly_ns[] = {6700, 13300, 26700, 53400};
@@ -184,11 +197,13 @@
 	int rc = 0;
 	struct platform_device *pdev = oledb->pdev;
 
+	mutex_lock(&oledb->bus_lock);
 	rc = regmap_bulk_read(oledb->regmap, address, val, count);
 	if (rc)
 		pr_err("Failed to read address=0x%02x sid=0x%02x rc=%d\n",
 			address, to_spmi_device(pdev->dev.parent)->usid, rc);
 
+	mutex_unlock(&oledb->bus_lock);
 	return rc;
 }
 
@@ -197,6 +212,7 @@
 {
 	int rc;
 
+	mutex_lock(&oledb->bus_lock);
 	rc = regmap_update_bits(oledb->regmap, address, mask, val);
 	if (rc < 0)
 		pr_err("Failed to write address 0x%04X, rc = %d\n",
@@ -205,6 +221,31 @@
 		pr_debug("Wrote 0x%02X to addr 0x%04X\n",
 			val, address);
 
+	mutex_unlock(&oledb->bus_lock);
+	return rc;
+}
+
+#define OLEDB_SEC_ACCESS	0xD0
+static int qpnp_oledb_sec_masked_write(struct qpnp_oledb *oledb, u16 address,
+							 u8 mask, u8 val)
+{
+	int rc = 0;
+	u8 sec_val = OLEDB_SEC_UNLOCK_CODE;
+	u16 sec_reg_addr = (address & 0xFF00) | OLEDB_SEC_ACCESS;
+
+	mutex_lock(&oledb->bus_lock);
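+	/* write the unlock code to SEC_ACCESS to open the secured register */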
+	rc = regmap_write(oledb->regmap, sec_reg_addr, sec_val);
+	if (rc < 0) {
+		pr_err("Failed to write sec access register %x rc = %d\n",
+			sec_reg_addr, rc);
+		goto error;
+	}
+
+	rc = regmap_update_bits(oledb->regmap, address, mask, val);
+	if (rc < 0)
+		pr_err("spmi write failed: addr=%03X, rc=%d\n", address, rc);
+
+error:
+	mutex_unlock(&oledb->bus_lock);
 	return rc;
 }
 
@@ -214,6 +255,7 @@
 	int rc = 0;
 	struct platform_device *pdev = oledb->pdev;
 
+	mutex_lock(&oledb->bus_lock);
 	rc = regmap_bulk_write(oledb->regmap, address, val, count);
 	if (rc)
 		pr_err("Failed to write address=0x%02x sid=0x%02x rc=%d\n",
@@ -222,7 +264,8 @@
 		pr_debug("Wrote 0x%02X to addr 0x%04X\n",
 			*val, address);
 
-	return 0;
+	mutex_unlock(&oledb->bus_lock);
+	return rc;
 }
 
 static int qpnp_oledb_regulator_enable(struct regulator_dev *rdev)
@@ -285,6 +328,8 @@
 static int qpnp_oledb_regulator_disable(struct regulator_dev *rdev)
 {
 	int rc = 0;
+	u8 trigger_bitmap = OLEDB_PD_PBS_TRIGGER_BIT;
+	u8 val;
 
 	struct qpnp_oledb *oledb  = rdev_get_drvdata(rdev);
 
@@ -314,6 +359,27 @@
 		pr_debug("Register-control mode, module disabled\n");
 	}
 
+	if (oledb->force_pd_control) {
+		rc = qpnp_oledb_read(oledb, oledb->base + OLEDB_SPARE_CTL,
+						&val, 1);
+		if (rc < 0) {
+			pr_err("Failed to read OLEDB_SPARE_CTL rc=%d\n", rc);
+			return rc;
+		}
+
+		if (val & OLEDB_FORCE_PD_CTL_SPARE_BIT) {
+			rc = qpnp_pbs_trigger_event(oledb->pbs_dev_node,
+							trigger_bitmap);
+			if (rc < 0) {
+				pr_err("Failed to trigger the PBS sequence\n");
+				return rc;
+			}
+			pr_debug("PBS event triggered\n");
+		} else {
+			pr_debug("OLEDB_SPARE_CTL register bit not set\n");
+		}
+	}
+
 	oledb->mod_enable = false;
 
 	return rc;
@@ -1034,6 +1100,18 @@
 	oledb->pbs_control =
 			of_property_read_bool(of_node, "qcom,pbs-control");
 
+	oledb->force_pd_control =
+			of_property_read_bool(of_node, "qcom,force-pd-control");
+
+	if (oledb->force_pd_control) {
+		oledb->pbs_dev_node = of_parse_phandle(of_node,
+						"qcom,pbs-client", 0);
+		if (!oledb->pbs_dev_node) {
+			pr_err("Missing qcom,pbs-client property\n");
+			return -EINVAL;
+		}
+	}
+
 	oledb->current_voltage = -EINVAL;
 	rc = of_property_read_u32(of_node, "qcom,oledb-init-voltage-mv",
 						&oledb->current_voltage);
@@ -1116,6 +1194,52 @@
 	return rc;
 }
 
+static int qpnp_oledb_force_pulldown_config(struct qpnp_oledb *oledb)
+{
+	int rc = 0;
+	u8 val;
+
+	rc = qpnp_oledb_sec_masked_write(oledb, oledb->base +
+		    OLEDB_SPARE_CTL, OLEDB_FORCE_PD_CTL_SPARE_BIT, 0);
+	if (rc < 0) {
+		pr_err("Failed to write SPARE_CTL rc=%d\n", rc);
+		return rc;
+	}
+
+	val = 1;
+	rc = qpnp_oledb_write(oledb, oledb->base + OLEDB_PD_CTL,
+							&val, 1);
+	if (rc < 0) {
+		pr_err("Failed to write PD_CTL rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_oledb_masked_write(oledb, oledb->base +
+		OLEDB_SWIRE_CONTROL, OLEDB_EN_SWIRE_PD_UPD_BIT, 0);
+	if (rc < 0)
+		pr_err("Failed to write SWIRE_CTL for pbs mode rc=%d\n",
+					rc);
+
+	return rc;
+}
+
+static int qpnp_labibb_notifier_cb(struct notifier_block *nb,
+					unsigned long action, void *data)
+{
+	int rc = 0;
+	struct qpnp_oledb *oledb = container_of(nb, struct qpnp_oledb,
+								oledb_nb);
+
+	if (action == LAB_VREG_OK) {
+		/* Disable SWIRE pull down control and enable via spmi mode */
+		rc = qpnp_oledb_force_pulldown_config(oledb);
+		if (rc < 0)
+			return NOTIFY_STOP;
+	}
+
+	return NOTIFY_OK;
+}
+
 static int qpnp_oledb_regulator_probe(struct platform_device *pdev)
 {
 	int rc = 0;
@@ -1143,6 +1267,7 @@
 		return rc;
 	}
 
+	mutex_init(&(oledb->bus_lock));
 	oledb->base = val;
 	rc = qpnp_oledb_parse_dt(oledb);
 	if (rc < 0) {
@@ -1156,18 +1281,47 @@
 		return rc;
 	}
 
+	if (oledb->force_pd_control) {
+		oledb->oledb_nb.notifier_call = qpnp_labibb_notifier_cb;
+		rc = qpnp_labibb_notifier_register(&oledb->oledb_nb);
+		if (rc < 0) {
+			pr_err("Failed to register qpnp_labibb_notifier_cb\n");
+			return rc;
+		}
+	}
+
 	rc = qpnp_oledb_register_regulator(oledb);
-	if (!rc)
-		pr_info("OLEDB registered successfully, ext_pin_en=%d mod_en=%d cuurent_voltage=%d mV\n",
+	if (rc < 0) {
+		pr_err("Failed to register regulator rc=%d\n", rc);
+		goto out;
+	}
+	pr_info("OLEDB registered successfully, ext_pin_en=%d mod_en=%d current_voltage=%d mV\n",
 			oledb->ext_pin_control, oledb->mod_enable,
 						oledb->current_voltage);
+	return 0;
+
+out:
+	if (oledb->force_pd_control) {
+		rc = qpnp_labibb_notifier_unregister(&oledb->oledb_nb);
+		if (rc < 0)
+			pr_err("Failed to unregister lab_vreg_ok notifier\n");
+	}
 
 	return rc;
 }
 
 static int qpnp_oledb_regulator_remove(struct platform_device *pdev)
 {
-	return 0;
+	int rc = 0;
+	struct qpnp_oledb *oledb = platform_get_drvdata(pdev);
+
+	if (oledb->force_pd_control) {
+		rc = qpnp_labibb_notifier_unregister(&oledb->oledb_nb);
+		if (rc < 0)
+			pr_err("Failed to unregister lab_vreg_ok notifier\n");
+	}
+
+	return rc;
 }
 
 const struct of_device_id qpnp_oledb_regulator_match_table[] = {
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed92fb0..76b802c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1712,6 +1712,9 @@
 		ap_dev->queue_depth = queue_depth;
 		ap_dev->raw_hwtype = device_type;
 		ap_dev->device_type = device_type;
+		/* CEX6 toleration: map to CEX5 */
+		if (device_type == AP_DEVICE_TYPE_CEX6)
+			ap_dev->device_type = AP_DEVICE_TYPE_CEX5;
 		ap_dev->functions = device_functions;
 		spin_lock_init(&ap_dev->lock);
 		INIT_LIST_HEAD(&ap_dev->pendingq);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d7fdf5c..fd66d2c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -105,6 +105,7 @@
 #define AP_DEVICE_TYPE_CEX3C	9
 #define AP_DEVICE_TYPE_CEX4	10
 #define AP_DEVICE_TYPE_CEX5	11
+#define AP_DEVICE_TYPE_CEX6	12
 
 /*
  * Known function facilities
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 91dfd58..c4fe95a 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
  *
  ****************************************************************************/
 
-#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -82,7 +82,7 @@
 		}
 	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
 		if (se_cmd->data_direction == DMA_TO_DEVICE) {
-			/*  residual data from an overflow write */
+			/* residual data from an overflow write */
 			rsp->flags = SRP_RSP_FLAG_DOOVER;
 			rsp->data_out_res_cnt = cpu_to_be32(residual_count);
 		} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -102,7 +102,7 @@
  * and the function returns TRUE.
  *
  * EXECUTION ENVIRONMENT:
- *      Interrupt or Process environment
+ *	Interrupt or Process environment
  */
 static bool connection_broken(struct scsi_info *vscsi)
 {
@@ -325,7 +325,7 @@
 }
 
 /**
- * ibmvscsis_send_init_message() -  send initialize message to the client
+ * ibmvscsis_send_init_message() - send initialize message to the client
  * @vscsi:	Pointer to our adapter structure
  * @format:	Which Init Message format to send
  *
@@ -383,13 +383,13 @@
 					      vscsi->cmd_q.base_addr);
 		if (crq) {
 			*format = (uint)(crq->format);
-			rc =  ERROR;
+			rc = ERROR;
 			crq->valid = INVALIDATE_CMD_RESP_EL;
 			dma_rmb();
 		}
 	} else {
 		*format = (uint)(crq->format);
-		rc =  ERROR;
+		rc = ERROR;
 		crq->valid = INVALIDATE_CMD_RESP_EL;
 		dma_rmb();
 	}
@@ -398,166 +398,6 @@
 }
 
 /**
- * ibmvscsis_establish_new_q() - Establish new CRQ queue
- * @vscsi:	Pointer to our adapter structure
- * @new_state:	New state being established after resetting the queue
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi,  uint new_state)
-{
-	long rc = ADAPT_SUCCESS;
-	uint format;
-
-	vscsi->flags &= PRESERVE_FLAG_FIELDS;
-	vscsi->rsp_q_timer.timer_pops = 0;
-	vscsi->debit = 0;
-	vscsi->credit = 0;
-
-	rc = vio_enable_interrupts(vscsi->dma_dev);
-	if (rc) {
-		pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
-			rc);
-		return rc;
-	}
-
-	rc = ibmvscsis_check_init_msg(vscsi, &format);
-	if (rc) {
-		dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
-			rc);
-		return rc;
-	}
-
-	if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
-		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-		case H_DROPPED:
-		case H_CLOSED:
-			rc = ADAPT_SUCCESS;
-			break;
-
-		case H_PARAMETER:
-		case H_HARDWARE:
-			break;
-
-		default:
-			vscsi->state = UNDEFINED;
-			rc = H_HARDWARE;
-			break;
-		}
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_reset_queue() - Reset CRQ Queue
- * @vscsi:	Pointer to our adapter structure
- * @new_state:	New state to establish after resetting the queue
- *
- * This function calls h_free_q and then calls h_reg_q and does all
- * of the bookkeeping to get us back to where we can communicate.
- *
- * Actually, we don't always call h_free_crq.  A problem was discovered
- * where one partition would close and reopen his queue, which would
- * cause his partner to get a transport event, which would cause him to
- * close and reopen his queue, which would cause the original partition
- * to get a transport event, etc., etc.  To prevent this, we don't
- * actually close our queue if the client initiated the reset, (i.e.
- * either we got a transport event or we have detected that the client's
- * queue is gone)
- *
- * EXECUTION ENVIRONMENT:
- *	Process environment, called with interrupt lock held
- */
-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
-{
-	int bytes;
-	long rc = ADAPT_SUCCESS;
-
-	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
-
-	/* don't reset, the client did it for us */
-	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
-		vscsi->flags &=  PRESERVE_FLAG_FIELDS;
-		vscsi->rsp_q_timer.timer_pops = 0;
-		vscsi->debit = 0;
-		vscsi->credit = 0;
-		vscsi->state = new_state;
-		vio_enable_interrupts(vscsi->dma_dev);
-	} else {
-		rc = ibmvscsis_free_command_q(vscsi);
-		if (rc == ADAPT_SUCCESS) {
-			vscsi->state = new_state;
-
-			bytes = vscsi->cmd_q.size * PAGE_SIZE;
-			rc = h_reg_crq(vscsi->dds.unit_id,
-				       vscsi->cmd_q.crq_token, bytes);
-			if (rc == H_CLOSED || rc == H_SUCCESS) {
-				rc = ibmvscsis_establish_new_q(vscsi,
-							       new_state);
-			}
-
-			if (rc != ADAPT_SUCCESS) {
-				pr_debug("reset_queue: reg_crq rc %ld\n", rc);
-
-				vscsi->state = ERR_DISCONNECTED;
-				vscsi->flags |=  RESPONSE_Q_DOWN;
-				ibmvscsis_free_command_q(vscsi);
-			}
-		} else {
-			vscsi->state = ERR_DISCONNECTED;
-			vscsi->flags |= RESPONSE_Q_DOWN;
-		}
-	}
-}
-
-/**
- * ibmvscsis_free_cmd_resources() - Free command resources
- * @vscsi:	Pointer to our adapter structure
- * @cmd:	Command which is not longer in use
- *
- * Must be called with interrupt lock held.
- */
-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
-					 struct ibmvscsis_cmd *cmd)
-{
-	struct iu_entry *iue = cmd->iue;
-
-	switch (cmd->type) {
-	case TASK_MANAGEMENT:
-	case SCSI_CDB:
-		/*
-		 * When the queue goes down this value is cleared, so it
-		 * cannot be cleared in this general purpose function.
-		 */
-		if (vscsi->debit)
-			vscsi->debit -= 1;
-		break;
-	case ADAPTER_MAD:
-		vscsi->flags &= ~PROCESSING_MAD;
-		break;
-	case UNSET_TYPE:
-		break;
-	default:
-		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
-			cmd->type);
-		break;
-	}
-
-	cmd->iue = NULL;
-	list_add_tail(&cmd->list, &vscsi->free_cmd);
-	srp_iu_put(iue);
-
-	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
-	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
-		vscsi->flags &= ~WAIT_FOR_IDLE;
-		complete(&vscsi->wait_idle);
-	}
-}
-
-/**
  * ibmvscsis_disconnect() - Helper function to disconnect
  * @work:	Pointer to work_struct, gives access to our adapter structure
  *
@@ -576,7 +416,6 @@
 					       proc_work);
 	u16 new_state;
 	bool wait_idle = false;
-	long rc = ADAPT_SUCCESS;
 
 	spin_lock_bh(&vscsi->intr_lock);
 	new_state = vscsi->new_state;
@@ -590,7 +429,7 @@
 	 * should transitition to the new state
 	 */
 	switch (vscsi->state) {
-	/*  Should never be called while in this state. */
+	/* Should never be called while in this state. */
 	case NO_QUEUE:
 	/*
 	 * Can never transition from this state;
@@ -629,30 +468,24 @@
 			vscsi->state = new_state;
 		break;
 
-	/*
-	 * If this is a transition into an error state.
-	 * a client is attempting to establish a connection
-	 * and has violated the RPA protocol.
-	 * There can be nothing pending on the adapter although
-	 * there can be requests in the command queue.
-	 */
 	case WAIT_ENABLED:
-	case PART_UP_WAIT_ENAB:
 		switch (new_state) {
-		case ERR_DISCONNECT:
-			vscsi->flags |= RESPONSE_Q_DOWN;
+		case UNCONFIGURING:
 			vscsi->state = new_state;
+			vscsi->flags |= RESPONSE_Q_DOWN;
 			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 					  DISCONNECT_SCHEDULED);
-			ibmvscsis_free_command_q(vscsi);
-			break;
-		case ERR_DISCONNECT_RECONNECT:
-			ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+			dma_rmb();
+			if (vscsi->flags & CFG_SLEEPING) {
+				vscsi->flags &= ~CFG_SLEEPING;
+				complete(&vscsi->unconfig);
+			}
 			break;
 
 		/* should never happen */
+		case ERR_DISCONNECT:
+		case ERR_DISCONNECT_RECONNECT:
 		case WAIT_IDLE:
-			rc = ERROR;
 			dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
 				vscsi->state);
 			break;
@@ -661,6 +494,13 @@
 
 	case WAIT_IDLE:
 		switch (new_state) {
+		case UNCONFIGURING:
+			vscsi->flags |= RESPONSE_Q_DOWN;
+			vscsi->state = new_state;
+			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+					  DISCONNECT_SCHEDULED);
+			ibmvscsis_free_command_q(vscsi);
+			break;
 		case ERR_DISCONNECT:
 		case ERR_DISCONNECT_RECONNECT:
 			vscsi->state = new_state;
@@ -789,7 +629,6 @@
 			break;
 
 		case WAIT_ENABLED:
-		case PART_UP_WAIT_ENAB:
 		case WAIT_IDLE:
 		case WAIT_CONNECTION:
 		case CONNECTED:
@@ -807,6 +646,310 @@
 }
 
 /**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+
+	switch (vscsi->state) {
+	case NO_QUEUE:
+	case ERR_DISCONNECT:
+	case ERR_DISCONNECT_RECONNECT:
+	case ERR_DISCONNECTED:
+	case UNCONFIGURING:
+	case UNDEFINED:
+		rc = ERROR;
+		break;
+
+	case WAIT_CONNECTION:
+		vscsi->state = CONNECTED;
+		break;
+
+	case WAIT_IDLE:
+	case SRP_PROCESSING:
+	case CONNECTED:
+	case WAIT_ENABLED:
+	default:
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+			vscsi->state);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+
+	switch (vscsi->state) {
+	case WAIT_CONNECTION:
+		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+			vscsi->state = CONNECTED;
+			break;
+
+		case H_PARAMETER:
+			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+				rc);
+			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+			break;
+
+		case H_DROPPED:
+			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+				rc);
+			rc = ERROR;
+			ibmvscsis_post_disconnect(vscsi,
+						  ERR_DISCONNECT_RECONNECT, 0);
+			break;
+
+		case H_CLOSED:
+			pr_warn("init_msg: failed to send, rc %ld\n", rc);
+			rc = 0;
+			break;
+		}
+		break;
+
+	case UNDEFINED:
+		rc = ERROR;
+		break;
+
+	case UNCONFIGURING:
+		break;
+
+	case WAIT_ENABLED:
+	case CONNECTED:
+	case SRP_PROCESSING:
+	case WAIT_IDLE:
+	case NO_QUEUE:
+	case ERR_DISCONNECT:
+	case ERR_DISCONNECT_RECONNECT:
+	case ERR_DISCONNECTED:
+	default:
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+			vscsi->state);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi:	Pointer to our adapter structure
+ * @crq:	Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+	long rc = ADAPT_SUCCESS;
+
+	pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+		      0);
+	if (rc == H_SUCCESS) {
+		vscsi->client_data.partition_number =
+			be64_to_cpu(*(u64 *)vscsi->map_buf);
+		pr_debug("init_msg, part num %d\n",
+			 vscsi->client_data.partition_number);
+	} else {
+		pr_debug("init_msg h_vioctl rc %ld\n", rc);
+		rc = ADAPT_SUCCESS;
+	}
+
+	if (crq->format == INIT_MSG) {
+		rc = ibmvscsis_handle_init_msg(vscsi);
+	} else if (crq->format == INIT_COMPLETE_MSG) {
+		rc = ibmvscsis_handle_init_compl_msg(vscsi);
+	} else {
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+			(uint)crq->format);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+	uint format;
+
+	vscsi->flags &= PRESERVE_FLAG_FIELDS;
+	vscsi->rsp_q_timer.timer_pops = 0;
+	vscsi->debit = 0;
+	vscsi->credit = 0;
+
+	rc = vio_enable_interrupts(vscsi->dma_dev);
+	if (rc) {
+		pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
+			rc);
+		return rc;
+	}
+
+	rc = ibmvscsis_check_init_msg(vscsi, &format);
+	if (rc) {
+		dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
+			rc);
+		return rc;
+	}
+
+	if (format == UNUSED_FORMAT) {
+		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+		case H_DROPPED:
+		case H_CLOSED:
+			rc = ADAPT_SUCCESS;
+			break;
+
+		case H_PARAMETER:
+		case H_HARDWARE:
+			break;
+
+		default:
+			vscsi->state = UNDEFINED;
+			rc = H_HARDWARE;
+			break;
+		}
+	} else if (format == INIT_MSG) {
+		rc = ibmvscsis_handle_init_msg(vscsi);
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * This function calls h_free_crq and then h_reg_crq, and does all of
+ * the bookkeeping needed to get us back to a state where we can
+ * communicate.
+ *
+ * Actually, we don't always call h_free_crq.  A problem was discovered
+ * where one partition would close and reopen its queue, which would
+ * cause its partner to get a transport event, which would cause it to
+ * close and reopen its queue, which would cause the original partition
+ * to get a transport event, etc., etc.  To prevent this, we don't
+ * actually close our queue if the client initiated the reset (i.e.
+ * either we got a transport event or we have detected that the
+ * client's queue is gone).
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
+{
+	int bytes;
+	long rc = ADAPT_SUCCESS;
+
+	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+	/* don't reset, the client did it for us */
+	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+		vscsi->flags &= PRESERVE_FLAG_FIELDS;
+		vscsi->rsp_q_timer.timer_pops = 0;
+		vscsi->debit = 0;
+		vscsi->credit = 0;
+		vscsi->state = WAIT_CONNECTION;
+		vio_enable_interrupts(vscsi->dma_dev);
+	} else {
+		rc = ibmvscsis_free_command_q(vscsi);
+		if (rc == ADAPT_SUCCESS) {
+			vscsi->state = WAIT_CONNECTION;
+
+			bytes = vscsi->cmd_q.size * PAGE_SIZE;
+			rc = h_reg_crq(vscsi->dds.unit_id,
+				       vscsi->cmd_q.crq_token, bytes);
+			if (rc == H_CLOSED || rc == H_SUCCESS) {
+				rc = ibmvscsis_establish_new_q(vscsi);
+			}
+
+			if (rc != ADAPT_SUCCESS) {
+				pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+				vscsi->state = ERR_DISCONNECTED;
+				vscsi->flags |= RESPONSE_Q_DOWN;
+				ibmvscsis_free_command_q(vscsi);
+			}
+		} else {
+			vscsi->state = ERR_DISCONNECTED;
+			vscsi->flags |= RESPONSE_Q_DOWN;
+		}
+	}
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi:	Pointer to our adapter structure
+ * @cmd:	Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+					 struct ibmvscsis_cmd *cmd)
+{
+	struct iu_entry *iue = cmd->iue;
+
+	switch (cmd->type) {
+	case TASK_MANAGEMENT:
+	case SCSI_CDB:
+		/*
+		 * When the queue goes down, this value is cleared, so it
+		 * cannot be cleared in this general purpose function.
+		 */
+		if (vscsi->debit)
+			vscsi->debit -= 1;
+		break;
+	case ADAPTER_MAD:
+		vscsi->flags &= ~PROCESSING_MAD;
+		break;
+	case UNSET_TYPE:
+		break;
+	default:
+		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+			cmd->type);
+		break;
+	}
+
+	cmd->iue = NULL;
+	list_add_tail(&cmd->list, &vscsi->free_cmd);
+	srp_iu_put(iue);
+
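+	/* wake anyone waiting for the adapter to go idle once all queues drain */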
+	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+		vscsi->flags &= ~WAIT_FOR_IDLE;
+		complete(&vscsi->wait_idle);
+	}
+}
+
+/**
  * ibmvscsis_trans_event() - Handle a Transport Event
  * @vscsi:	Pointer to our adapter structure
  * @crq:	Pointer to CRQ entry containing the Transport Event
@@ -864,10 +1007,6 @@
 						   TRANS_EVENT));
 			break;
 
-		case PART_UP_WAIT_ENAB:
-			vscsi->state = WAIT_ENABLED;
-			break;
-
 		case SRP_PROCESSING:
 			if ((vscsi->debit > 0) ||
 			    !list_empty(&vscsi->schedule_q) ||
@@ -896,7 +1035,7 @@
 		}
 	}
 
-	rc =  vscsi->flags & SCHEDULE_DISCONNECT;
+	rc = vscsi->flags & SCHEDULE_DISCONNECT;
 
 	pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
 		 vscsi->flags, vscsi->state, rc);
@@ -1067,16 +1206,28 @@
 		free_qs = true;
 
 	switch (vscsi->state) {
+	case UNCONFIGURING:
+		ibmvscsis_free_command_q(vscsi);
+		dma_rmb();
+		isync();
+		if (vscsi->flags & CFG_SLEEPING) {
+			vscsi->flags &= ~CFG_SLEEPING;
+			complete(&vscsi->unconfig);
+		}
+		break;
 	case ERR_DISCONNECT_RECONNECT:
-		ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+		ibmvscsis_reset_queue(vscsi);
 		pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
 		break;
 
 	case ERR_DISCONNECT:
 		ibmvscsis_free_command_q(vscsi);
-		vscsi->flags &= ~DISCONNECT_SCHEDULED;
+		vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
 		vscsi->flags |= RESPONSE_Q_DOWN;
-		vscsi->state = ERR_DISCONNECTED;
+		if (vscsi->tport.enabled)
+			vscsi->state = ERR_DISCONNECTED;
+		else
+			vscsi->state = WAIT_ENABLED;
 		pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
 			 vscsi->flags, vscsi->state);
 		break;
@@ -1221,7 +1372,7 @@
  * @iue:	Information Unit containing the Adapter Info MAD request
  *
  * EXECUTION ENVIRONMENT:
- *	Interrupt adpater lock is held
+ *	Interrupt adapter lock is held
  */
 static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
 				   struct iu_entry *iue)
@@ -1621,8 +1772,8 @@
 					be64_to_cpu(msg_hi),
 					be64_to_cpu(cmd->rsp.tag));
 
-			pr_debug("send_messages: tag 0x%llx, rc %ld\n",
-				 be64_to_cpu(cmd->rsp.tag), rc);
+			pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+				 cmd, be64_to_cpu(cmd->rsp.tag), rc);
 
 			/* if all ok free up the command element resources */
 			if (rc == H_SUCCESS) {
@@ -1692,7 +1843,7 @@
  * @crq:	Pointer to the CRQ entry containing the MAD request
  *
  * EXECUTION ENVIRONMENT:
- *	Interrupt  called with adapter lock held
+ *	Interrupt, called with adapter lock held
  */
 static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
 {
@@ -1746,14 +1897,7 @@
 
 		pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
 
-		if (be16_to_cpu(mad->length) < 0) {
-			dev_err(&vscsi->dev, "mad: length is < 0\n");
-			ibmvscsis_post_disconnect(vscsi,
-						  ERR_DISCONNECT_RECONNECT, 0);
-			rc = SRP_VIOLATION;
-		} else {
-			rc = ibmvscsis_process_mad(vscsi, iue);
-		}
+		rc = ibmvscsis_process_mad(vscsi, iue);
 
 		pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
 			 rc);
@@ -1865,7 +2009,7 @@
 		break;
 	case H_PERMISSION:
 		if (connection_broken(vscsi))
-			flag_bits =  RESPONSE_Q_DOWN | CLIENT_FAILED;
+			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
 		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
 			rc);
 		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2188,156 +2332,6 @@
 }
 
 /**
- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
- * @vscsi:	Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
-{
-	long rc = ADAPT_SUCCESS;
-
-	switch (vscsi->state) {
-	case NO_QUEUE:
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	case ERR_DISCONNECTED:
-	case UNCONFIGURING:
-	case UNDEFINED:
-		rc = ERROR;
-		break;
-
-	case WAIT_CONNECTION:
-		vscsi->state = CONNECTED;
-		break;
-
-	case WAIT_IDLE:
-	case SRP_PROCESSING:
-	case CONNECTED:
-	case WAIT_ENABLED:
-	case PART_UP_WAIT_ENAB:
-	default:
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
-			vscsi->state);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-		break;
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_handle_init_msg() - Respond to an Init Message
- * @vscsi:	Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
-{
-	long rc = ADAPT_SUCCESS;
-
-	switch (vscsi->state) {
-	case WAIT_ENABLED:
-		vscsi->state = PART_UP_WAIT_ENAB;
-		break;
-
-	case WAIT_CONNECTION:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-			vscsi->state = CONNECTED;
-			break;
-
-		case H_PARAMETER:
-			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
-				rc);
-			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
-			break;
-
-		case H_DROPPED:
-			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
-				rc);
-			rc = ERROR;
-			ibmvscsis_post_disconnect(vscsi,
-						  ERR_DISCONNECT_RECONNECT, 0);
-			break;
-
-		case H_CLOSED:
-			pr_warn("init_msg: failed to send, rc %ld\n", rc);
-			rc = 0;
-			break;
-		}
-		break;
-
-	case UNDEFINED:
-		rc = ERROR;
-		break;
-
-	case UNCONFIGURING:
-		break;
-
-	case PART_UP_WAIT_ENAB:
-	case CONNECTED:
-	case SRP_PROCESSING:
-	case WAIT_IDLE:
-	case NO_QUEUE:
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	case ERR_DISCONNECTED:
-	default:
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
-			vscsi->state);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-		break;
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_init_msg() - Respond to an init message
- * @vscsi:	Pointer to our adapter structure
- * @crq:	Pointer to CRQ element containing the Init Message
- *
- * EXECUTION ENVIRONMENT:
- *	Interrupt, interrupt lock held
- */
-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
-{
-	long rc = ADAPT_SUCCESS;
-
-	pr_debug("init_msg: state 0x%hx\n", vscsi->state);
-
-	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
-		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
-		      0);
-	if (rc == H_SUCCESS) {
-		vscsi->client_data.partition_number =
-			be64_to_cpu(*(u64 *)vscsi->map_buf);
-		pr_debug("init_msg, part num %d\n",
-			 vscsi->client_data.partition_number);
-	} else {
-		pr_debug("init_msg h_vioctl rc %ld\n", rc);
-		rc = ADAPT_SUCCESS;
-	}
-
-	if (crq->format == INIT_MSG) {
-		rc = ibmvscsis_handle_init_msg(vscsi);
-	} else if (crq->format == INIT_COMPLETE_MSG) {
-		rc = ibmvscsis_handle_init_compl_msg(vscsi);
-	} else {
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
-			(uint)crq->format);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-	}
-
-	return rc;
-}
-
-/**
  * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
  * @vscsi:	Pointer to our adapter structure
  * @crq:	Pointer to CRQ element containing the SRP request
@@ -2392,7 +2386,7 @@
 		break;
 
 	case VALID_TRANS_EVENT:
-		rc =  ibmvscsis_trans_event(vscsi, crq);
+		rc = ibmvscsis_trans_event(vscsi, crq);
 		break;
 
 	case VALID_INIT_MSG:
@@ -2523,7 +2517,6 @@
 		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
 			srp->tag);
 		goto fail;
-		return;
 	}
 
 	cmd->rsp.sol_not = srp->sol_not;
@@ -2560,6 +2553,10 @@
 			       data_len, attr, dir, 0);
 	if (rc) {
 		dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+		spin_lock_bh(&vscsi->intr_lock);
+		list_del(&cmd->list);
+		ibmvscsis_free_cmd_resources(vscsi, cmd);
+		spin_unlock_bh(&vscsi->intr_lock);
 		goto fail;
 	}
 	return;
@@ -2639,6 +2636,9 @@
 		if (rc) {
 			dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
 				rc);
+			spin_lock_bh(&vscsi->intr_lock);
+			list_del(&cmd->list);
+			spin_unlock_bh(&vscsi->intr_lock);
 			cmd->se_cmd.se_tmr_req->response =
 				TMR_FUNCTION_REJECTED;
 		}
@@ -2787,36 +2787,6 @@
 }
 
 /**
- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
- * @vscsi:	Pointer to our adapter structure
- *
- * Checks if a initialize message was queued by the initiatior
- * while the timing window was open.  This function is called from
- * probe after the CRQ is created and interrupts are enabled.
- * It would only be used by adapters who wait for some event before
- * completing the init handshake with the client.  For ibmvscsi, this
- * event is waiting for the port to be enabled.
- *
- * EXECUTION ENVIRONMENT:
- *	Process level only, interrupt lock held
- */
-static long ibmvscsis_check_q(struct scsi_info *vscsi)
-{
-	uint format;
-	long rc;
-
-	rc = ibmvscsis_check_init_msg(vscsi, &format);
-	if (rc)
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-	else if (format == UNUSED_FORMAT)
-		vscsi->state = WAIT_ENABLED;
-	else
-		vscsi->state = PART_UP_WAIT_ENAB;
-
-	return rc;
-}
-
-/**
  * ibmvscsis_enable_change_state() - Set new state based on enabled status
  * @vscsi:	Pointer to our adapter structure
  *
@@ -2827,77 +2797,19 @@
  */
 static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
 {
+	int bytes;
 	long rc = ADAPT_SUCCESS;
 
-handle_state_change:
-	switch (vscsi->state) {
-	case WAIT_ENABLED:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-		case H_DROPPED:
-		case H_CLOSED:
-			vscsi->state =  WAIT_CONNECTION;
-			rc = ADAPT_SUCCESS;
-			break;
+	bytes = vscsi->cmd_q.size * PAGE_SIZE;
+	rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
+	if (rc == H_CLOSED || rc == H_SUCCESS) {
+		vscsi->state = WAIT_CONNECTION;
+		rc = ibmvscsis_establish_new_q(vscsi);
+	}
 
-		case H_PARAMETER:
-			break;
-
-		case H_HARDWARE:
-			break;
-
-		default:
-			vscsi->state = UNDEFINED;
-			rc = H_HARDWARE;
-			break;
-		}
-		break;
-	case PART_UP_WAIT_ENAB:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-			vscsi->state = CONNECTED;
-			rc = ADAPT_SUCCESS;
-			break;
-
-		case H_DROPPED:
-		case H_CLOSED:
-			vscsi->state = WAIT_ENABLED;
-			goto handle_state_change;
-
-		case H_PARAMETER:
-			break;
-
-		case H_HARDWARE:
-			break;
-
-		default:
-			rc = H_HARDWARE;
-			break;
-		}
-		break;
-
-	case WAIT_CONNECTION:
-	case WAIT_IDLE:
-	case SRP_PROCESSING:
-	case CONNECTED:
-		rc = ADAPT_SUCCESS;
-		break;
-		/* should not be able to get here */
-	case UNCONFIGURING:
-		rc = ERROR;
-		vscsi->state = UNDEFINED;
-		break;
-
-		/* driver should never allow this to happen */
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	default:
-		dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
-			vscsi->state);
-		rc = ADAPT_SUCCESS;
-		break;
+	if (rc != ADAPT_SUCCESS) {
+		vscsi->state = ERR_DISCONNECTED;
+		vscsi->flags |= RESPONSE_Q_DOWN;
 	}
 
 	return rc;
@@ -2917,7 +2829,6 @@
  */
 static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
 {
-	long rc = 0;
 	int pages;
 	struct vio_dev *vdev = vscsi->dma_dev;
 
@@ -2941,22 +2852,7 @@
 		return -ENOMEM;
 	}
 
-	rc =  h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
-	if (rc) {
-		if (rc == H_CLOSED) {
-			vscsi->state = WAIT_ENABLED;
-			rc = 0;
-		} else {
-			dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
-					 PAGE_SIZE, DMA_BIDIRECTIONAL);
-			free_page((unsigned long)vscsi->cmd_q.base_addr);
-			rc = -ENODEV;
-		}
-	} else {
-		vscsi->state = WAIT_ENABLED;
-	}
-
-	return rc;
+	return 0;
 }
 
 /**
@@ -3271,7 +3167,7 @@
 	/*
 	 * if we are in a path where we are waiting for all pending commands
 	 * to complete because we received a transport event and anything in
-	 * the command queue is for a new connection,  do nothing
+	 * the command queue is for a new connection, do nothing
 	 */
 	if (TARGET_STOP(vscsi)) {
 		vio_enable_interrupts(vscsi->dma_dev);
@@ -3315,7 +3211,7 @@
 				 * everything but transport events on the queue
 				 *
 				 * need to decrement the queue index so we can
-				 * look at the elment again
+				 * look at the element again
 				 */
 				if (vscsi->cmd_q.index)
 					vscsi->cmd_q.index -= 1;
@@ -3379,7 +3275,8 @@
 	INIT_LIST_HEAD(&vscsi->waiting_rsp);
 	INIT_LIST_HEAD(&vscsi->active_q);
 
-	snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
+	snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+		 dev_name(&vdev->dev));
 
 	pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
 
@@ -3394,6 +3291,9 @@
 	strncat(vscsi->eye, vdev->name, MAX_EYE);
 
 	vscsi->dds.unit_id = vdev->unit_address;
+	strncpy(vscsi->dds.partition_name, partition_name,
+		sizeof(vscsi->dds.partition_name));
+	vscsi->dds.partition_num = partition_number;
 
 	spin_lock_bh(&ibmvscsis_dev_lock);
 	list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3470,6 +3370,7 @@
 		     (unsigned long)vscsi);
 
 	init_completion(&vscsi->wait_idle);
+	init_completion(&vscsi->unconfig);
 
 	snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
 	vscsi->work_q = create_workqueue(wq_name);
@@ -3486,31 +3387,12 @@
 		goto destroy_WQ;
 	}
 
-	spin_lock_bh(&vscsi->intr_lock);
-	vio_enable_interrupts(vdev);
-	if (rc) {
-		dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
-		rc = -ENODEV;
-		spin_unlock_bh(&vscsi->intr_lock);
-		goto free_irq;
-	}
-
-	if (ibmvscsis_check_q(vscsi)) {
-		rc = ERROR;
-		dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
-		spin_unlock_bh(&vscsi->intr_lock);
-		goto disable_interrupt;
-	}
-	spin_unlock_bh(&vscsi->intr_lock);
+	vscsi->state = WAIT_ENABLED;
 
 	dev_set_drvdata(&vdev->dev, vscsi);
 
 	return 0;
 
-disable_interrupt:
-	vio_disable_interrupts(vdev);
-free_irq:
-	free_irq(vdev->irq, vscsi);
 destroy_WQ:
 	destroy_workqueue(vscsi->work_q);
 unmap_buf:
@@ -3544,10 +3426,11 @@
 
 	pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
 
-	/*
-	 * TBD: Need to handle if there are commands on the waiting_rsp q
-	 *      Actually, can there still be cmds outstanding to tcm?
-	 */
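+	/* post an UNCONFIGURING disconnect and wait for it to complete */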
+	spin_lock_bh(&vscsi->intr_lock);
+	ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
+	vscsi->flags |= CFG_SLEEPING;
+	spin_unlock_bh(&vscsi->intr_lock);
+	wait_for_completion(&vscsi->unconfig);
 
 	vio_disable_interrupts(vdev);
 	free_irq(vdev->irq, vscsi);
@@ -3556,7 +3439,6 @@
 			 DMA_BIDIRECTIONAL);
 	kfree(vscsi->map_buf);
 	tasklet_kill(&vscsi->work_task);
-	ibmvscsis_unregister_command_q(vscsi);
 	ibmvscsis_destroy_command_q(vscsi);
 	ibmvscsis_freetimer(vscsi);
 	ibmvscsis_free_cmds(vscsi);
@@ -3610,7 +3492,7 @@
 
 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
 	if (num)
-		partition_number = *num;
+		partition_number = of_read_number(num, 1);
 
 	of_node_put(rootdn);
 
@@ -3904,18 +3786,22 @@
 	}
 
 	if (tmp) {
-		tport->enabled = true;
 		spin_lock_bh(&vscsi->intr_lock);
+		tport->enabled = true;
 		lrc = ibmvscsis_enable_change_state(vscsi);
 		if (lrc)
 			pr_err("enable_change_state failed, rc %ld state %d\n",
 			       lrc, vscsi->state);
 		spin_unlock_bh(&vscsi->intr_lock);
 	} else {
+		spin_lock_bh(&vscsi->intr_lock);
 		tport->enabled = false;
+		/* This simulates the server going down */
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+		spin_unlock_bh(&vscsi->intr_lock);
 	}
 
-	pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+	pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
 
 	return count;
 }
@@ -3985,10 +3871,10 @@
 ATTRIBUTE_GROUPS(ibmvscsis_dev);
 
 static struct class ibmvscsis_class = {
-	.name           = "ibmvscsis",
-	.dev_release    = ibmvscsis_dev_release,
-	.class_attrs    = ibmvscsis_class_attrs,
-	.dev_groups     = ibmvscsis_dev_groups,
+	.name		= "ibmvscsis",
+	.dev_release	= ibmvscsis_dev_release,
+	.class_attrs	= ibmvscsis_class_attrs,
+	.dev_groups	= ibmvscsis_dev_groups,
 };
 
 static struct vio_device_id ibmvscsis_device_table[] = {
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c9..98b0ca7 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -204,8 +204,6 @@
 	struct list_head waiting_rsp;
 #define NO_QUEUE                    0x00
 #define WAIT_ENABLED                0X01
-	/* driver has received an initialize command */
-#define PART_UP_WAIT_ENAB           0x02
 #define WAIT_CONNECTION             0x04
 	/* have established a connection */
 #define CONNECTED                   0x08
@@ -259,6 +257,8 @@
 #define SCHEDULE_DISCONNECT           0x00400
 	/* disconnect handler is scheduled */
 #define DISCONNECT_SCHEDULED          0x00800
+	/* remove function is sleeping */
+#define CFG_SLEEPING                  0x01000
 	u32 flags;
 	/* adapter lock */
 	spinlock_t intr_lock;
@@ -287,6 +287,7 @@
 
 	struct workqueue_struct *work_q;
 	struct completion wait_idle;
+	struct completion unconfig;
 	struct device dev;
 	struct vio_dev *dma_dev;
 	struct srp_target target;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9b6fba..a530f08 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;
 
-	if (!list_empty(&task->running))
+	spin_lock_bh(&conn->taskqueuelock);
+	if (!list_empty(&task->running)) {
+		pr_debug_once("%s while task on list\n", __func__);
 		list_del_init(&task->running);
+	}
+	spin_unlock_bh(&conn->taskqueuelock);
 
 	if (conn->task == task)
 		conn->task = NULL;
@@ -783,7 +787,9 @@
 		if (session->tt->xmit_task(task))
 			goto free_task;
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->mgmtqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -1474,8 +1480,10 @@
 	 * this may be on the requeue list already if the xmit_task callout
 	 * is handling the r2ts while we are adding new ones
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 	if (list_empty(&task->running))
 		list_add_tail(&task->running, &conn->requeue);
+	spin_unlock_bh(&conn->taskqueuelock);
 	iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@
 	 * only have one nop-out as a ping from us and targets should not
 	 * overflow us with nop-ins
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
 		conn->task = list_entry(conn->mgmtqueue.next,
 					 struct iscsi_task, running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
 			/* regular RX path uses back_lock */
 			spin_lock_bh(&conn->session->back_lock);
 			__iscsi_put_task(conn->task);
 			spin_unlock_bh(&conn->session->back_lock);
 			conn->task = NULL;
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 	}
 
 	/* process pending command queue */
@@ -1535,19 +1547,24 @@
 		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
 					running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
 			fail_scsi_task(conn->task, DID_IMM_RETRY);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
 			if (rc == -ENOMEM || rc == -EACCES) {
+				spin_lock_bh(&conn->taskqueuelock);
 				list_add_tail(&conn->task->running,
 					      &conn->cmdqueue);
 				conn->task = NULL;
+				spin_unlock_bh(&conn->taskqueuelock);
 				goto done;
 			} else
 				fail_scsi_task(conn->task, DID_ABORT);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to aviod starvation
 		 */
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
@@ -1577,12 +1595,15 @@
 		conn->task = task;
 		list_del_init(&conn->task->running);
 		conn->task->state = ISCSI_TASK_RUNNING;
+		spin_unlock_bh(&conn->taskqueuelock);
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
+	spin_unlock_bh(&conn->taskqueuelock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;
 
@@ -1738,7 +1759,9 @@
 			goto prepd_reject;
 		}
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->cmdqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -2897,6 +2920,7 @@
 	INIT_LIST_HEAD(&conn->mgmtqueue);
 	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
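+	/* taskqueuelock protects the mgmtqueue, cmdqueue and requeue lists */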
+	spin_lock_init(&conn->taskqueuelock);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_task used for the login/text sequences */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a042..f7e3f27 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11393,6 +11393,7 @@
 	.id_table	= lpfc_id_table,
 	.probe		= lpfc_pci_probe_one,
 	.remove		= lpfc_pci_remove_one,
+	.shutdown	= lpfc_pci_remove_one,
 	.suspend        = lpfc_pci_suspend_one,
 	.resume		= lpfc_pci_resume_one,
 	.err_handler    = &lpfc_err_handler,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689..feab7ea 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5375,16 +5375,22 @@
 
 static int
 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
-	struct atio_from_isp *atio)
+	struct atio_from_isp *atio, bool ha_locked)
 {
 	struct qla_hw_data *ha = vha->hw;
 	uint16_t status;
+	unsigned long flags;
 
 	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
 		return 0;
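+	/*
+	 * Sending the busy status below requires the hardware lock; take it
+	 * only when the caller does not already hold it.
+	 */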
 
+	if (!ha_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 	status = temp_sam_status;
 	qlt_send_busy(vha, atio, status);
+	if (!ha_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	return 1;
 }
 
@@ -5429,7 +5435,7 @@
 
 
 		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
-			rc = qlt_chk_qfull_thresh_hold(vha, atio);
+			rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
 			if (rc != 0) {
 				tgt->atio_irq_cmd_count--;
 				return;
@@ -5552,7 +5558,7 @@
 			break;
 		}
 
-		rc = qlt_chk_qfull_thresh_hold(vha, atio);
+		rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
 		if (rc != 0) {
 			tgt->irq_cmd_count--;
 			return;
@@ -6794,6 +6800,8 @@
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
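+	/* the ATIO has been handed off; free the deferred work element */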
+	kfree(op);
 }
 
 void
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 6607fd4..bc2d2d4 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1412,8 +1412,10 @@
 	struct ufs_hba *hba = filp->f_mapping->host->i_private;
 	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	/*
 	 * simulating a dummy error in order to "convince"
 	 * eh_work to actually reset the controller
@@ -1421,9 +1423,13 @@
 	hba->saved_err |= INT_FATAL_ERRORS;
 	hba->silence_err_logs = true;
 	schedule_work(&hba->eh_work);
-
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	flush_work(&hba->eh_work);
+
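+	/* the error handler has finished; drop the PM and clock votes */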
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+
 	return cnt;
 }
 
@@ -1471,8 +1477,8 @@
 void ufsdbg_add_debugfs(struct ufs_hba *hba)
 {
 	if (!hba) {
-		dev_err(hba->dev, "%s: NULL hba, exiting", __func__);
-		goto err_no_root;
+		pr_err("%s: NULL hba, exiting", __func__);
+		return;
 	}
 
 	hba->debugfs_files.debugfs_root = debugfs_create_dir(dev_name(hba->dev),
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index 8532439..4547a6d 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -67,6 +67,7 @@
 static int ufs_qcom_dbg_testbus_en_set(void *data, u64 attr_id)
 {
 	struct ufs_qcom_host *host = data;
+	int ret = 0;
 
 	if (!host)
 		return -EINVAL;
@@ -76,7 +77,13 @@
 	else
 		host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
 
-	return ufs_qcom_testbus_config(host);
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
+	ret = ufs_qcom_testbus_config(host);
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
+
+	return ret;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_en_ops,
@@ -142,7 +149,11 @@
 	 * Sanity check of the {major, minor} tuple is done in the
 	 * config function
 	 */
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
 	ret = ufs_qcom_testbus_config(host);
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
 	if (!ret)
 		dev_dbg(host->hba->dev,
 				"%s: New configuration: major=%d, minor=%d\n",
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 1ba4f2b..814d1dc 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/blkdev.h>
+#include <linux/spinlock.h>
 #include <crypto/ice.h>
 
 #include "ufs-qcom-ice.h"
@@ -168,13 +169,23 @@
 
 static void ufs_qcom_ice_cfg_work(struct work_struct *work)
 {
+	unsigned long flags;
 	struct ice_data_setting ice_set;
 	struct ufs_qcom_host *qcom_host =
 		container_of(work, struct ufs_qcom_host, ice_cfg_work);
+	struct request *req_pending = NULL;
 
-	if (!qcom_host->ice.vops->config_start || !qcom_host->req_pending)
+	if (!qcom_host->ice.vops->config_start)
 		return;
 
+	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
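+	/* snapshot req_pending under the lock so a new request cannot race us */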
+	req_pending = qcom_host->req_pending;
+	if (!req_pending) {
+		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
 	/*
 	 * config_start is called again as previous attempt returned -EAGAIN,
 	 * this call shall now take care of the necessary key setup.
@@ -185,12 +196,17 @@
 	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
 		qcom_host->req_pending, &ice_set, false);
 
+	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
+	qcom_host->req_pending = NULL;
+	spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
 	/*
 	 * Resume with requests processing. We assume config_start has been
 	 * successful, but even if it wasn't we still must resume in order to
 	 * allow for the request to be retried.
 	 */
 	ufshcd_scsi_unblock_requests(qcom_host->hba);
+
 }
 
 /**
@@ -246,6 +262,7 @@
 	struct ice_data_setting ice_set;
 	char cmd_op = cmd->cmnd[0];
 	int err;
+	unsigned long flags;
 
 	if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
 		dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
@@ -255,6 +272,10 @@
 
 	if (qcom_host->ice.vops->config_start) {
 		memset(&ice_set, 0, sizeof(ice_set));
+
+		spin_lock_irqsave(
+			&qcom_host->ice_work_lock, flags);
+
 		err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
 			cmd->request, &ice_set, true);
 		if (err) {
@@ -272,19 +293,41 @@
 				dev_dbg(qcom_host->hba->dev,
 					"%s: scheduling task for ice setup\n",
 					__func__);
-				qcom_host->req_pending = cmd->request;
-				if (schedule_work(&qcom_host->ice_cfg_work))
+
+				if (!qcom_host->req_pending) {
 					ufshcd_scsi_block_requests(
 						qcom_host->hba);
+					qcom_host->req_pending = cmd->request;
+
+					if (!schedule_work(
+						&qcom_host->ice_cfg_work)) {
+						qcom_host->req_pending = NULL;
+
+						spin_unlock_irqrestore(
+						&qcom_host->ice_work_lock,
+						flags);
+
+						ufshcd_scsi_unblock_requests(
+							qcom_host->hba);
+						return err;
+					}
+				}
+
 			} else {
-				dev_err(qcom_host->hba->dev,
-					"%s: error in ice_vops->config %d\n",
-					__func__, err);
+				if (err != -EBUSY)
+					dev_err(qcom_host->hba->dev,
+						"%s: error in ice_vops->config %d\n",
+						__func__, err);
 			}
 
+			spin_unlock_irqrestore(&qcom_host->ice_work_lock,
+				flags);
+
 			return err;
 		}
 
+		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
 		if (ufs_qcom_is_data_cmd(cmd_op, true))
 			*enable = !ice_set.encr_bypass;
 		else if (ufs_qcom_is_data_cmd(cmd_op, false))
@@ -320,6 +363,7 @@
 	unsigned int bypass = 0;
 	struct request *req;
 	char cmd_op;
+	unsigned long flags;
 
 	if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
 		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
@@ -339,7 +383,8 @@
 
 	req = cmd->request;
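+	/* bi_sector is in 512-byte units; shift down to 4 KB ICE data units */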
 	if (req->bio)
-		lba = req->bio->bi_iter.bi_sector;
+		lba = (req->bio->bi_iter.bi_sector) >>
+			UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
 
 	slot = req->tag;
 	if (slot < 0 || slot > qcom_host->hba->nutrs) {
@@ -348,8 +393,13 @@
 		return -EINVAL;
 	}
 
-	memset(&ice_set, 0, sizeof(ice_set));
+
 	if (qcom_host->ice.vops->config_start) {
+		memset(&ice_set, 0, sizeof(ice_set));
+
+		spin_lock_irqsave(
+			&qcom_host->ice_work_lock, flags);
+
 		err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
 							req, &ice_set, true);
 		if (err) {
@@ -364,13 +414,44 @@
 			 * request processing.
 			 */
 			if (err == -EAGAIN) {
-				qcom_host->req_pending = req;
-				if (schedule_work(&qcom_host->ice_cfg_work))
+
+				dev_dbg(qcom_host->hba->dev,
+					"%s: scheduling task for ice setup\n",
+					__func__);
+
+				if (!qcom_host->req_pending) {
 					ufshcd_scsi_block_requests(
+						qcom_host->hba);
+					qcom_host->req_pending = cmd->request;
+					if (!schedule_work(
+						&qcom_host->ice_cfg_work)) {
+						qcom_host->req_pending = NULL;
+
+						spin_unlock_irqrestore(
+						&qcom_host->ice_work_lock,
+						flags);
+
+						ufshcd_scsi_unblock_requests(
 							qcom_host->hba);
+						return err;
+					}
+				}
+
+			} else {
+				if (err != -EBUSY)
+					dev_err(qcom_host->hba->dev,
+						"%s: error in ice_vops->config %d\n",
+						__func__, err);
 			}
-			goto out;
+
+			spin_unlock_irqrestore(
+				&qcom_host->ice_work_lock, flags);
+
+			return err;
 		}
+
+		spin_unlock_irqrestore(
+			&qcom_host->ice_work_lock, flags);
 	}
 
 	cmd_op = cmd->cmnd[0];
@@ -390,6 +471,7 @@
 		bypass = ice_set.decr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
 						UFS_QCOM_ICE_DISABLE_BYPASS;
 
+
 	/* Configure ICE index */
 	ctrl_info_val =
 		(ice_set.crypto_data.key_index &
@@ -398,8 +480,7 @@
 
 	/* Configure data unit size of transfer request */
 	ctrl_info_val |=
-		(UFS_QCOM_ICE_TR_DATA_UNIT_4_KB &
-		 MASK_UFS_QCOM_ICE_CTRL_INFO_CDU)
+		UFS_QCOM_ICE_TR_DATA_UNIT_4_KB
 		 << OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU;
 
 	/* Configure ICE bypass mode */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9706273..d326b80 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,9 @@
 #include "ufs-qcom-ice.h"
 #include "ufs-qcom-debugfs.h"
 
+#define MAX_PROP_SIZE		   32
+#define VDDP_REF_CLK_MIN_UV        1200000
+#define VDDP_REF_CLK_MAX_UV        1200000
 /* TODO: further tuning for this parameter may be required */
 #define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US	(10000) /* microseconds */
 
@@ -97,13 +100,10 @@
 	int err = 0;
 
 	clk = devm_clk_get(dev, name);
-	if (IS_ERR(clk)) {
+	if (IS_ERR(clk))
 		err = PTR_ERR(clk);
-		dev_err(dev, "%s: failed to get %s err %d",
-				__func__, name, err);
-	} else {
+	else
 		*clk_out = clk;
-	}
 
 	return err;
 }
@@ -182,20 +182,29 @@
 
 	err = ufs_qcom_host_clk_get(dev,
 			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
-	if (err)
+	if (err) {
+		dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
+				__func__, err);
 		goto out;
+	}
 
 	err = ufs_qcom_host_clk_get(dev,
 			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
-	if (err)
+	if (err) {
+		dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
+				__func__, err);
 		goto out;
+	}
 
 	/* In case of single lane per direction, don't read lane1 clocks */
 	if (host->hba->lanes_per_direction > 1) {
 		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
 			&host->rx_l1_sync_clk);
-		if (err)
+		if (err) {
+			dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
+					__func__, err);
 			goto out;
+		}
 
 		/* The tx lane1 clk could be muxed, hence keep this optional */
 		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
@@ -387,8 +396,9 @@
 /**
  * Returns zero for success and non-zero in case of a failure
  */
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
-			       u32 hs, u32 rate, bool update_link_startup_timer)
+static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+			       u32 hs, u32 rate, bool update_link_startup_timer,
+			       bool is_pre_scale_up)
 {
 	int ret = 0;
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -435,8 +445,12 @@
 	}
 
 	list_for_each_entry(clki, &hba->clk_list_head, list) {
-		if (!strcmp(clki->name, "core_clk"))
-			core_clk_rate = clk_get_rate(clki->clk);
+		if (!strcmp(clki->name, "core_clk")) {
+			if (is_pre_scale_up)
+				core_clk_rate = clki->max_freq;
+			else
+				core_clk_rate = clk_get_rate(clki->clk);
+		}
 	}
 
 	/* If frequency is smaller than 1MHz, set to 1MHz */
@@ -533,6 +547,13 @@
 	return ret;
 }
 
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+			       u32 hs, u32 rate, bool update_link_startup_timer)
+{
+	return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
+				      update_link_startup_timer, false);
+}
+
 static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -666,40 +687,105 @@
 	return err;
 }
 
+
+static int ufs_qcom_config_vreg(struct device *dev,
+		struct ufs_vreg *vreg, bool on)
+{
+	int ret = 0;
+	struct regulator *reg;
+	int min_uV, uA_load;
+
+	if (!vreg) {
+		WARN_ON(1);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	reg = vreg->reg;
+	if (regulator_count_voltages(reg) > 0) {
+		min_uV = on ? vreg->min_uV : 0;
+		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+		if (ret) {
+			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+					__func__, vreg->name, ret);
+			goto out;
+		}
+
+		uA_load = on ? vreg->max_uA : 0;
+		ret = regulator_set_load(vreg->reg, uA_load);
+		if (ret)
+			goto out;
+	}
+out:
+	return ret;
+}
+
+static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+	int ret = 0;
+
+	if (vreg->enabled)
+		return ret;
+
+	ret = ufs_qcom_config_vreg(dev, vreg, true);
+	if (ret)
+		goto out;
+
+	ret = regulator_enable(vreg->reg);
+	if (ret)
+		goto out;
+
+	vreg->enabled = true;
+out:
+	return ret;
+}
+
+static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+	int ret = 0;
+
+	if (!vreg->enabled)
+		return ret;
+
+	ret = regulator_disable(vreg->reg);
+	if (ret)
+		goto out;
+
+	ret = ufs_qcom_config_vreg(dev, vreg, false);
+	if (ret)
+		goto out;
+
+	vreg->enabled = false;
+out:
+	return ret;
+}
+
 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct phy *phy = host->generic_phy;
 	int ret = 0;
 
-	if (ufs_qcom_is_link_off(hba)) {
-		/*
-		 * Disable the tx/rx lane symbol clocks before PHY is
-		 * powered down as the PLL source should be disabled
-		 * after downstream clocks are disabled.
-		 */
-		ufs_qcom_disable_lane_clks(host);
-		phy_power_off(phy);
-		ret = ufs_qcom_ice_suspend(host);
-		if (ret)
-			dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
-					__func__, ret);
-
-		/* Assert PHY soft reset */
-		ufs_qcom_assert_reset(hba);
-		goto out;
-	}
-
 	/*
-	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
-	 * rail and low noise analog power rail for PLL can be switched off.
+	 * If the UniPro link is not active (including when it is off), the
+	 * PHY ref_clk, the main PHY analog power rail and the low noise
+	 * analog power rail for the PLL can be switched off.
 	 */
 	if (!ufs_qcom_is_link_active(hba)) {
 		ufs_qcom_disable_lane_clks(host);
 		phy_power_off(phy);
-		ufs_qcom_ice_suspend(host);
-	}
 
+		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
+			ret = ufs_qcom_disable_vreg(hba->dev,
+					host->vddp_ref_clk);
+		ufs_qcom_ice_suspend(host);
+
+		if (ufs_qcom_is_link_off(hba)) {
+			/* Assert PHY soft reset */
+			ufs_qcom_assert_reset(hba);
+			goto out;
+		}
+	}
 	/* Unvote PM QoS */
 	ufs_qcom_pm_qos_suspend(host);
 
@@ -720,6 +806,11 @@
 		goto out;
 	}
 
+	if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
+				   hba->spm_lvl > UFS_PM_LVL_3))
+		ufs_qcom_enable_vreg(hba->dev,
+				      host->vddp_ref_clk);
+
 	err = ufs_qcom_enable_lane_clks(host);
 	if (err)
 		goto out;
@@ -739,7 +830,35 @@
 
 static int ufs_qcom_full_reset(struct ufs_hba *hba)
 {
-	return -ENOTSUPP;
+	int ret = -ENOTSUPP;
+
+	if (!hba->core_reset) {
+		dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
+			ret);
+		goto out;
+	}
+
+	ret = reset_control_assert(hba->core_reset);
+	if (ret) {
+		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+				__func__, ret);
+		goto out;
+	}
+
+	/*
+	 * The hardware requires a delay of at least 3-4 sleep clock
+	 * (32.768 kHz) cycles between assert and deassert, which comes to
+	 * ~125 us (4/32768). To be on the safe side, add a 200 us delay.
+	 */
+	usleep_range(200, 210);
+
+	ret = reset_control_deassert(hba->core_reset);
+	if (ret)
+		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+				__func__, ret);
+
+out:
+	return ret;
 }
 
 #ifdef CONFIG_SCSI_UFS_QCOM_ICE
@@ -757,7 +876,8 @@
 
 	/* Use request LBA as the DUN value */
 	if (req->bio)
-		*dun = req->bio->bi_iter.bi_sector;
+		*dun = (req->bio->bi_iter.bi_sector) >>
+				UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
 
 	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
 
@@ -978,7 +1098,7 @@
 	}
 }
 
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
 {
 	int err = 0;
 
@@ -1009,7 +1129,7 @@
 
 	vote = ufs_qcom_get_bus_vote(host, mode);
 	if (vote >= 0)
-		err = ufs_qcom_set_bus_vote(host, vote);
+		err = __ufs_qcom_set_bus_vote(host, vote);
 	else
 		err = vote;
 
@@ -1020,6 +1140,35 @@
 	return err;
 }
 
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int vote, err;
+
+	/*
+	 * If ufs_qcom_init() has not finished yet, simply ignore; this
+	 * function will be called from ufs_qcom_init() once initialization
+	 * is done.
+	 */
+	if (!host)
+		return 0;
+
+	if (on) {
+		vote = host->bus_vote.saved_vote;
+		if (vote == host->bus_vote.min_bw_vote)
+			ufs_qcom_update_bus_bw_vote(host);
+	} else {
+		vote = host->bus_vote.min_bw_vote;
+	}
+
+	err = __ufs_qcom_set_bus_vote(host, vote);
+	if (err)
+		dev_err(hba->dev, "%s: set bus vote failed %d\n",
+				__func__, err);
+
+	return err;
+}
+
 static ssize_t
 show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
 			char *buf)
@@ -1096,7 +1245,7 @@
 	return 0;
 }
 
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
 {
 	return 0;
 }
@@ -1373,7 +1522,6 @@
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
-	int vote = 0;
 
 	/*
 	 * In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1398,9 +1546,6 @@
 		/* enable the device ref clock for HS mode*/
 		if (ufshcd_is_hs_mode(&hba->pwr_info))
 			ufs_qcom_dev_ref_clk_ctrl(host, true);
-		vote = host->bus_vote.saved_vote;
-		if (vote == host->bus_vote.min_bw_vote)
-			ufs_qcom_update_bus_bw_vote(host);
 
 		err = ufs_qcom_ice_resume(host);
 		if (err)
@@ -1412,21 +1557,19 @@
 
 		/* M-PHY RMMI interface clocks can be turned off */
 		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
-		if (!ufs_qcom_is_link_active(hba)) {
-			if (!is_gating_context)
-				/* turn off UFS local PHY ref_clk */
-				ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+		/*
+		 * If auto hibern8 is supported then the link will already
+		 * be in hibern8 state and the ref clock can be gated.
+		 */
+		if (ufshcd_is_auto_hibern8_supported(hba) ||
+		    !ufs_qcom_is_link_active(hba)) {
+			/* turn off UFS local PHY ref_clk */
+			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
 			/* disable device ref_clk */
 			ufs_qcom_dev_ref_clk_ctrl(host, false);
 		}
-		vote = host->bus_vote.min_bw_vote;
 	}
 
-	err = ufs_qcom_set_bus_vote(host, vote);
-	if (err)
-		dev_err(hba->dev, "%s: set bus vote failed %d\n",
-				__func__, err);
-
 out:
 	return err;
 }
@@ -1850,6 +1993,57 @@
 		dev_err(hba->dev, "invalid host index %d\n", id);
 }
 
+static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
+				   struct ufs_vreg **out_vreg)
+{
+	int ret = 0;
+	char prop_name[MAX_PROP_SIZE];
+	struct ufs_vreg *vreg = NULL;
+	struct device *dev = host->hba->dev;
+	struct device_node *np = dev->of_node;
+
+	if (!np) {
+		dev_err(dev, "%s: non DT initialization\n", __func__);
+		goto out;
+	}
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+	if (!of_parse_phandle(np, prop_name, 0)) {
+		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+			 __func__, prop_name);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	vreg->name = name;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+	ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+	if (ret) {
+		dev_err(dev, "%s: unable to find %s err %d\n",
+			__func__, prop_name, ret);
+		goto out;
+	}
+
+	vreg->reg = devm_regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		ret = PTR_ERR(vreg->reg);
+		dev_err(dev, "%s: %s get failed, err=%d\n",
+			__func__, vreg->name, ret);
+	}
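+	/* fixed 1.2 V rail; only the max load current is read from DT */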
+	vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+	vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+
+out:
+	if (!ret)
+		*out_vreg = vreg;
+	return ret;
+}
+
 /**
  * ufs_qcom_init - bind phy with controller
  * @hba: host controller instance
@@ -1877,14 +2071,9 @@
 
 	/* Make a two way bind between the qcom host and the hba */
 	host->hba = hba;
-	ufshcd_set_variant(hba, host);
+	spin_lock_init(&host->ice_work_lock);
 
-	/*
-	 * voting/devoting device ref_clk source is time consuming hence
-	 * skip devoting it during aggressive clock gating. This clock
-	 * will still be gated off during runtime suspend.
-	 */
-	hba->no_ref_clk_gating = true;
+	ufshcd_set_variant(hba, host);
 
 	err = ufs_qcom_ice_get_dev(host);
 	if (err == -EPROBE_DEFER) {
@@ -1969,14 +2158,24 @@
 	ufs_qcom_phy_save_controller_version(host->generic_phy,
 		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
 
+	err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
+				      &host->vddp_ref_clk);
 	phy_init(host->generic_phy);
 	err = phy_power_on(host->generic_phy);
 	if (err)
 		goto out_unregister_bus;
+	if (host->vddp_ref_clk) {
+		err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
+		if (err) {
+			dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
+				__func__, err);
+			goto out_disable_phy;
+		}
+	}
 
 	err = ufs_qcom_init_lane_clks(host);
 	if (err)
-		goto out_disable_phy;
+		goto out_disable_vddp;
 
 	ufs_qcom_parse_lpm(host);
 	if (host->disable_lpm)
@@ -1984,6 +2183,7 @@
 	ufs_qcom_set_caps(hba);
 	ufs_qcom_advertise_quirks(hba);
 
+	ufs_qcom_set_bus_vote(hba, true);
 	ufs_qcom_setup_clocks(hba, true, false);
 
 	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
@@ -1999,6 +2199,9 @@
 
 	goto out;
 
+out_disable_vddp:
+	if (host->vddp_ref_clk)
+		ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
 out_disable_phy:
 	phy_power_off(host->generic_phy);
 out_unregister_bus:
@@ -2049,79 +2252,21 @@
 	return err;
 }
 
-static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
-{
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct phy *phy = host->generic_phy;
-	int err = 0;
-
-	/* The default low power mode configuration is SVS2 */
-	if (!ufs_qcom_cap_svs2(host))
-		goto out;
-
-	/*
-	 * The link should be put in hibern8 state before
-	 * configuring the PHY to enter/exit SVS2 mode.
-	 */
-	err = ufshcd_uic_hibern8_enter(hba);
-	if (err)
-		goto out;
-
-	err = ufs_qcom_phy_configure_lpm(phy, enable);
-	if (err)
-		goto out;
-
-	err = ufshcd_uic_hibern8_exit(hba);
-out:
-	return err;
-}
-
 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
-	return ufs_qcom_configure_lpm(hba, false);
-}
-
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
-{
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
-	/* set unipro core clock cycles to 150 and clear clock divider */
-	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
-}
-
-static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
-{
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	u32 core_clk_ctrl_reg;
+	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
 	int err = 0;
 
 	if (!ufs_qcom_cap_qunipro(host))
 		goto out;
 
-	err = ufs_qcom_configure_lpm(hba, true);
-	if (err)
-		goto out;
+	if (attr)
+		__ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+				      attr->hs_rate, false, true);
 
-	err = ufshcd_dme_get(hba,
-			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
-			    &core_clk_ctrl_reg);
-
-	/* make sure CORE_CLK_DIV_EN is cleared */
-	if (!err &&
-	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
-		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
-		err = ufshcd_dme_set(hba,
-				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
-				    core_clk_ctrl_reg);
-	}
+	/* set unipro core clock cycles to 150 and clear clock divider */
+	err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
 out:
 	return err;
 }
@@ -2129,11 +2274,16 @@
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
 	int err = 0;
 
 	if (!ufs_qcom_cap_qunipro(host))
 		return 0;
 
+	if (attr)
+		ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+				    attr->hs_rate, false);
+
 	if (ufs_qcom_cap_svs2(host))
 		/*
 		 * For SVS2 set unipro core clock cycles to 37 and
@@ -2154,30 +2304,17 @@
 		bool scale_up, enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
 	int err = 0;
 
 	switch (status) {
 	case PRE_CHANGE:
 		if (scale_up)
 			err = ufs_qcom_clk_scale_up_pre_change(hba);
-		else
-			err = ufs_qcom_clk_scale_down_pre_change(hba);
 		break;
 	case POST_CHANGE:
-		if (scale_up)
-			err = ufs_qcom_clk_scale_up_post_change(hba);
-		else
+		if (!scale_up)
 			err = ufs_qcom_clk_scale_down_post_change(hba);
 
-		if (err || !dev_req_params)
-			goto out;
-
-		ufs_qcom_cfg_timers(hba,
-				    dev_req_params->gear_rx,
-				    dev_req_params->pwr_rx,
-				    dev_req_params->hs_rate,
-				    false);
 		ufs_qcom_update_bus_bw_vote(host);
 		break;
 	default:
@@ -2186,7 +2323,6 @@
 		break;
 	}
 
-out:
 	return err;
 }
 
@@ -2277,17 +2413,21 @@
 
 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
 {
-	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+				UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
 		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
-	else
+	} else {
+		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
 		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+	}
 }
 
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
 {
 	/* provide a legal default configuration */
-	host->testbus.select_major = TSTBUS_UAWM;
-	host->testbus.select_minor = 1;
+	host->testbus.select_major = TSTBUS_UNIPRO;
+	host->testbus.select_minor = 37;
 }
 
 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
@@ -2304,7 +2444,7 @@
 	 * mappings of select_minor, since there is no harm in
 	 * configuring a non-existent select_minor
 	 */
-	if (host->testbus.select_minor > 0x1F) {
+	if (host->testbus.select_minor > 0xFF) {
 		dev_err(host->hba->dev,
 			"%s: 0x%05X is not a legal testbus option\n",
 			__func__, host->testbus.select_minor);
@@ -2314,6 +2454,11 @@
 	return true;
 }
 
+/*
+ * The caller of this function must make sure that the controller is out
+ * of runtime suspend and that the appropriate clocks are enabled before
+ * accessing the test bus registers.
+ */
 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
 {
 	int reg;
@@ -2373,7 +2518,8 @@
 		break;
 	case TSTBUS_UNIPRO:
 		reg = UFS_UNIPRO_CFG;
-		offset = 1;
+		offset = 20;
+		mask = 0xFFF;
 		break;
 	/*
 	 * No need for a default case, since
@@ -2383,8 +2529,6 @@
 	}
 	mask <<= offset;
 
-	pm_runtime_get_sync(host->hba->dev);
-	ufshcd_hold(host->hba, false);
 	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
 		    (u32)host->testbus.select_major << 19,
 		    REG_UFS_CFG1);
@@ -2392,8 +2536,11 @@
 		    (u32)host->testbus.select_minor << offset,
 		    reg);
 	ufs_qcom_enable_test_bus(host);
-	ufshcd_release(host->hba, false);
-	pm_runtime_put_sync(host->hba->dev);
+	/*
+	 * Make sure the test bus configuration is
+	 * committed before returning.
+	 */
+	mb();
 
 	return 0;
 }
@@ -2403,15 +2550,47 @@
 	ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
 }
 
-static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
+static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	u32 *testbus = NULL;
+	int i, nminor = 256, testbus_len = nminor * sizeof(u32);
+
+	testbus = kmalloc(testbus_len, GFP_KERNEL);
+	if (!testbus)
+		return;
+
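+	/* sweep all 256 UniPro testbus minor selections, recording each value */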
+	host->testbus.select_major = TSTBUS_UNIPRO;
+	for (i = 0; i < nminor; i++) {
+		host->testbus.select_minor = i;
+		ufs_qcom_testbus_config(host);
+		testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+	}
+	print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
+			16, 4, testbus, testbus_len, false);
+	kfree(testbus);
+}
+
+static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
 
 	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
 			"HCI Vendor Specific Registers ");
-
 	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+
+	if (no_sleep)
+		return;
+
+	/* sleep a bit intermittently as we are dumping too much data */
+	usleep_range(1000, 1100);
 	ufs_qcom_testbus_read(hba);
+	usleep_range(1000, 1100);
+	ufs_qcom_print_unipro_testbus(hba);
+	usleep_range(1000, 1100);
+	ufs_qcom_phy_dbg_register_dump(phy);
+	usleep_range(1000, 1100);
 	ufs_qcom_ice_print_regs(host);
 }
 
@@ -2436,6 +2615,7 @@
 	.full_reset		= ufs_qcom_full_reset,
 	.update_sec_cfg		= ufs_qcom_update_sec_cfg,
 	.get_scale_down_gear	= ufs_qcom_get_scale_down_gear,
+	.set_bus_vote		= ufs_qcom_set_bus_vote,
 	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
 #ifdef CONFIG_DEBUG_FS
 	.add_debugfs		= ufs_qcom_dbg_add_debugfs,
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 42e7aad8..792ae42 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -100,6 +100,7 @@
 #define QUNIPRO_SEL	UFS_BIT(0)
 #define TEST_BUS_EN		BIT(18)
 #define TEST_BUS_SEL		GENMASK(22, 19)
+#define UFS_REG_TEST_BUS_EN	BIT(30)
 
 /* bit definitions for REG_UFS_CFG2 register */
 #define UAWM_HW_CGC_EN		(1 << 0)
@@ -369,8 +370,10 @@
 	u32 dbg_print_en;
 	struct ufs_qcom_testbus testbus;
 
+	spinlock_t ice_work_lock;
 	struct work_struct ice_cfg_work;
 	struct request *req_pending;
+	struct ufs_vreg *vddp_ref_clk;
 };
 
 static inline u32
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
index b22a4c4..3210d60 100644
--- a/drivers/scsi/ufs/ufs_quirks.c
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,20 @@
 	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
 	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8aL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "hD8aL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8aM1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "h08aM1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8GL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8HL1",
+		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
 
 	END_FIX
 };
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index f7182ed..e7a59d4 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -130,6 +130,14 @@
  */
 #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME	(1 << 7)
 
+/*
+ * Some UFS devices may stop responding after switching from HS-G1 to HS-G3.
+ * However, these devices work fine if the switch is done in two steps:
+ * HS-G1 to HS-G2 followed by HS-G2 to HS-G3. Enabling this quirk for such
+ * devices applies this two-step gear switch workaround.
+ */
+#define UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH (1 << 8)
+
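Illustrative only, not part of the patch: a minimal sketch of how a host driver might honor this quirk when scaling the link gear up. The helper name example_scale_gear_up() is hypothetical; ufshcd_change_power_mode(), struct ufs_pa_layer_attr and the UFS_HS_G* gear values are existing driver symbols, and the patch applies the same two-step pattern in ufshcd_scale_gear() further below.

/* Hypothetical sketch: two-step HS-G1 -> HS-G2 -> HS-G3 switch */
static int example_scale_gear_up(struct ufs_hba *hba,
				 struct ufs_pa_layer_attr *new_pwr_info)
{
	int ret;

	if ((hba->dev_info.quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH) &&
	    hba->pwr_info.gear_tx == UFS_HS_G1 &&
	    new_pwr_info->gear_tx == UFS_HS_G3) {
		/* hop through HS-G2 first */
		new_pwr_info->gear_tx = UFS_HS_G2;
		new_pwr_info->gear_rx = UFS_HS_G2;
		ret = ufshcd_change_power_mode(hba, new_pwr_info);
		if (ret)
			return ret;

		new_pwr_info->gear_tx = UFS_HS_G3;
		new_pwr_info->gear_rx = UFS_HS_G3;
	}

	/* final (or only) switch to the requested gear */
	return ufshcd_change_power_mode(hba, new_pwr_info);
}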
 struct ufs_hba;
 void ufs_advertise_fixup_device(struct ufs_hba *hba);
 
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
index 8953722e8..d41871a 100644
--- a/drivers/scsi/ufs/ufs_test.c
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -689,13 +689,13 @@
 	__blk_put_request(test_iosched->req_q, test_rq->rq);
 	spin_unlock_irqrestore(&test_iosched->lock, flags);
 
-	test_iosched_free_test_req_data_buffer(test_rq);
-	kfree(test_rq);
-
 	if (err)
 		pr_err("%s: request %d completed, err=%d", __func__,
 			test_rq->req_id, err);
 
+	test_iosched_free_test_req_data_buffer(test_rq);
+	kfree(test_rq);
+
 	check_test_completion(test_iosched);
 }
 
@@ -984,14 +984,14 @@
 		return;
 	}
 
-	test_iosched_free_test_req_data_buffer(test_rq);
-	kfree(test_rq);
-	utd->completed_req_count++;
-
 	if (err)
 		pr_err("%s: request %d completed, err=%d", __func__,
 			test_rq->req_id, err);
 
+	test_iosched_free_test_req_data_buffer(test_rq);
+	kfree(test_rq);
+	utd->completed_req_count++;
+
 	check_test_completion(test_iosched);
 }
 
@@ -1007,7 +1007,7 @@
 static int run_long_test(struct test_iosched *test_iosched)
 {
 	int ret = 0;
-	int direction, num_bios_per_request;
+	int direction, num_bios_per_request = 1;
 	static unsigned int inserted_requests;
 	u32 sector, seed, num_bios, seq_sector_delta;
 	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
@@ -1028,14 +1028,12 @@
 	/* Set test parameters */
 	switch (test_iosched->test_info.testcase) {
 	case  UFS_TEST_LONG_RANDOM_READ:
-		num_bios_per_request = 1;
 		utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
 			(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
 					num_bios_per_request);
 		direction = READ;
 		break;
 	case  UFS_TEST_LONG_RANDOM_WRITE:
-		num_bios_per_request = 1;
 		utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
 			(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
 					num_bios_per_request);
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 9c8473e..de6ecbd 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -40,6 +40,22 @@
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 
+static int ufshcd_parse_reset_info(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	hba->core_reset = devm_reset_control_get(hba->dev,
+				"core_reset");
+	if (IS_ERR(hba->core_reset)) {
+		ret = PTR_ERR(hba->core_reset);
+		dev_err(hba->dev, "core_reset unavailable, err = %d\n",
+				ret);
+		hba->core_reset = NULL;
+	}
+
+	return ret;
+}
+
 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 {
 	int ret = 0;
@@ -297,6 +313,20 @@
 		hba->dev_ref_clk_freq = REF_CLK_FREQ_26_MHZ;
 }
 
+static int ufshcd_parse_pinctrl_info(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	/* Try to obtain pinctrl handle */
+	hba->pctrl = devm_pinctrl_get(hba->dev);
+	if (IS_ERR(hba->pctrl)) {
+		ret = PTR_ERR(hba->pctrl);
+		hba->pctrl = NULL;
+	}
+
+	return ret;
+}
+
 #ifdef CONFIG_SMP
 /**
  * ufshcd_pltfrm_suspend - suspend power management function
@@ -401,6 +431,20 @@
 		goto dealloc_host;
 	}
 
+	err = ufshcd_parse_reset_info(hba);
+	if (err) {
+		dev_err(&pdev->dev, "%s: reset parse failed %d\n",
+				__func__, err);
+		goto dealloc_host;
+	}
+
+	err = ufshcd_parse_pinctrl_info(hba);
+	if (err) {
+		dev_dbg(&pdev->dev, "%s: unable to parse pinctrl data %d\n",
+				__func__, err);
+		/* let's not fail the probe */
+	}
+
 	ufshcd_parse_dev_ref_clk_freq(hba);
 	ufshcd_parse_pm_levels(hba);
 	ufshcd_parse_gear_limits(hba);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 7552357..7b91717 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -47,6 +47,7 @@
 #include "ufshci.h"
 #include "ufs_quirks.h"
 #include "ufs-debugfs.h"
+#include "ufs-qcom.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
@@ -367,7 +368,7 @@
 }
 
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -389,6 +390,28 @@
 static void ufshcd_release_all(struct ufs_hba *hba);
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
+static int ufshcd_devfreq_target(struct device *dev,
+				unsigned long *freq, u32 flags);
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+		struct devfreq_dev_status *stat);
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
+	.upthreshold = 35,
+	.downdifferential = 30,
+	.simple_scaling = 1,
+};
+
+static void *gov_data = &ufshcd_ondemand_data;
+#else
+static void *gov_data;
+#endif
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+	.polling_ms	= 40,
+	.target		= ufshcd_devfreq_target,
+	.get_dev_status	= ufshcd_devfreq_get_dev_status,
+};
 
 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 {
@@ -441,10 +464,67 @@
 }
 EXPORT_SYMBOL(ufshcd_scsi_block_requests);
 
+static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
+{
+	int ret = 0;
+
+	if (!hba->pctrl)
+		return 0;
+
+	/* Assert reset if ctrl == true */
+	if (ctrl)
+		ret = pinctrl_select_state(hba->pctrl,
+			pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
+	else
+		ret = pinctrl_select_state(hba->pctrl,
+			pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
+
+	if (ret < 0)
+		dev_err(hba->dev, "%s: %s failed with err %d\n",
+			__func__, ctrl ? "Assert" : "Deassert", ret);
+
+	return ret;
+}
+
+static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
+{
+	return ufshcd_device_reset_ctrl(hba, true);
+}
+
+static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
+{
+	return ufshcd_device_reset_ctrl(hba, false);
+}
+
+static int ufshcd_reset_device(struct ufs_hba *hba)
+{
+	int ret;
+
+	/* reset the connected UFS device */
+	ret = ufshcd_assert_device_reset(hba);
+	if (ret)
+		goto out;
+	/*
+	 * The reset signal is active low.
+	 * The UFS device shall detect more than or equal to 1us of positive
+	 * or negative RST_n pulse width.
+	 * To be on the safe side, keep the reset low for at least 10us.
+	 */
+	usleep_range(10, 15);
+
+	ret = ufshcd_deassert_device_reset(hba);
+	if (ret)
+		goto out;
+	/* same as assert, wait for at least 10us after deassert */
+	usleep_range(10, 15);
+out:
+	return ret;
+}
+
 /* replace non-printable or non-ASCII characters with spaces */
 static inline void ufshcd_remove_non_printable(char *val)
 {
-	if (!val)
+	if (!val || !*val)
 		return;
 
 	if (*val < 0x20 || *val > 0x7e)
@@ -534,7 +614,7 @@
 	}
 }
 
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
+static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
 {
 	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
 		return;
@@ -567,7 +647,12 @@
 
 	ufshcd_print_clk_freqs(hba);
 
-	ufshcd_vops_dbg_register_dump(hba);
+	ufshcd_vops_dbg_register_dump(hba, no_sleep);
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+	__ufshcd_print_host_regs(hba, false);
 }
 
 static
@@ -1172,6 +1257,12 @@
 	return ret;
 }
 
+static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
+{
+	hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
+	cancel_work_sync(&hba->clk_gating.gate_work);
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
@@ -1179,7 +1270,7 @@
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.ungate_work);
 
-	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+	ufshcd_cancel_gate_work(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_gating.state == CLKS_ON) {
@@ -1250,14 +1341,18 @@
 		}
 		break;
 	case REQ_CLKS_OFF:
-		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+		/*
+		 * If the timer was active but the callback was not yet
+		 * running, there is nothing to do: just change the state
+		 * and return.
+		 */
+		if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
 			hba->clk_gating.state = CLKS_ON;
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 				hba->clk_gating.state);
 			break;
 		}
 		/*
-		 * If we here, it means gating work is either done or
+		 * If we are here, it means gating work is either done or
 		 * currently running. Hence, fall through to cancel gating
 		 * work and to enable clocks.
 		 */
@@ -1266,7 +1361,8 @@
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 			hba->clk_gating.state);
-		schedule_work(&hba->clk_gating.ungate_work);
+		queue_work(hba->clk_gating.ungating_workq,
+				&hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
 		 * work to be done or not.
@@ -1297,11 +1393,18 @@
 static void ufshcd_gate_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
-			clk_gating.gate_work.work);
+						clk_gating.gate_work);
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.is_suspended) {
+	/*
+	 * If an ungate request arrived while this work was pending, the
+	 * gating state will already be marked REQ_CLKS_ON. In that case,
+	 * save time by skipping the gating work and exit after changing
+	 * the clock state to CLKS_ON.
+	 */
+	if (hba->clk_gating.is_suspended ||
+		(hba->clk_gating.state == REQ_CLKS_ON)) {
 		hba->clk_gating.state = CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 			hba->clk_gating.state);
@@ -1335,7 +1438,12 @@
 		ufshcd_set_link_hibern8(hba);
 	}
 
-	if (!ufshcd_is_link_active(hba) && !hba->no_ref_clk_gating)
+	/*
+	 * If auto hibern8 is supported then the link will already
+	 * be in hibern8 state and the ref clock can be gated.
+	 */
+	if ((ufshcd_is_auto_hibern8_supported(hba) ||
+	     !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
 		ufshcd_disable_clocks(hba, true);
 	else
 		/* If link is active, device ref_clk can't be switched off */
@@ -1383,8 +1491,9 @@
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 
-	schedule_delayed_work(&hba->clk_gating.gate_work,
-			      msecs_to_jiffies(hba->clk_gating.delay_ms));
+	hrtimer_start(&hba->clk_gating.gate_hrtimer,
+			ms_to_ktime(hba->clk_gating.delay_ms),
+			HRTIMER_MODE_REL);
 }
 
 void ufshcd_release(struct ufs_hba *hba, bool no_sched)
@@ -1512,36 +1621,57 @@
 	return count;
 }
 
+static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
+					struct hrtimer *timer)
+{
+	struct ufs_hba *hba = container_of(timer, struct ufs_hba,
+					   clk_gating.gate_hrtimer);
+
+	schedule_work(&hba->clk_gating.gate_work);
+
+	return HRTIMER_NORESTART;
+}
+
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
 	struct ufs_clk_gating *gating = &hba->clk_gating;
+	char wq_name[sizeof("ufs_clk_ungating_00")];
 
 	hba->clk_gating.state = CLKS_ON;
 
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
-	INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
+	/*
+	 * Disable hibern8 during clk gating if
+	 * auto hibern8 is supported
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+
+	INIT_WORK(&gating->gate_work, ufshcd_gate_work);
 	INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
+	/*
+	 * Clock gating work must be executed only after the auto hibern8
+	 * timeout has expired in the hardware, or after the aggressive
+	 * hibern8-on-idle software timeout. Jiffy-based, low-resolution
+	 * delayed work cannot reliably guarantee this, so use a
+	 * high-resolution timer to make sure the gate work is scheduled
+	 * strictly after the hibern8 timeout.
+	 *
+	 * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
+	 */
+	hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
+
+	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_ungating_%d",
+			hba->host->host_no);
+	hba->clk_gating.ungating_workq = create_singlethread_workqueue(wq_name);
 
 	gating->is_enabled = true;
 
-	/*
-	 * Scheduling the delayed work after 1 jiffies will make the work to
-	 * get schedule any time from 0ms to 1000/HZ ms which is not desirable
-	 * for hibern8 enter work as it may impact the performance if it gets
-	 * scheduled almost immediately. Hence make sure that hibern8 enter
-	 * work gets scheduled atleast after 2 jiffies (any time between
-	 * 1000/HZ ms to 2000/HZ ms).
-	 */
-	gating->delay_ms_pwr_save = jiffies_to_msecs(
-		max_t(unsigned long,
-		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
-		      2));
-	gating->delay_ms_perf = jiffies_to_msecs(
-		max_t(unsigned long,
-		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
-		      2));
+	gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
+	gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
 
 	/* start with performance mode */
 	gating->delay_ms = gating->delay_ms_perf;
@@ -1598,8 +1728,9 @@
 		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
 	}
 	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+	ufshcd_cancel_gate_work(hba);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
-	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+	destroy_workqueue(hba->clk_gating.ungating_workq);
 }
 
 static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -1910,6 +2041,7 @@
 		return;
 
 	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		hba->hibern8_on_idle.delay_ms = 1;
 		hba->hibern8_on_idle.state = AUTO_HIBERN8;
 		/*
 		 * Disable SW hibern8 enter on idle in case
@@ -1917,13 +2049,13 @@
 		 */
 		hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
 	} else {
+		hba->hibern8_on_idle.delay_ms = 10;
 		INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
 				  ufshcd_hibern8_enter_work);
 		INIT_WORK(&hba->hibern8_on_idle.exit_work,
 			  ufshcd_hibern8_exit_work);
 	}
 
-	hba->hibern8_on_idle.delay_ms = 10;
 	hba->hibern8_on_idle.is_enabled = true;
 
 	hba->hibern8_on_idle.delay_attr.show =
@@ -2360,9 +2492,6 @@
 		goto out;
 
 	req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
-	if (lrbp->cmd->request && lrbp->cmd->request->bio)
-		dun = lrbp->cmd->request->bio->bi_iter.bi_sector;
-
 	req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
 	req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
 out:
@@ -2587,6 +2716,61 @@
 }
 
 /**
+ * ufshcd_get_write_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * The lock is predominantly held by the shutdown context, thus ensuring
+ * that no requests from any other context can sneak through.
+ */
+static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
+{
+	down_write(&hba->lock);
+}
+
+/**
+ * ufshcd_get_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns 1 if the lock is acquired, 0 if the request may proceed
+ * without the lock, or < 0 on contention
+ *
+ * After shutdown is initiated, only requests directed to the well-known
+ * device LUN are allowed. The synchronization between scaling and request
+ * issue is maintained as-is, and this restructuring synchronizes shutdown
+ * with them as well.
+ */
+static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
+{
+	int err = 0;
+
+	err = down_read_trylock(&hba->lock);
+	if (err > 0)
+		goto out;
+	/* let requests for well known device lun to go through */
+	if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+		return 0;
+	else if (!ufshcd_is_shutdown_ongoing(hba))
+		return -EAGAIN;
+	else
+		return -EPERM;
+
+out:
+	return err;
+}
+
+/**
+ * ufshcd_put_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns none
+ */
+static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
+{
+	up_read(&hba->lock);
+}
+
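A minimal usage sketch, illustrative only, of how an I/O issue path is expected to pair these helpers; example_issue_path() is hypothetical, while the lock helpers, SCSI_MLQUEUE_HOST_BUSY and the scsi_cmnd completion calls are existing kernel/driver symbols (the ufshcd_queuecommand() change below is the real consumer):

/* Hypothetical sketch of the caller-side locking pattern */
static int example_issue_path(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
	bool has_read_lock = false;
	int err;

	err = ufshcd_get_read_lock(hba, cmd->device->lun);
	if (err == -EAGAIN)
		return SCSI_MLQUEUE_HOST_BUSY;	/* scaling holds the lock, retry later */
	if (err == -EPERM) {
		set_host_byte(cmd, DID_ERROR);	/* shutdown ongoing, fail the command */
		cmd->scsi_done(cmd);
		return 0;
	}
	if (err == 1)
		has_read_lock = true;	/* err == 0: WLUN request, proceeds without the lock */

	/* ... build and issue the request here ... */

	if (has_read_lock)
		ufshcd_put_read_lock(hba);
	return 0;
}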
+/**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
  * @done: call back function
@@ -2600,9 +2784,13 @@
 	unsigned long flags;
 	int tag;
 	int err = 0;
+	bool has_read_lock = false;
 
 	hba = shost_priv(host);
 
+	if (!cmd || !cmd->request || !hba)
+		return -EINVAL;
+
 	tag = cmd->request->tag;
 	if (!ufshcd_valid_tag(hba, tag)) {
 		dev_err(hba->dev,
@@ -2611,10 +2799,27 @@
 		BUG();
 	}
 
-	if (!down_read_trylock(&hba->clk_scaling_lock))
-		return SCSI_MLQUEUE_HOST_BUSY;
+	err = ufshcd_get_read_lock(hba, cmd->device->lun);
+	if (unlikely(err < 0)) {
+		if (err == -EPERM) {
+			set_host_byte(cmd, DID_ERROR);
+			cmd->scsi_done(cmd);
+			return 0;
+		}
+		if (err == -EAGAIN)
+			return SCSI_MLQUEUE_HOST_BUSY;
+	} else if (err == 1) {
+		has_read_lock = true;
+	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	/* if error handling is in progress, return host busy */
+	if (ufshcd_eh_in_progress(hba)) {
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		goto out_unlock;
+	}
+
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
 		break;
@@ -2632,13 +2837,6 @@
 		cmd->scsi_done(cmd);
 		goto out_unlock;
 	}
-
-	/* if error handling is in progress, don't issue commands */
-	if (ufshcd_eh_in_progress(hba)) {
-		set_host_byte(cmd, DID_ERROR);
-		cmd->scsi_done(cmd);
-		goto out_unlock;
-	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	hba->req_abort_count = 0;
@@ -2679,13 +2877,12 @@
 	ufshcd_vops_pm_qos_req_start(hba, cmd->request);
 
 	/* IO svc time latency histogram */
-	if (hba != NULL && cmd->request != NULL) {
-		if (hba->latency_hist_enabled &&
-		    (cmd->request->cmd_type == REQ_TYPE_FS)) {
-			cmd->request->lat_hist_io_start = ktime_get();
-			cmd->request->lat_hist_enabled = 1;
-		} else
-			cmd->request->lat_hist_enabled = 0;
+	if (hba->latency_hist_enabled &&
+	    (cmd->request->cmd_type == REQ_TYPE_FS)) {
+		cmd->request->lat_hist_io_start = ktime_get();
+		cmd->request->lat_hist_enabled = 1;
+	} else {
+		cmd->request->lat_hist_enabled = 0;
 	}
 
 	WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2764,7 +2961,8 @@
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
-	up_read(&hba->clk_scaling_lock);
+	if (has_read_lock)
+		ufshcd_put_read_lock(hba);
 	return err;
 }
 
@@ -2956,7 +3154,12 @@
 	struct completion wait;
 	unsigned long flags;
 
-	down_read(&hba->clk_scaling_lock);
+	/*
+	 * May get invoked from shutdown and IOCTL contexts.
+	 * In the shutdown context, it is invoked with the lock already held.
+	 */
+	if (!ufshcd_is_shutdown_ongoing(hba))
+		down_read(&hba->lock);
 
 	/*
 	 * Get free slot, sleep if slots are unavailable.
@@ -2989,7 +3192,8 @@
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
-	up_read(&hba->clk_scaling_lock);
+	if (!ufshcd_is_shutdown_ongoing(hba))
+		up_read(&hba->lock);
 	return err;
 }
 
@@ -3466,7 +3670,7 @@
 			goto out;
 		}
 
-		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+		buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
 		if (!buff_ascii) {
 			dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
 					__func__, ascii_len);
@@ -3929,8 +4133,12 @@
 		ret = (status != PWR_OK) ? status : -1;
 	}
 out:
-	if (ret)
+	if (ret) {
 		ufsdbg_set_err_state(hba);
+		ufshcd_print_host_state(hba);
+		ufshcd_print_pwr_info(hba);
+		ufshcd_print_host_regs(hba);
+	}
 
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3954,17 +4162,17 @@
 
 	ufshcd_hold_all(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	/*
 	 * Wait for all the outstanding tasks/transfer requests.
 	 * Verify by checking the doorbell registers are clear.
 	 */
 	start = ktime_get();
 	do {
+		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+			ret = -EBUSY;
+			goto out;
+		}
+
 		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 		if (!tm_doorbell && !tr_doorbell) {
@@ -4036,32 +4244,50 @@
 
 static int ufshcd_link_recovery(struct ufs_hba *hba)
 {
-	int ret;
+	int ret = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->ufshcd_state = UFSHCD_STATE_RESET;
-	ufshcd_set_eh_in_progress(hba);
+	/*
+	 * Check if there is any race with fatal error handling.
+	 * If so, wait for it to complete. Even though fatal error
+	 * handling does reset and restore in some cases, don't rely on
+	 * its outcome. We are just avoiding a race here.
+	 */
+	do {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		flush_work(&hba->eh_work);
+	} while (1);
+
+
+	/*
+	 * We don't know whether the previous reset really reset the host
+	 * controller, so force a reset here to be sure.
+	 */
+	hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	hba->force_host_reset = true;
+	schedule_work(&hba->eh_work);
+
+	/* wait for the reset work to finish */
+	do {
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		flush_work(&hba->eh_work);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	} while (1);
+
+	if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+	      ufshcd_is_link_active(hba)))
+		ret = -ENOLINK;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ret = ufshcd_vops_full_reset(hba);
-	if (ret)
-		dev_warn(hba->dev,
-			"full reset returned %d, trying to recover the link\n",
-			ret);
-
-	ret = ufshcd_host_reset_and_restore(hba);
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (ret)
-		hba->ufshcd_state = UFSHCD_STATE_ERROR;
-	ufshcd_clear_eh_in_progress(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	if (ret)
-		dev_err(hba->dev, "%s: link recovery failed, err %d",
-			__func__, ret);
-
 	return ret;
 }
 
@@ -4076,16 +4302,31 @@
 	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
 			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
-	if (ret) {
+	/*
+	 * Do full reinit if enter failed or if LINERESET was detected during
+	 * Hibern8 operation. After LINERESET, link moves to default PWM-G1
+	 * mode hence full reinit is required to move link to HS speeds.
+	 */
+	if (ret || hba->full_init_linereset) {
+		int err;
+
+		hba->full_init_linereset = false;
 		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
 		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 			__func__, ret);
 		/*
-		 * If link recovery fails then return error so that caller
-		 * don't retry the hibern8 enter again.
+		 * If link recovery fails, return the error code (-ENOLINK)
+		 * returned by ufshcd_link_recovery().
+		 * If link recovery succeeds, return -EAGAIN so that the
+		 * hibern8 enter is retried.
 		 */
-		if (ufshcd_link_recovery(hba))
-			ret = -ENOLINK;
+		err = ufshcd_link_recovery(hba);
+		if (err) {
+			dev_err(hba->dev, "%s: link recovery failed", __func__);
+			ret = err;
+		} else {
+			ret = -EAGAIN;
+		}
 	} else {
 		dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
 			ktime_to_us(ktime_get()));
@@ -4102,8 +4343,8 @@
 		ret = __ufshcd_uic_hibern8_enter(hba);
 		if (!ret)
 			goto out;
-		/* Unable to recover the link, so no point proceeding */
-		 if (ret == -ENOLINK)
+		else if (ret != -EAGAIN)
+			/* Unable to recover the link, so no point proceeding */
 			BUG();
 	}
 out:
@@ -4121,6 +4362,7 @@
 	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
 			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
+	/* Do full reinit if exit failed */
 	if (ret) {
 		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
 		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
@@ -4642,6 +4884,7 @@
 out:
 	if (ret)
 		dev_err(hba->dev, "link startup failed %d\n", ret);
+
 	return ret;
 }
 
@@ -5023,7 +5266,12 @@
 		dev_err(hba->dev,
 				"OCS error from controller = %x for tag %d\n",
 				ocs, lrbp->task_tag);
-		ufshcd_print_host_regs(hba);
+		/*
+		 * This is called in interrupt context, hence avoid sleep
+		 * while printing debug registers. Also print only the minimum
+		 * debug registers needed to debug OCS failure.
+		 */
+		__ufshcd_print_host_regs(hba, true);
 		ufshcd_print_host_state(hba);
 		break;
 	} /* end of switch */
@@ -5045,19 +5293,48 @@
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
  * @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
 	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
 		hba->active_uic_cmd->argument2 |=
 			ufshcd_get_uic_cmd_result(hba);
 		hba->active_uic_cmd->argument3 =
 			ufshcd_get_dme_attr_val(hba);
 		complete(&hba->active_uic_cmd->done);
+		retval = IRQ_HANDLED;
 	}
 
-	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
-		complete(hba->uic_async_done);
+	if (intr_status & UFSHCD_UIC_PWR_MASK) {
+		if (hba->uic_async_done) {
+			complete(hba->uic_async_done);
+			retval = IRQ_HANDLED;
+		} else if (ufshcd_is_auto_hibern8_supported(hba)) {
+			/*
+			 * If uic_async_done flag is not set then this
+			 * is an Auto hibern8 err interrupt.
+			 * Perform a host reset followed by a full
+			 * link recovery.
+			 */
+			hba->ufshcd_state = UFSHCD_STATE_ERROR;
+			hba->force_host_reset = true;
+			dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+				__func__, (intr_status & UIC_HIBERNATE_ENTER) ?
+				"Enter" : "Exit",
+				intr_status, ufshcd_get_upmcrs(hba));
+			__ufshcd_print_host_regs(hba, true);
+			ufshcd_print_host_state(hba);
+			schedule_work(&hba->eh_work);
+			retval = IRQ_HANDLED;
+		}
+	}
+	return retval;
 }
 
 /**
@@ -5201,8 +5478,12 @@
 /**
  * ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 {
 	unsigned long completed_reqs;
 	u32 tr_doorbell;
@@ -5220,7 +5501,12 @@
 	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 
-	__ufshcd_transfer_req_compl(hba, completed_reqs);
+	if (completed_reqs) {
+		__ufshcd_transfer_req_compl(hba, completed_reqs);
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
 }
 
 /**
@@ -5637,17 +5923,32 @@
 	int err = 0;
 	int tag;
 	bool needs_reset = false;
+	bool clks_enabled = false;
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
-	ufsdbg_set_err_state(hba);
-	pm_runtime_get_sync(hba->dev);
-	ufshcd_hold_all(hba);
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufsdbg_set_err_state(hba);
+
 	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
 		goto out;
 
+	/*
+	 * Make sure the clocks are ON before proceeding with error
+	 * handling. In the majority of cases the error handler runs with
+	 * clocks ON. However, it may have been scheduled due to an auto
+	 * hibern8 error interrupt, in which case the clocks could be gated
+	 * or in the process of being gated when the handler runs.
+	 */
+	if (unlikely((hba->clk_gating.state != CLKS_ON) &&
+	    ufshcd_is_auto_hibern8_supported(hba))) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		ufshcd_hold(hba, false);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		clks_enabled = true;
+	}
+
 	hba->ufshcd_state = UFSHCD_STATE_RESET;
 	ufshcd_set_eh_in_progress(hba);
 
@@ -5674,14 +5975,18 @@
 		dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
 			__func__, hba->saved_err, hba->saved_uic_err);
 		if (!hba->silence_err_logs) {
+			/* release the lock as printing host regs may sleep */
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
 			ufshcd_print_host_regs(hba);
 			ufshcd_print_host_state(hba);
 			ufshcd_print_pwr_info(hba);
 			ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+			spin_lock_irqsave(hba->host->host_lock, flags);
 		}
 	}
 
-	if ((hba->saved_err & INT_FATAL_ERRORS) || hba->saved_ce_err ||
+	if ((hba->saved_err & INT_FATAL_ERRORS)
+	    || hba->saved_ce_err || hba->force_host_reset ||
 	    ((hba->saved_err & UIC_ERROR) &&
 	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
 				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
@@ -5769,6 +6074,7 @@
 		hba->saved_err = 0;
 		hba->saved_uic_err = 0;
 		hba->saved_ce_err = 0;
+		hba->force_host_reset = false;
 	}
 
 skip_err_handling:
@@ -5780,12 +6086,12 @@
 	}
 
 	hba->silence_err_logs = false;
-	ufshcd_clear_eh_in_progress(hba);
+
+	if (clks_enabled)
+		__ufshcd_release(hba, false);
 out:
+	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	ufshcd_scsi_unblock_requests(hba);
-	ufshcd_release_all(hba);
-	pm_runtime_put_sync(hba->dev);
 }
 
 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
@@ -5799,16 +6105,20 @@
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
+	irqreturn_t retval = IRQ_NONE;
 
 	/* PHY layer lane error */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
-	/* Ignore LINERESET indication, as this is not an error */
 	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
-			(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
 		/*
 		 * To know whether this error is fatal or not, DB timeout
 		 * must be checked but this error is handled separately.
@@ -5816,61 +6126,95 @@
 		dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
 				__func__, reg);
 		ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+
+		/*
+		 * Don't ignore LINERESET indication during hibern8
+		 * enter operation.
+		 */
+		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+			struct uic_command *cmd = hba->active_uic_cmd;
+
+			if (cmd) {
+				if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
+					dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
+						__func__, reg);
+					hba->full_init_linereset = true;
+				}
+			}
+		}
+		retval |= IRQ_HANDLED;
 	}
 
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-	if (reg)
+	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
 
-	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
-		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
-	} else if (hba->dev_info.quirks &
-		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
-		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
-			hba->uic_error |=
-				UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
-		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
-			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
+			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+		} else if (hba->dev_info.quirks &
+			   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+				hba->uic_error |=
+					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+			else if (reg &
+				 UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+				hba->uic_error |=
+					UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+		}
+		retval |= IRQ_HANDLED;
 	}
 
 	/* UIC NL/TL/DME errors needs software retry */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
-	if (reg) {
+	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
-	if (reg) {
+	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-	if (reg) {
+	if ((reg & UIC_DME_ERROR) &&
+	    (reg & UIC_DME_ERROR_CODE_MASK)) {
 		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
 		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
 			__func__, hba->uic_error);
+	return retval;
 }
 
 /**
  * ufshcd_check_errors - Check for errors that need s/w attention
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 {
 	bool queue_eh_work = false;
+	irqreturn_t retval = IRQ_NONE;
 
 	if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
 		queue_eh_work = true;
 
 	if (hba->errors & UIC_ERROR) {
 		hba->uic_error = 0;
-		ufshcd_update_uic_error(hba);
+		retval = ufshcd_update_uic_error(hba);
 		if (hba->uic_error)
 			queue_eh_work = true;
 	}
@@ -5886,12 +6230,16 @@
 
 		/* handle fatal errors only when link is functional */
 		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
-			/* block commands from scsi mid-layer */
-			__ufshcd_scsi_block_requests(hba);
+			/*
+			 * Set error handling in progress flag early so that we
+			 * don't issue new requests any more.
+			 */
+			ufshcd_set_eh_in_progress(hba);
 
 			hba->ufshcd_state = UFSHCD_STATE_ERROR;
 			schedule_work(&hba->eh_work);
 		}
+		retval |= IRQ_HANDLED;
 	}
 	/*
 	 * if (!queue_eh_work) -
@@ -5899,28 +6247,44 @@
 	 * itself without s/w intervention or errors that will be
 	 * handled by the SCSI core layer.
 	 */
+	return retval;
 }
 
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
 	u32 tm_doorbell;
 
 	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-	wake_up(&hba->tm_wq);
+	if (hba->tm_condition) {
+		wake_up(&hba->tm_wq);
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
 }
 
 /**
  * ufshcd_sl_intr - Interrupt service routine
  * @hba: per adapter instance
  * @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
 	ufsdbg_error_inject_dispatcher(hba,
 		ERR_INJECT_INTR, intr_status, &intr_status);
 
@@ -5928,16 +6292,18 @@
 
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
 	if (hba->errors || hba->ce_error)
-		ufshcd_check_errors(hba);
+		retval |= ufshcd_check_errors(hba);
 
 	if (intr_status & UFSHCD_UIC_MASK)
-		ufshcd_uic_cmd_compl(hba, intr_status);
+		retval |= ufshcd_uic_cmd_compl(hba, intr_status);
 
 	if (intr_status & UTP_TASK_REQ_COMPL)
-		ufshcd_tmc_handler(hba);
+		retval |= ufshcd_tmc_handler(hba);
 
 	if (intr_status & UTP_TRANSFER_REQ_COMPL)
-		ufshcd_transfer_req_compl(hba);
+		retval |= ufshcd_transfer_req_compl(hba);
+
+	return retval;
 }
 
 /**
@@ -5945,27 +6311,44 @@
  * @irq: irq number
  * @__hba: pointer to adapter instance
  *
- * Returns IRQ_HANDLED - If interrupt is valid
- *		IRQ_NONE - If invalid interrupt
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
 static irqreturn_t ufshcd_intr(int irq, void *__hba)
 {
 	u32 intr_status, enabled_intr_status;
 	irqreturn_t retval = IRQ_NONE;
 	struct ufs_hba *hba = __hba;
+	int retries = hba->nutrs;
 
 	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-	enabled_intr_status =
-		intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
-	if (intr_status)
-		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+	/*
+	 * There can be at most hba->nutrs requests in flight. In the worst
+	 * case, if they complete one by one after the interrupt status has
+	 * been read, handle them all by re-reading the interrupt status in
+	 * a loop until every request has been processed before returning.
+	 */
+	do {
+		enabled_intr_status =
+			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+		if (intr_status)
+			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+		if (enabled_intr_status)
+			retval |= ufshcd_sl_intr(hba, enabled_intr_status);
 
-	if (enabled_intr_status) {
-		ufshcd_sl_intr(hba, enabled_intr_status);
-		retval = IRQ_HANDLED;
+		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+	} while (intr_status && --retries);
+
+	if (retval == IRQ_NONE) {
+		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+					__func__, intr_status);
+		ufshcd_hex_dump(hba, "host regs: ", hba->mmio_base,
+					UFSHCI_REG_SPACE_SIZE);
 	}
+
 	spin_unlock(hba->host->host_lock);
 	return retval;
 }
@@ -6391,6 +6774,16 @@
 	int retries = MAX_HOST_RESET_RETRIES;
 
 	do {
+		err = ufshcd_vops_full_reset(hba);
+		if (err)
+			dev_warn(hba->dev, "%s: full reset returned %d\n",
+				 __func__, err);
+
+		err = ufshcd_reset_device(hba);
+		if (err)
+			dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+				 __func__, err);
+
 		err = ufshcd_host_reset_and_restore(hba);
 	} while (err && --retries);
 
@@ -6420,13 +6813,12 @@
  */
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-	int err;
+	int err = SUCCESS;
 	unsigned long flags;
 	struct ufs_hba *hba;
 
 	hba = shost_priv(cmd->device->host);
 
-	ufshcd_hold_all(hba);
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -6439,29 +6831,37 @@
 				hba->ufshcd_state == UFSHCD_STATE_RESET))
 			break;
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
 		flush_work(&hba->eh_work);
 	} while (1);
 
-	hba->ufshcd_state = UFSHCD_STATE_RESET;
-	ufshcd_set_eh_in_progress(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	/*
+	 * We don't know whether the previous reset really reset the host
+	 * controller, so force a reset here to be sure.
+	 */
+	hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	hba->force_host_reset = true;
+	schedule_work(&hba->eh_work);
 
-	ufshcd_update_error_stats(hba, UFS_ERR_EH);
-	err = ufshcd_reset_and_restore(hba);
+	/* wait for the reset work to finish */
+	do {
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
+		flush_work(&hba->eh_work);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	} while (1);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (!err) {
-		err = SUCCESS;
-		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-	} else {
+	if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+	      ufshcd_is_link_active(hba))) {
 		err = FAILED;
 		hba->ufshcd_state = UFSHCD_STATE_ERROR;
 	}
-	ufshcd_clear_eh_in_progress(hba);
+
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ufshcd_release_all(hba);
 	return err;
 }
 
@@ -6978,11 +7378,6 @@
 	if (ret)
 		goto out;
 
-	/* Enable auto hibern8 if supported */
-	if (ufshcd_is_auto_hibern8_supported(hba))
-		ufshcd_set_auto_hibern8_timer(hba,
-					      hba->hibern8_on_idle.delay_ms);
-
 	/* Debug counters initialization */
 	ufshcd_clear_dbg_ufs_stats(hba);
 	/* set the default level for urgent bkops */
@@ -7064,20 +7459,38 @@
 		if (ufshcd_scsi_add_wlus(hba))
 			goto out;
 
+		/* Initialize devfreq after UFS device is detected */
+		if (ufshcd_is_clkscaling_supported(hba)) {
+			memcpy(&hba->clk_scaling.saved_pwr_info.info,
+			    &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+			hba->clk_scaling.saved_pwr_info.is_valid = true;
+			hba->clk_scaling.is_scaled_up = true;
+			if (!hba->devfreq) {
+				hba->devfreq = devfreq_add_device(hba->dev,
+							&ufs_devfreq_profile,
+							"simple_ondemand",
+							gov_data);
+				if (IS_ERR(hba->devfreq)) {
+					ret = PTR_ERR(hba->devfreq);
+					dev_err(hba->dev, "Unable to register with devfreq %d\n",
+						ret);
+					goto out;
+				}
+			}
+			hba->clk_scaling.is_allowed = true;
+		}
+
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
 	}
 
-	/* Resume devfreq after UFS device is detected */
-	if (ufshcd_is_clkscaling_supported(hba)) {
-		memcpy(&hba->clk_scaling.saved_pwr_info.info, &hba->pwr_info,
-		       sizeof(struct ufs_pa_layer_attr));
-		hba->clk_scaling.saved_pwr_info.is_valid = true;
-		hba->clk_scaling.is_scaled_up = true;
-		ufshcd_resume_clkscaling(hba);
-		hba->clk_scaling.is_allowed = true;
-	}
-
+	/*
+	 * Enable auto hibern8 if supported, after full host and
+	 * device initialization.
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		ufshcd_set_auto_hibern8_timer(hba,
+				      hba->hibern8_on_idle.delay_ms);
 out:
 	/*
 	 * If we failed to initialize the device or the device is not
@@ -7670,6 +8083,13 @@
 	if (!head || list_empty(head))
 		goto out;
 
+	/* call vendor specific bus vote before enabling the clocks */
+	if (on) {
+		ret = ufshcd_vops_set_bus_vote(hba, on);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * vendor specific setup_clocks ops may depend on clocks managed by
 	 * this standard driver hence call the vendor specific setup_clocks
@@ -7708,11 +8128,24 @@
 	 * this standard driver hence call the vendor specific setup_clocks
 	 * after enabling the clocks managed here.
 	 */
-	if (on)
+	if (on) {
 		ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+		if (ret)
+			goto out;
+	}
+
+	/*
+	 * call vendor specific bus vote to remove the vote after
+	 * disabling the clocks.
+	 */
+	if (!on)
+		ret = ufshcd_vops_set_bus_vote(hba, on);
 
 out:
 	if (ret) {
+		if (on)
+			/* Can't do much if this fails */
+			(void) ufshcd_vops_set_bus_vote(hba, false);
 		list_for_each_entry(clki, head, list) {
 			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
 				clk_disable_unprepare(clki->clk);
@@ -7884,7 +8317,8 @@
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
 		if (ufshcd_is_clkscaling_supported(hba)) {
-			ufshcd_suspend_clkscaling(hba);
+			if (hba->devfreq)
+				ufshcd_suspend_clkscaling(hba);
 			destroy_workqueue(hba->clk_scaling.workq);
 		}
 		ufshcd_disable_clocks(hba, false);
@@ -8335,9 +8769,13 @@
 			goto vendor_suspend;
 		}
 	} else if (ufshcd_is_link_off(hba)) {
-		ret = ufshcd_host_reset_and_restore(hba);
 		/*
-		 * ufshcd_host_reset_and_restore() should have already
+		 * A full initialization of the host and the device is required
+		 * since the link was powered off during suspend.
+		 */
+		ret = ufshcd_reset_and_restore(hba);
+		/*
+		 * ufshcd_reset_and_restore() should have already
 		 * set the link state as active
 		 */
 		if (ret || !ufshcd_is_link_active(hba))
@@ -8676,6 +9114,35 @@
 	ufshcd_add_spm_lvl_sysfs_nodes(hba);
 }
 
+static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
+{
+	bool suspend = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_scaling.is_allowed) {
+		hba->clk_scaling.is_allowed = false;
+		suspend = true;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/*
+	 * Scaling work may already have been scheduled, so make sure it
+	 * doesn't race with shutdown
+	 */
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+		cancel_work_sync(&hba->clk_scaling.suspend_work);
+		cancel_work_sync(&hba->clk_scaling.resume_work);
+		if (suspend)
+			ufshcd_suspend_clkscaling(hba);
+	}
+
+	/* Unregister so that devfreq_monitor can't race with shutdown */
+	if (hba->devfreq)
+		devfreq_remove_device(hba->devfreq);
+}
+
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -8686,11 +9153,35 @@
  */
 int ufshcd_shutdown(struct ufs_hba *hba)
 {
-	/*
-	 * TODO: This function should send the power down notification to
-	 * UFS device and then power off the UFS link. But we need to be sure
-	 * that there will not be any new UFS requests issued after this.
+	int ret = 0;
+
+	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+		goto out;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold_all(hba);
+	ufshcd_mark_shutdown_ongoing(hba);
+	ufshcd_shutdown_clkscaling(hba);
+	/*
+	 * (1) Acquire the lock to stop any more requests
+	 * (2) Wait for all issued requests to complete
 	 */
+	ufshcd_get_write_lock(hba);
+	ufshcd_scsi_block_requests(hba);
+	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+	if (ret)
+		dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
+			__func__, ret);
+	/* Requests may have errored out above; let them be handled */
+	flush_work(&hba->eh_work);
+	/* reqs issued from contexts other than shutdown will fail from now on */
+	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release_all(hba);
+	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+	if (ret)
+		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+	/* allow force shutdown even in case of errors */
 	return 0;
 }
 EXPORT_SYMBOL(ufshcd_shutdown);
@@ -8886,6 +9377,32 @@
 	if (scale_up) {
 		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
 		       sizeof(struct ufs_pa_layer_attr));
+		/*
+		 * Some UFS devices may stop responding after switching from
+		 * HS-G1 to HS-G3. However, these devices work fine if the
+		 * switch is done in two steps: HS-G1 to HS-G2 followed by
+		 * HS-G2 to HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
+		 * quirk is enabled for such devices, this two-step gear
+		 * switch workaround is applied.
+		 */
+		if ((hba->dev_info.quirks &
+		     UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
+		    && (hba->pwr_info.gear_tx == UFS_HS_G1)
+		    && (new_pwr_info.gear_tx == UFS_HS_G3)) {
+			/* scale up to G2 first */
+			new_pwr_info.gear_tx = UFS_HS_G2;
+			new_pwr_info.gear_rx = UFS_HS_G2;
+			ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+			if (ret)
+				goto out;
+
+			/* scale up to G3 now */
+			new_pwr_info.gear_tx = UFS_HS_G3;
+			new_pwr_info.gear_rx = UFS_HS_G3;
+			ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+			if (ret)
+				goto out;
+		}
 	} else {
 		memcpy(&new_pwr_info, &hba->pwr_info,
 		       sizeof(struct ufs_pa_layer_attr));
@@ -8905,10 +9422,10 @@
 				new_pwr_info.pwr_rx = FASTAUTO_MODE;
 			}
 		}
+		ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 	}
 
-	ret = ufshcd_change_power_mode(hba, &new_pwr_info);
-
+out:
 	if (ret)
 		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
 			__func__, ret,
@@ -8928,10 +9445,10 @@
 	 * clock scaling is in progress
 	 */
 	ufshcd_scsi_block_requests(hba);
-	down_write(&hba->clk_scaling_lock);
+	down_write(&hba->lock);
 	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
-		up_write(&hba->clk_scaling_lock);
+		up_write(&hba->lock);
 		ufshcd_scsi_unblock_requests(hba);
 	}
 
@@ -8940,7 +9457,7 @@
 
 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 {
-	up_write(&hba->clk_scaling_lock);
+	up_write(&hba->lock);
 	ufshcd_scsi_unblock_requests(hba);
 }
 
@@ -8971,10 +9488,29 @@
 			goto clk_scaling_unprepare;
 	}
 
+	/*
+	 * If auto hibern8 is supported then put the link in
+	 * hibern8 manually; this avoids auto hibern8 racing with the
+	 * clock frequency scaling sequence.
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret)
+			/* link will be in a bad state, so no need to goto scale_up_gear */
+			return ret;
+	}
+
 	ret = ufshcd_scale_clks(hba, scale_up);
 	if (ret)
 		goto scale_up_gear;
 
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		ret = ufshcd_uic_hibern8_exit(hba);
+		if (ret)
+			/* link will be in a bad state, so no need to goto scale_up_gear */
+			return ret;
+	}
+
 	/* scale up the gear after scaling up clocks */
 	if (scale_up) {
 		ret = ufshcd_scale_gear(hba, true);
@@ -9220,23 +9756,6 @@
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
-	.upthreshold = 35,
-	.downdifferential = 30,
-	.simple_scaling = 1,
-};
-
-static void *gov_data = &ufshcd_ondemand_data;
-#else
-static void *gov_data;
-#endif
-
-static struct devfreq_dev_profile ufs_devfreq_profile = {
-	.polling_ms	= 40,
-	.target		= ufshcd_devfreq_target,
-	.get_dev_status	= ufshcd_devfreq_get_dev_status,
-};
 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
 {
 	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
@@ -9352,7 +9871,7 @@
 	/* Initialize mutex for device management commands */
 	mutex_init(&hba->dev_cmd.lock);
 
-	init_rwsem(&hba->clk_scaling_lock);
+	init_rwsem(&hba->lock);
 
 	/* Initialize device management tag acquire wait queue */
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
@@ -9389,6 +9908,15 @@
 		goto exit_gating;
 	}
 
+	/* Reset controller to power on reset (POR) state */
+	ufshcd_vops_full_reset(hba);
+
+	/* reset connected UFS device */
+	err = ufshcd_reset_device(hba);
+	if (err)
+		dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+			 __func__, err);
+
 	/* Host controller enable */
 	err = ufshcd_hba_enable(hba);
 	if (err) {
@@ -9401,16 +9929,6 @@
 	if (ufshcd_is_clkscaling_supported(hba)) {
 		char wq_name[sizeof("ufs_clkscaling_00")];
 
-		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
-						   "simple_ondemand", gov_data);
-		if (IS_ERR(hba->devfreq)) {
-			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
-					PTR_ERR(hba->devfreq));
-			err = PTR_ERR(hba->devfreq);
-			goto out_remove_scsi_host;
-		}
-		hba->clk_scaling.is_suspended = false;
-
 		INIT_WORK(&hba->clk_scaling.suspend_work,
 			  ufshcd_clk_scaling_suspend_work);
 		INIT_WORK(&hba->clk_scaling.resume_work,
@@ -9420,8 +9938,6 @@
 			 host->host_no);
 		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
 
-		/* Suspend devfreq until the UFS device is detected */
-		ufshcd_suspend_clkscaling(hba);
 		ufshcd_clkscaling_init_sysfs(hba);
 	}
 
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 709801f..b70606b 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -3,7 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.h
  * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -39,6 +39,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -55,6 +56,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/regulator/consumer.h>
+#include <linux/reset.h>
 #include "unipro.h"
 
 #include <asm/irq.h>
@@ -309,6 +311,7 @@
  * @update_sec_cfg: called to restore host controller secure configuration
  * @get_scale_down_gear: called to get the minimum supported gear to
  *			 scale down
+ * @set_bus_vote: called to vote for the required bus bandwidth
  * @add_debugfs: used to add debugfs entries
  * @remove_debugfs: used to remove debugfs entries
  */
@@ -332,9 +335,10 @@
 	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
 	int	(*full_reset)(struct ufs_hba *);
-	void	(*dbg_register_dump)(struct ufs_hba *hba);
+	void	(*dbg_register_dump)(struct ufs_hba *hba, bool no_sleep);
 	int	(*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
 	u32	(*get_scale_down_gear)(struct ufs_hba *);
+	int	(*set_bus_vote)(struct ufs_hba *, bool);
 #ifdef CONFIG_DEBUG_FS
 	void	(*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
 	void	(*remove_debugfs)(struct ufs_hba *hba);
@@ -393,8 +397,9 @@
 
 /**
  * struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
+ * @gate_hrtimer: hrtimer to invoke @gate_work after some delay as
+ * specified in @delay_ms
+ * @gate_work: worker to turn off clocks
  * @ungate_work: worker to turn on clocks that will be used in case of
  * interrupt context
  * @state: the current clocks state
@@ -412,7 +417,8 @@
  * completion before gating clocks.
  */
 struct ufs_clk_gating {
-	struct delayed_work gate_work;
+	struct hrtimer gate_hrtimer;
+	struct work_struct gate_work;
 	struct work_struct ungate_work;
 	enum clk_gating_state state;
 	unsigned long delay_ms;
@@ -425,6 +431,7 @@
 	struct device_attribute enable_attr;
 	bool is_enabled;
 	int active_reqs;
+	struct workqueue_struct *ungating_workq;
 };
 
 /* Hibern8 state  */
@@ -801,6 +808,7 @@
 	u32 saved_uic_err;
 	u32 saved_ce_err;
 	bool silence_err_logs;
+	bool force_host_reset;
 
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
@@ -882,17 +890,33 @@
 	enum bkops_status urgent_bkops_lvl;
 	bool is_urgent_bkops_lvl_checked;
 
-	struct rw_semaphore clk_scaling_lock;
+	/* synchronize between different contexts */
+	struct rw_semaphore lock;
+	unsigned long shutdown_in_prog;
 
+	struct reset_control *core_reset;
 	/* If set, don't gate device ref_clk during clock gating */
 	bool no_ref_clk_gating;
 
 	int scsi_block_reqs_cnt;
 
+	bool full_init_linereset;
+	struct pinctrl *pctrl;
+
 	int latency_hist_enabled;
 	struct io_latency_state io_lat_s;
 };
 
+static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
+{
+	set_bit(0, &hba->shutdown_in_prog);
+}
+
+static inline bool ufshcd_is_shutdown_ongoing(struct ufs_hba *hba)
+{
+	return !!(test_bit(0, &hba->shutdown_in_prog));
+}
+
 /* Returns true if clocks can be gated. Otherwise false */
 static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
 {
@@ -1233,10 +1257,11 @@
 }
 
 
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
+static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba,
+						 bool no_sleep)
 {
 	if (hba->var && hba->var->vops && hba->var->vops->dbg_register_dump)
-		hba->var->vops->dbg_register_dump(hba);
+		hba->var->vops->dbg_register_dump(hba, no_sleep);
 }
 
 static inline int ufshcd_vops_update_sec_cfg(struct ufs_hba *hba,
@@ -1255,6 +1280,13 @@
 	return UFS_HS_G1;
 }
 
+static inline int ufshcd_vops_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->set_bus_vote)
+		return hba->var->vops->set_bus_vote(hba, on);
+	return 0;
+}
+
 #ifdef CONFIG_DEBUG_FS
 static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
 						struct dentry *root)
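
For context on the clock-gating rework above: an hrtimer callback runs in interrupt context and therefore cannot do the sleeping clock teardown itself, so the timer only kicks @gate_work. A minimal sketch of that pattern using the fields added to struct ufs_clk_gating (the handler name and the use of system_wq are illustrative assumptions, not taken from this patch):

	/* Illustrative only: hrtimer fires in irq context, defer to a worker */
	static enum hrtimer_restart ufshcd_gate_hrtimer_cb(struct hrtimer *t)
	{
		struct ufs_clk_gating *cg =
			container_of(t, struct ufs_clk_gating, gate_hrtimer);

		queue_work(system_wq, &cg->gate_work);
		return HRTIMER_NORESTART;
	}
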
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d65dad0..c0e4650 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -190,6 +190,7 @@
 
 /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
 #define UIC_PHY_ADAPTER_LAYER_ERROR			UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR		UFS_BIT(4)
 #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK		0x1F
 #define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK		0xF
 
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index cf715e5..0f8d9b6 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -214,6 +214,15 @@
 	  deadlocks. It does not run during the bootup process, so it will
 	  not catch any early lockups.
 
+config QPNP_PBS
+	tristate "PBS trigger support for QPNP PMIC"
+	depends on SPMI
+	help
+	  This driver supports configuring software PBS trigger events through
+	  PBS RAM on Qualcomm Technologies, Inc. QPNP PMICs. This module
+	  provides the APIs to client drivers that want to send PBS trigger
+	  events to the PBS RAM.
+
 config QCOM_MEMORY_DUMP_V2
 	bool "QCOM Memory Dump V2 Support"
 	help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 45384668..00a1284 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,6 +3,7 @@
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
 obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
 obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o
+obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
diff --git a/drivers/soc/qcom/avtimer.c b/drivers/soc/qcom/avtimer.c
index 757cdb0..1819d46 100644
--- a/drivers/soc/qcom/avtimer.c
+++ b/drivers/soc/qcom/avtimer.c
@@ -64,6 +64,7 @@
 	void __iomem *p_avtimer_msw;
 	void __iomem *p_avtimer_lsw;
 	uint32_t clk_div;
+	uint32_t clk_mult;
 	atomic_t adsp_ready;
 	int num_retries;
 };
@@ -292,7 +293,6 @@
 int avcs_core_query_timer(uint64_t *avtimer_tick)
 {
 	uint32_t avtimer_msw = 0, avtimer_lsw = 0;
-	uint32_t res = 0;
 	uint64_t avtimer_tick_temp;
 
 	if (!atomic_read(&avtimer.adsp_ready)) {
@@ -302,12 +302,12 @@
 	avtimer_lsw = ioread32(avtimer.p_avtimer_lsw);
 	avtimer_msw = ioread32(avtimer.p_avtimer_msw);
 
-	avtimer_tick_temp =
-		(uint64_t)((uint64_t)avtimer_msw << 32)
-			| avtimer_lsw;
-	res = do_div(avtimer_tick_temp, avtimer.clk_div);
-	*avtimer_tick = avtimer_tick_temp;
-	pr_debug("%s:Avtimer: msw: %u, lsw: %u, tick: %llu\n", __func__,
+	avtimer_tick_temp = (uint64_t)((uint64_t)avtimer_msw << 32)
+			    | avtimer_lsw;
+	*avtimer_tick = mul_u64_u32_div(avtimer_tick_temp, avtimer.clk_mult,
+					avtimer.clk_div);
+	pr_debug_ratelimited("%s:Avtimer: msw: %u, lsw: %u, tick: %llu\n",
+			__func__,
 			avtimer_msw, avtimer_lsw, *avtimer_tick);
 	return 0;
 }
@@ -332,22 +332,19 @@
 	switch (ioctl_num) {
 	case IOCTL_GET_AVTIMER_TICK:
 	{
-		uint32_t avtimer_msw_1st = 0, avtimer_lsw = 0;
-		uint32_t avtimer_msw_2nd = 0;
-		uint64_t avtimer_tick;
+		uint64_t avtimer_tick = 0;
+		int rc;
 
-		do {
-			avtimer_msw_1st = ioread32(avtimer.p_avtimer_msw);
-			avtimer_lsw = ioread32(avtimer.p_avtimer_lsw);
-			avtimer_msw_2nd = ioread32(avtimer.p_avtimer_msw);
-		} while (avtimer_msw_1st != avtimer_msw_2nd);
+		rc = avcs_core_query_timer(&avtimer_tick);
 
-		avtimer_lsw = avtimer_lsw/avtimer.clk_div;
-		avtimer_tick =
-		((uint64_t) avtimer_msw_1st << 32) | avtimer_lsw;
+		if (rc) {
+			pr_err("%s: Error: Invalid AV Timer tick, rc = %d\n",
+				__func__, rc);
+			return rc;
+		}
 
-		pr_debug("%s: AV Timer tick: msw: %x, lsw: %x time %llx\n",
-			 __func__, avtimer_msw_1st, avtimer_lsw, avtimer_tick);
+		pr_debug_ratelimited("%s: AV Timer tick: time %llx\n",
+		__func__, avtimer_tick);
 		if (copy_to_user((void *) ioctl_param, &avtimer_tick,
 		    sizeof(avtimer_tick))) {
 			pr_err("%s: copy_to_user failed\n", __func__);
@@ -377,6 +374,7 @@
 	struct device *device_handle;
 	struct resource *reg_lsb = NULL, *reg_msb = NULL;
 	uint32_t clk_div_val;
+	uint32_t clk_mult_val;
 
 	if (!pdev) {
 		pr_err("%s: Invalid params\n", __func__);
@@ -465,7 +463,14 @@
 	else
 		avtimer.clk_div = clk_div_val;
 
-	pr_debug("avtimer.clk_div = %d\n", avtimer.clk_div);
+	if (of_property_read_u32(pdev->dev.of_node,
+			"qcom,clk-mult", &clk_mult_val))
+		avtimer.clk_mult = 1;
+	else
+		avtimer.clk_mult = clk_mult_val;
+
+	pr_debug("%s: avtimer.clk_div = %d, avtimer.clk_mult = %d\n",
+		 __func__, avtimer.clk_div, avtimer.clk_mult);
 	return 0;
 
 class_destroy:
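
The timestamp math above scales the raw 64-bit counter by clk_mult/clk_div via mul_u64_u32_div(), which uses a wide intermediate so the multiply does not overflow 64 bits. A simplified sketch of the same conversion; the concrete clk_mult/clk_div values come from device tree, and the numbers below are made up for illustration:

	/* Illustrative only: same scaling the driver performs */
	u64 avtimer_ticks_to_units(u32 msw, u32 lsw, u32 clk_mult, u32 clk_div)
	{
		u64 raw = ((u64)msw << 32) | lsw;

		/*
		 * The driver uses mul_u64_u32_div(raw, clk_mult, clk_div);
		 * the plain form below assumes raw * clk_mult fits in 64 bits.
		 */
		return (raw * clk_mult) / clk_div;
	}

	/* e.g. clk_div = 27, clk_mult = 1 turns a 27 MHz count into microseconds */
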
diff --git a/drivers/soc/qcom/debug_core.c b/drivers/soc/qcom/debug_core.c
index 019360a..164a866 100644
--- a/drivers/soc/qcom/debug_core.c
+++ b/drivers/soc/qcom/debug_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -290,8 +290,8 @@
 
 int msm_core_debug_init(void)
 {
-	struct dentry *dir;
-	struct dentry *file;
+	struct dentry *dir = NULL;
+	struct dentry *file = NULL;
 	int i;
 
 	msm_core_data = get_cpu_pwr_stats();
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 7b93a00..8cd5d3c 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -30,6 +30,7 @@
 #include "glink_private.h"
 #include "glink_xprt_if.h"
 
+#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
 /* Number of internal IPC Logging log pages */
 #define NUM_LOG_PAGES	10
 #define GLINK_PM_QOS_HOLDOFF_MS		10
@@ -38,6 +39,8 @@
 #define GLINK_QOS_DEF_MTU		2048
 
 #define GLINK_KTHREAD_PRIO 1
+
+static rwlock_t magic_lock;
 /**
  * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
  * @max_rate_kBps:	Maximum rate supported by the priority bucket.
@@ -309,6 +312,7 @@
 	unsigned long req_rate_kBps;
 	uint32_t tx_intent_cnt;
 	uint32_t tx_cnt;
+	uint32_t magic_number;
 };
 
 static struct glink_core_if core_impl;
@@ -437,6 +441,37 @@
 #define GLINK_GET_CH_TX_STATE(ctx) \
 		((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
 
+static int glink_get_ch_ctx(struct channel_ctx *ctx)
+{
+	unsigned long flags;
+
+	if (!ctx)
+		return -EINVAL;
+	read_lock_irqsave(&magic_lock, flags);
+	if (ctx->magic_number != GLINK_CTX_CANARY) {
+		read_unlock_irqrestore(&magic_lock, flags);
+		return -EINVAL;
+	}
+	rwref_get(&ctx->ch_state_lhb2);
+	read_unlock_irqrestore(&magic_lock, flags);
+	return 0;
+}
+
+static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+{
+	unsigned long flags;
+
+	if (!update_magic) {
+		rwref_put(&ctx->ch_state_lhb2);
+		return 0;
+	}
+	write_lock_irqsave(&magic_lock, flags);
+	ctx->magic_number = 0;
+	rwref_put(&ctx->ch_state_lhb2);
+	write_unlock_irqrestore(&magic_lock, flags);
+	return 0;
+}
+
 /**
  * glink_ssr() - Clean up locally for SSR by simulating remote close
  * @subsystem:	The name of the subsystem being restarted
@@ -548,6 +583,9 @@
 	}
 	ctx->rcid = 0;
 
+	ctx->int_req_ack = false;
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
 	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
 		ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
 		if (ctx->notify_state)
@@ -564,9 +602,6 @@
 			"Did not send GLINK_REMOTE_DISCONNECTED",
 			"local state is already CLOSED");
 
-	ctx->int_req_ack = false;
-	complete_all(&ctx->int_req_ack_complete);
-	complete_all(&ctx->int_req_complete);
 	ch_purge_intent_lists(ctx);
 
 	return is_fully_closed;
@@ -1024,7 +1059,8 @@
  *                              it is not found.
  * @xprt_ctx:	Transport to search for a matching edge.
  *
- * Return: The edge ctx corresponding to edge of @xprt_ctx.
+ * Return: The edge ctx corresponding to the edge of @xprt_ctx, or
+ *	NULL if memory allocation fails.
  */
 static struct glink_core_edge_ctx *edge_name_to_ctx_create(
 				struct glink_core_xprt_ctx *xprt_ctx)
@@ -1040,6 +1076,10 @@
 		}
 	}
 	edge_ctx = kzalloc(sizeof(struct glink_core_edge_ctx), GFP_KERNEL);
+	if (!edge_ctx) {
+		mutex_unlock(&edge_list_lock_lhd0);
+		return NULL;
+	}
 	strlcpy(edge_ctx->name, xprt_ctx->edge, GLINK_NAME_SIZE);
 	rwref_lock_init(&edge_ctx->edge_ref_lock_lhd1, glink_edge_ctx_release);
 	mutex_init(&edge_ctx->edge_migration_lock_lhd2);
@@ -1631,6 +1671,14 @@
 	}
 	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
 
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry_safe(tx_info, tx_info_temp,
+				 &ctx->tx_pending_remote_done, list_done) {
+		ctx->notify_tx_abort(ctx, ctx->user_priv, tx_info->pkt_priv);
+		rwref_put(&tx_info->pkt_ref);
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
 	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
 	list_for_each_entry_safe(ptr_intent, tmp_intent,
 				&ctx->local_rx_intent_list, list) {
@@ -2548,6 +2596,7 @@
 	ctx->notify_tx_abort = cfg->notify_tx_abort;
 	ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
 	ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+	ctx->magic_number = GLINK_CTX_CANARY;
 
 	if (!ctx->notify_rx_intent_req)
 		ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
@@ -2583,7 +2632,6 @@
 
 	GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
 			__func__, ctx);
-
 	return ctx;
 }
 EXPORT_SYMBOL(glink_open);
@@ -2683,15 +2731,19 @@
 	unsigned long flags;
 	bool is_empty = false;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
-	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
+		glink_put_ch_ctx(ctx, false);
 		return 0;
+	}
 
 	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
 		/* close already pending */
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -2756,6 +2808,7 @@
 
 	rwref_put(&ctx->ch_state_lhb2);
 	rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+	glink_put_ch_ctx(ctx, true);
 	return ret;
 }
 EXPORT_SYMBOL(glink_close);
@@ -2814,29 +2867,30 @@
 	if (!size)
 		return -EINVAL;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
 	if (!(vbuf_provider || pbuf_provider)) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto glink_tx_common_err;
 	}
 
 	if (!ch_is_fully_opened(ctx)) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto glink_tx_common_err;
 	}
 
 	if (size > GLINK_MAX_PKT_SIZE) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto glink_tx_common_err;
 	}
 
 	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
 		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EOPNOTSUPP;
+			ret = -EOPNOTSUPP;
+			goto glink_tx_common_err;
 		}
 		tracer_pkt_log_event(data, GLINK_CORE_TX);
 	}
@@ -2848,16 +2902,16 @@
 			GLINK_ERR_CH(ctx,
 				"%s: R[%u]:%zu Intent not present for lcid\n",
 				__func__, riid, size);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EAGAIN;
+			ret = -EAGAIN;
+			goto glink_tx_common_err;
 		}
 		if (is_atomic && !(ctx->transport_ptr->capabilities &
 					  GCAP_AUTO_QUEUE_RX_INT)) {
 			GLINK_ERR_CH(ctx,
 				"%s: Cannot request intent in atomic context\n",
 				__func__);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto glink_tx_common_err;
 		}
 
 		/* request intent of correct size */
@@ -2867,20 +2921,18 @@
 		if (ret) {
 			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
 					__func__, ret);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return ret;
+			goto glink_tx_common_err;
 		}
 
 		while (ch_pop_remote_rx_intent(ctx, size, &riid,
 						&intent_size, &cookie)) {
-			rwref_get(&ctx->ch_state_lhb2);
 			rwref_read_put(&ctx->ch_state_lhb2);
 			if (is_atomic) {
 				GLINK_ERR_CH(ctx,
 				    "%s Intent of size %zu not ready\n",
 				    __func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EAGAIN;
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
 			}
 
 			if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
@@ -2888,8 +2940,8 @@
 				GLINK_ERR_CH(ctx,
 					"%s: Channel closed while waiting for intent\n",
 					__func__);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EBUSY;
+				ret = -EBUSY;
+				goto glink_tx_common_err_2;
 			}
 
 			/* wait for the remote intent req ack */
@@ -2899,8 +2951,8 @@
 				GLINK_ERR_CH(ctx,
 					"%s: Intent request ack with size: %zu not granted for lcid\n",
 					__func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -ETIMEDOUT;
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
 			}
 
 			if (!ctx->int_req_ack) {
@@ -2908,8 +2960,8 @@
 				    "%s: Intent Request with size: %zu %s",
 				    __func__, size,
 				    "not granted for lcid\n");
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EAGAIN;
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
 			}
 
 			/* wait for the rx_intent from remote side */
@@ -2919,13 +2971,12 @@
 				GLINK_ERR_CH(ctx,
 					"%s: Intent request with size: %zu not granted for lcid\n",
 					__func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -ETIMEDOUT;
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
 			}
 
 			reinit_completion(&ctx->int_req_complete);
 			rwref_read_get(&ctx->ch_state_lhb2);
-			rwref_put(&ctx->ch_state_lhb2);
 		}
 	}
 
@@ -2945,8 +2996,8 @@
 	if (!tx_info) {
 		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
 		ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto glink_tx_common_err;
 	}
 	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
 	INIT_LIST_HEAD(&tx_info->list_done);
@@ -2972,7 +3023,10 @@
 	else
 		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
 
+glink_tx_common_err:
 	rwref_read_put(&ctx->ch_state_lhb2);
+glink_tx_common_err_2:
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 
@@ -3013,13 +3067,15 @@
 	struct glink_core_rx_intent *intent_ptr;
 	int ret = 0;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	if (!ch_is_fully_opened(ctx)) {
 		/* Can only queue rx intents if channel is fully opened */
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3028,13 +3084,16 @@
 		GLINK_ERR_CH(ctx,
 			"%s: Intent pointer allocation failed size[%zu]\n",
 			__func__, size);
+		glink_put_ch_ctx(ctx, false);
 		return -ENOMEM;
 	}
 	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
 			intent_ptr->intent_size);
 
-	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+		glink_put_ch_ctx(ctx, false);
 		return ret;
+	}
 
 	/* notify remote side of rx intent */
 	ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
@@ -3042,7 +3101,7 @@
 	if (ret)
 		/* unable to transmit, dequeue intent */
 		ch_remove_local_rx_intent(ctx, intent_ptr->id);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3061,20 +3120,25 @@
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	struct glink_core_rx_intent *intent;
 	unsigned long flags;
+	int ret;
 
 	if (!ctx || !ch_is_fully_opened(ctx))
 		return false;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return false;
 	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
 	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
 		if (size <= intent->intent_size) {
 			spin_unlock_irqrestore(
 				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			glink_put_ch_ctx(ctx, false);
 			return true;
 		}
 	}
 	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
-
+	glink_put_ch_ctx(ctx, false);
 	return false;
 }
 EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3095,11 +3159,15 @@
 	uint32_t id;
 	int ret = 0;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
 
 	if (IS_ERR_OR_NULL(liid_ptr)) {
 		/* invalid pointer */
 		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+		glink_put_ch_ctx(ctx, false);
 		return -EINVAL;
 	}
 
@@ -3125,7 +3193,7 @@
 	/* send rx done */
 	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
 			ctx->lcid, id, reuse);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_rx_done);
@@ -3173,12 +3241,13 @@
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3188,6 +3257,7 @@
 			ctx->lcid, ctx->lsigs);
 	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
 
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_sigs_set);
@@ -3203,17 +3273,22 @@
 int glink_sigs_local_get(void *handle, uint32_t *sigs)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx || !sigs)
+	if (!sigs)
 		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->lsigs;
+	glink_put_ch_ctx(ctx, false);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3229,17 +3304,23 @@
 int glink_sigs_remote_get(void *handle, uint32_t *sigs)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx || !sigs)
+	if (!sigs)
 		return -EINVAL;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->rsigs;
+	glink_put_ch_ctx(ctx, false);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3334,12 +3415,16 @@
 	int ret;
 	unsigned long req_rate_kBps;
 
-	if (!ctx || !latency_us || !pkt_size)
+	if (!latency_us || !pkt_size)
 		return -EINVAL;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3349,7 +3434,7 @@
 	if (ret < 0)
 		GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
 			     __func__, latency_us, pkt_size);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_latency);
@@ -3367,16 +3452,18 @@
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	ret = glink_qos_reset_priority(ctx);
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_cancel);
@@ -3397,12 +3484,13 @@
 	int ret;
 	unsigned long flags;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3411,6 +3499,7 @@
 	ret = glink_qos_add_ch_tx_intent(ctx);
 	spin_unlock(&ctx->tx_lists_lock_lhc3);
 	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_start);
@@ -3429,16 +3518,20 @@
 unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx)
-		return (unsigned long)-EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return (unsigned long)ret;
 
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return (unsigned long)-EBUSY;
 	}
 
+	glink_put_ch_ctx(ctx, false);
 	return ctx->transport_ptr->ops->get_power_vote_ramp_time(
 			ctx->transport_ptr->ops,
 			glink_prio_to_power_state(ctx->transport_ptr,
@@ -3522,12 +3615,16 @@
 int glink_wait_link_down(void *handle)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-	if (!ctx->transport_ptr)
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ctx->transport_ptr) {
+		glink_put_ch_ctx(ctx, false);
 		return -EOPNOTSUPP;
-
+	}
+	glink_put_ch_ctx(ctx, false);
 	return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
 }
 EXPORT_SYMBOL(glink_wait_link_down);
@@ -3826,6 +3923,10 @@
 	xprt_ptr->local_version_idx = cfg->versions_entries - 1;
 	xprt_ptr->remote_version_idx = cfg->versions_entries - 1;
 	xprt_ptr->edge_ctx = edge_name_to_ctx_create(xprt_ptr);
+	if (!xprt_ptr->edge_ctx) {
+		kfree(xprt_ptr);
+		return -ENOMEM;
+	}
 	xprt_ptr->l_features =
 			cfg->versions[cfg->versions_entries - 1].features;
 	if (!if_ptr->poll)
@@ -3865,7 +3966,6 @@
 			xprt_ptr->edge, xprt_ptr->name);
 	if (IS_ERR_OR_NULL(xprt_ptr->tx_task)) {
 		GLINK_ERR("%s: unable to run thread\n", __func__);
-		glink_core_deinit_xprt_qos_cfg(xprt_ptr);
 		kfree(xprt_ptr);
 		return -ENOMEM;
 	}
@@ -4030,6 +4130,37 @@
 	return xprt_ptr;
 }
 
+static struct channel_ctx *get_first_ch_ctx(
+	struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long flags;
+	struct channel_ctx *ctx;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	if (!list_empty(&xprt_ctx->channels)) {
+		ctx = list_first_entry(&xprt_ctx->channels,
+					struct channel_ctx, port_list_node);
+		rwref_get(&ctx->ch_state_lhb2);
+	} else {
+		ctx = NULL;
+	}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	return ctx;
+}
+
+static void glink_core_move_ch_node(struct glink_core_xprt_ctx *xprt_ptr,
+	struct glink_core_xprt_ctx *dummy_xprt_ctx, struct channel_ctx *ctx)
+{
+	unsigned long flags, d_flags;
+
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	list_move_tail(&ctx->port_list_node, &dummy_xprt_ctx->channels);
+	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+}
+
 /**
  * glink_core_channel_cleanup() - cleanup all channels for the transport
  *
@@ -4040,7 +4171,7 @@
 static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
 {
 	unsigned long flags, d_flags;
-	struct channel_ctx *ctx, *tmp_ctx;
+	struct channel_ctx *ctx;
 	struct channel_lcid *temp_lcid, *temp_lcid1;
 	struct glink_core_xprt_ctx *dummy_xprt_ctx;
 
@@ -4049,52 +4180,39 @@
 		GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
 		return;
 	}
-
 	rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
 	rwref_read_get(&xprt_ptr->xprt_state_lhb0);
-	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
-	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
-
-	list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
-						port_list_node) {
+	ctx = get_first_ch_ctx(xprt_ptr);
+	while (ctx) {
 		rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
 		if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
 			ctx->local_open_state == GLINK_CHANNEL_OPENING) {
-			rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
-			list_move_tail(&ctx->port_list_node,
-					&dummy_xprt_ctx->channels);
 			ctx->transport_ptr = dummy_xprt_ctx;
-			rwref_write_put(&ctx->ch_state_lhb2);
+			glink_core_move_ch_node(xprt_ptr, dummy_xprt_ctx, ctx);
 		} else {
 			/* local state is in either CLOSED or CLOSING */
-			spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
-							flags);
-			spin_unlock_irqrestore(
-					&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
-					d_flags);
 			glink_core_remote_close_common(ctx, true);
 			if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
 				glink_core_ch_close_ack_common(ctx, true);
 			/* Channel should be fully closed now. Delete here */
 			if (ch_is_fully_closed(ctx))
 				glink_delete_ch_from_list(ctx, false);
-			rwref_write_put(&ctx->ch_state_lhb2);
-			spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
-						d_flags);
-			spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
 		}
+		rwref_put(&ctx->ch_state_lhb2);
+		rwref_write_put(&ctx->ch_state_lhb2);
+		ctx = get_first_ch_ctx(xprt_ptr);
 	}
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
 	list_for_each_entry_safe(temp_lcid, temp_lcid1,
 			&xprt_ptr->free_lcid_list, list_node) {
 		list_del(&temp_lcid->list_node);
 		kfree(&temp_lcid->list_node);
 	}
-	dummy_xprt_ctx->dummy_in_use = false;
 	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
-	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
 	rwref_read_put(&xprt_ptr->xprt_state_lhb0);
 
 	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	dummy_xprt_ctx->dummy_in_use = false;
 	while (!list_empty(&dummy_xprt_ctx->channels)) {
 		ctx = list_first_entry(&dummy_xprt_ctx->channels,
 					struct channel_ctx, port_list_node);
@@ -5275,7 +5393,7 @@
 			struct glink_core_xprt_ctx *xprt_ctx)
 {
 	unsigned long flags;
-	struct glink_core_tx_pkt *tx_info;
+	struct glink_core_tx_pkt *tx_info, *temp_tx_info;
 	size_t txd_len = 0;
 	size_t tx_len = 0;
 	uint32_t num_pkts = 0;
@@ -5310,6 +5428,20 @@
 						ctx->lcid, tx_info);
 		}
 		spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+		if (!list_empty(&ctx->tx_active)) {
+			/*
+			 * Verify that the same tx_info still exists in the
+			 * tx_active list and was not removed during tx.
+			 * It can happen if SSR and tx done both happen
+			 * before tx_lists_lock_lhc3 is taken.
+			 */
+			temp_tx_info = list_first_entry(&ctx->tx_active,
+					struct glink_core_tx_pkt, list_node);
+			if (temp_tx_info != tx_info)
+				continue;
+		} else {
+			break;
+		}
 		if (ret == -EAGAIN) {
 			/*
 			 * transport unable to send at the moment and will call
@@ -5336,6 +5468,7 @@
 			 * Break out of the loop so that the scheduler can
 			 * continue with the next channel.
 			 */
+			rwref_put(&tx_info->pkt_ref);
 			break;
 		}
 
@@ -5343,8 +5476,8 @@
 		if (!tx_info->size_remaining) {
 			num_pkts++;
 			list_del_init(&tx_info->list_node);
-			rwref_put(&tx_info->pkt_ref);
 		}
+		rwref_put(&tx_info->pkt_ref);
 	}
 
 	ctx->txd_len += txd_len;
@@ -5393,6 +5526,7 @@
 		glink_pm_qos_vote(xprt_ptr);
 		ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
 				struct channel_ctx, tx_ready_list_node);
+		rwref_get(&ch_ptr->ch_state_lhb2);
 		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
 
 		if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
@@ -5404,6 +5538,7 @@
 			GLINK_ERR_XPRT(xprt_ptr,
 				"%s: Unable to send data on this transport.\n",
 				__func__);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		}
 		transmitted_successfully = false;
@@ -5414,6 +5549,7 @@
 			 * transport unable to send at the moment and will call
 			 * tx_resume() when it can send again.
 			 */
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		} else if (ret < 0) {
 			/*
@@ -5426,6 +5562,7 @@
 			GLINK_ERR_XPRT(xprt_ptr,
 					"%s: unrecoverable xprt failure %d\n",
 					__func__, ret);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		} else if (!ret) {
 			/*
@@ -5437,6 +5574,7 @@
 			list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
 			spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3,
 						flags);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			continue;
 		}
 
@@ -5454,6 +5592,7 @@
 
 		tx_ready_head = NULL;
 		transmitted_successfully = true;
+		rwref_put(&ch_ptr->ch_state_lhb2);
 	}
 	glink_pm_qos_unvote(xprt_ptr);
 	GLINK_PERF("%s: worker exiting\n", __func__);
@@ -6024,6 +6163,7 @@
 static int glink_init(void)
 {
 	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+	rwlock_init(&magic_lock);
 	if (!log_ctx)
 		GLINK_ERR("%s: unable to create log context\n", __func__);
 	glink_debugfs_init();
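
The canary added above gives every external API a cheap way to reject stale or corrupted handles before touching channel state: glink_open() stamps GLINK_CTX_CANARY, each entry point validates it under the magic_lock read lock while taking a channel reference, and glink_close() clears it under the write lock. The resulting caller pattern, sketched for illustration only (the function below is hypothetical, not part of the patch):

	int glink_api_template(void *handle)
	{
		struct channel_ctx *ctx = (struct channel_ctx *)handle;
		int ret;

		ret = glink_get_ch_ctx(ctx);	/* canary check + ch_state ref */
		if (ret)
			return ret;

		/* ... operate on ctx while the reference is held ... */

		glink_put_ch_ctx(ctx, false);	/* drop ref; canary untouched */
		return 0;
	}
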
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index ef886b2..266c0a2 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,7 +51,7 @@
 #define RPM_MAX_TOC_ENTRIES 20
 #define RPM_FIFO_ADDR_ALIGN_BYTES 3
 #define TRACER_PKT_FEATURE BIT(2)
-
+#define DEFERRED_CMDS_THRESHOLD 25
 /**
  * enum command_types - definition of the types of commands sent/received
  * @VERSION_CMD:		Version and feature set supported
@@ -181,6 +181,7 @@
  *				processing.
  * @deferred_cmds:		List of deferred commands that need to be
  *				processed in process context.
+ * @deferred_cmds_cnt:		Number of deferred commands in queue.
  * @num_pw_states:		Size of @ramp_time_us.
  * @ramp_time_us:		Array of ramp times in microseconds where array
  *				index position represents a power state.
@@ -212,12 +213,14 @@
 	bool tx_blocked_signal_sent;
 	struct kthread_work kwork;
 	struct kthread_worker kworker;
+	struct work_struct wakeup_work;
 	struct task_struct *task;
 	struct tasklet_struct tasklet;
 	struct srcu_struct use_ref;
 	bool in_ssr;
 	spinlock_t rx_lock;
 	struct list_head deferred_cmds;
+	uint32_t deferred_cmds_cnt;
 	uint32_t num_pw_states;
 	unsigned long *ramp_time_us;
 	struct mailbox_config_info *mailbox;
@@ -700,10 +703,15 @@
 	} else if (intent->data == NULL) {
 		if (einfo->intentless) {
 			intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
-			if (!intent->data)
+			if (!intent->data) {
 				err = true;
-			else
+				GLINK_ERR(
+				"%s: atomic alloc fail ch %d liid %d size %d\n",
+						__func__, rcid, intent_id,
+						cmd.frag_size);
+			} else {
 				intent->intent_size = cmd.frag_size;
+			}
 		} else {
 			GLINK_ERR(
 				"%s: intent for ch %d liid %d has no data buff\n",
@@ -792,6 +800,7 @@
 	d_cmd->param2 = _cmd->param2;
 	d_cmd->data = data;
 	list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
+	einfo->deferred_cmds_cnt++;
 	kthread_queue_work(&einfo->kworker, &einfo->kwork);
 	return true;
 }
@@ -866,20 +875,10 @@
 		srcu_read_unlock(&einfo->use_ref, rcu_id);
 		return;
 	}
-	if (!atomic_ctx) {
-		if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
-			einfo->tx_resume_needed = false;
-			einfo->xprt_if.glink_core_if_ptr->tx_resume(
-							&einfo->xprt_if);
-		}
-		spin_lock_irqsave(&einfo->write_lock, flags);
-		if (einfo->tx_blocked_signal_sent) {
-			wake_up_all(&einfo->tx_blocked_queue);
-			einfo->tx_blocked_signal_sent = false;
-		}
-		spin_unlock_irqrestore(&einfo->write_lock, flags);
-	}
 
+	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
+		(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting? */
+		schedule_work(&einfo->wakeup_work);
 
 	/*
 	 * Access to the fifo needs to be synchronized, however only the calls
@@ -898,10 +897,15 @@
 		if (einfo->in_ssr)
 			break;
 
+		if (atomic_ctx && !einfo->intentless &&
+		    einfo->deferred_cmds_cnt >= DEFERRED_CMDS_THRESHOLD)
+			break;
+
 		if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
 			d_cmd = list_first_entry(&einfo->deferred_cmds,
 						struct deferred_cmd, list_node);
 			list_del(&d_cmd->list_node);
+			einfo->deferred_cmds_cnt--;
 			cmd.id = d_cmd->id;
 			cmd.param1 = d_cmd->param1;
 			cmd.param2 = d_cmd->param2;
@@ -1182,6 +1186,39 @@
 }
 
 /**
+ * tx_wakeup_worker() - worker function to wake up threads blocked on tx
+ * @work:	work item associated with the edge whose blocked tx needs waking.
+ */
+static void tx_wakeup_worker(struct work_struct *work)
+{
+	struct edge_info *einfo;
+	bool trigger_wakeup = false;
+	unsigned long flags;
+	int rcu_id;
+
+	einfo = container_of(work, struct edge_info, wakeup_work);
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+	if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+		einfo->tx_resume_needed = false;
+		einfo->xprt_if.glink_core_if_ptr->tx_resume(
+						&einfo->xprt_if);
+	}
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
+		einfo->tx_blocked_signal_sent = false;
+		trigger_wakeup = true;
+	}
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+	if (trigger_wakeup)
+		wake_up_all(&einfo->tx_blocked_queue);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
  * rx_worker() - worker function to process received commands
  * @work:	kwork associated with the edge to process commands on.
  */
@@ -2290,6 +2327,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2389,6 +2427,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2476,6 +2515,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->intentless = true;
 	einfo->read_from_fifo = memcpy32_fromio;
@@ -2636,6 +2676,7 @@
 reg_xprt_fail:
 toc_init_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2767,6 +2808,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2887,6 +2929,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
index 0ff92cd..e02c07a 100644
--- a/drivers/soc/qcom/glink_spi_xprt.c
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -789,9 +789,9 @@
 				offset += sizeof(*intents);
 				einfo->xprt_if.glink_core_if_ptr->
 					rx_cmd_remote_rx_intent_put_cookie(
-						&einfo->xprt_if, cmd->param1,
-						intents->id, intents->size,
-						(void *)(intents->addr));
+					&einfo->xprt_if, cmd->param1,
+					intents->id, intents->size,
+					(void *)(uintptr_t)(intents->addr));
 			}
 			break;
 
@@ -821,9 +821,10 @@
 		case TRACER_PKT_CONT_CMD:
 			rx_descp = (struct rx_desc *)(rx_data + offset);
 			offset += sizeof(*rx_descp);
-			process_rx_data(einfo, cmd->id,	cmd->param1,
-					cmd->param2, (void *)rx_descp->addr,
-					rx_descp->size,	rx_descp->size_left);
+			process_rx_data(einfo, cmd->id, cmd->param1,
+					cmd->param2,
+					(void *)(uintptr_t)(rx_descp->addr),
+					rx_descp->size, rx_descp->size_left);
 			break;
 
 		case TX_SHORT_DATA_CMD:
@@ -875,23 +876,22 @@
 	int rcu_id;
 
 	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
 	if (unlikely(!einfo->rx_fifo_start)) {
 		rx_avail = glink_spi_xprt_read_avail(einfo);
 		if (!rx_avail) {
 			srcu_read_unlock(&einfo->use_ref, rcu_id);
 			return;
 		}
-		einfo->in_ssr = false;
 		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
 	}
 
-	if (einfo->in_ssr) {
-		srcu_read_unlock(&einfo->use_ref, rcu_id);
-		return;
-	}
-
 	glink_spi_xprt_set_poll_mode(einfo);
-	while (inactive_cycles < MAX_INACTIVE_CYCLES) {
+	do {
 		if (einfo->tx_resume_needed &&
 		    glink_spi_xprt_write_avail(einfo)) {
 			einfo->tx_resume_needed = false;
@@ -926,7 +926,7 @@
 		}
 		process_rx_cmd(einfo, rx_data, rx_avail);
 		kfree(rx_data);
-	}
+	} while (inactive_cycles < MAX_INACTIVE_CYCLES && !einfo->in_ssr);
 	glink_spi_xprt_set_irq_mode(einfo);
 	srcu_read_unlock(&einfo->use_ref, rcu_id);
 }
@@ -1818,9 +1818,16 @@
 		spi_dev = to_spi_device(sdev);
 		einfo->spi_dev = spi_dev;
 		break;
+	case WDSP_EVENT_POST_BOOTUP:
+		einfo->in_ssr = false;
+		synchronize_srcu(&einfo->use_ref);
+		/* Fall through to trigger a dummy rx_worker run */
 	case WDSP_EVENT_IPC1_INTR:
 		kthread_queue_work(&einfo->kworker, &einfo->kwork);
 		break;
+	case WDSP_EVENT_PRE_SHUTDOWN:
+		ssr(&einfo->xprt_if);
+		break;
 	default:
 		pr_debug("%s: unhandled event %d", __func__, event);
 		break;
@@ -2040,7 +2047,6 @@
 	init_xprt_cfg(einfo, subsys_name);
 	init_xprt_if(einfo);
 
-	einfo->in_ssr = true;
 	einfo->fifo_size = DEFAULT_FIFO_SIZE;
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index f36e7fc..b24598a 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -80,6 +80,19 @@
 };
 
 /**
+ * struct rx_done_ch_work - Work structure used for sending rx_done on
+ *				glink_ssr channels
+ * handle:	G-Link channel handle to be used for sending rx_done
+ * ptr:		Intent pointer data provided in notify rx function
+ * work:	Work structure
+ */
+struct rx_done_ch_work {
+	void *handle;
+	const void *ptr;
+	struct work_struct work;
+};
+
+/**
  * struct close_ch_work - Work structure for used for closing glink_ssr channels
  * edge:	The G-Link edge name for the channel being closed
  * handle:	G-Link channel handle to be closed
@@ -102,6 +115,15 @@
 static atomic_t responses_remaining = ATOMIC_INIT(0);
 static wait_queue_head_t waitqueue;
 
+static void rx_done_cb_worker(struct work_struct *work)
+{
+	struct rx_done_ch_work *rx_done_work =
+		container_of(work, struct rx_done_ch_work, work);
+
+	glink_rx_done(rx_done_work->handle, rx_done_work->ptr, false);
+	kfree(rx_done_work);
+}
+
 static void link_state_cb_worker(struct work_struct *work)
 {
 	unsigned long flags;
@@ -196,7 +218,14 @@
 {
 	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
 	struct cleanup_done_msg *resp = (struct cleanup_done_msg *)ptr;
+	struct rx_done_ch_work *rx_done_work;
 
+	rx_done_work = kmalloc(sizeof(*rx_done_work), GFP_ATOMIC);
+	if (!rx_done_work) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate rx_done_work\n",
+				__func__);
+		return;
+	}
 	if (unlikely(!cb_data))
 		goto missing_cb_data;
 	if (unlikely(!cb_data->do_cleanup_data))
@@ -221,6 +250,10 @@
 
 	kfree(cb_data->do_cleanup_data);
 	cb_data->do_cleanup_data = NULL;
+	rx_done_work->ptr = ptr;
+	rx_done_work->handle = handle;
+	INIT_WORK(&rx_done_work->work, rx_done_cb_worker);
+	queue_work(glink_ssr_wq, &rx_done_work->work);
 	wake_up(&waitqueue);
 	return;
 
@@ -308,6 +341,7 @@
 	if (WARN_ON(!ss_info->cb_data))
 		return;
 	kfree(ss_info->cb_data);
+	ss_info->cb_data = NULL;
 	kfree(close_work);
 }
 
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 722127d..0b35caa 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -48,6 +48,11 @@
 #include <soc/qcom/socinfo.h>
 #include <soc/qcom/ramdump.h>
 
+#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
+#include <net/cnss_prealloc.h>
+#endif
+
+
 #include "wlan_firmware_service_v01.h"
 
 #ifdef CONFIG_ICNSS_DEBUG
@@ -62,7 +67,7 @@
 #define WLFW_CLIENT_ID			0x4b4e454c
 #define MAX_PROP_SIZE			32
 #define NUM_LOG_PAGES			10
-#define NUM_REG_LOG_PAGES		4
+#define NUM_LOG_LONG_PAGES		4
 #define ICNSS_MAGIC			0x5abc5abc
 
 #define ICNSS_SERVICE_LOCATION_CLIENT_NAME			"ICNSS-WLAN"
@@ -77,14 +82,10 @@
 		ipc_log_string(icnss_ipc_log_context, _x);		\
 	} while (0)
 
-#ifdef CONFIG_ICNSS_DEBUG
 #define icnss_ipc_log_long_string(_x...) do {				\
 	if (icnss_ipc_log_long_context)					\
 		ipc_log_string(icnss_ipc_log_long_context, _x);		\
 	} while (0)
-#else
-#define icnss_ipc_log_long_string(_x...)
-#endif
 
 #define icnss_pr_err(_fmt, ...) do {					\
 		pr_err(_fmt, ##__VA_ARGS__);				\
@@ -110,28 +111,25 @@
 				     ##__VA_ARGS__);			\
 	} while (0)
 
-#define icnss_reg_dbg(_fmt, ...) do {				\
+#define icnss_pr_vdbg(_fmt, ...) do {					\
 		pr_debug(_fmt, ##__VA_ARGS__);				\
-		icnss_ipc_log_long_string("REG: " pr_fmt(_fmt),		\
+		icnss_ipc_log_long_string("DBG: " pr_fmt(_fmt),		\
 				     ##__VA_ARGS__);			\
 	} while (0)
 
 #ifdef CONFIG_ICNSS_DEBUG
 #define ICNSS_ASSERT(_condition) do {					\
 		if (!(_condition)) {					\
-			icnss_pr_err("ASSERT at line %d\n",		\
-				     __LINE__);				\
+			icnss_pr_err("ASSERT at line %d\n", __LINE__);	\
 			BUG_ON(1);					\
 		}							\
 	} while (0)
+
+bool ignore_qmi_timeout;
+#define ICNSS_QMI_ASSERT() ICNSS_ASSERT(ignore_qmi_timeout)
 #else
-#define ICNSS_ASSERT(_condition) do {					\
-		if (!(_condition)) {					\
-			icnss_pr_err("ASSERT at line %d\n",		\
-				     __LINE__);				\
-			WARN_ON(1);					\
-		}							\
-	} while (0)
+#define ICNSS_ASSERT(_condition) do { } while (0)
+#define ICNSS_QMI_ASSERT() do { } while (0)
 #endif
 
 enum icnss_debug_quirks {
@@ -156,10 +154,7 @@
 module_param(dynamic_feature_mask, ullong, 0600);
 
 void *icnss_ipc_log_context;
-
-#ifdef CONFIG_ICNSS_DEBUG
 void *icnss_ipc_log_long_context;
-#endif
 
 #define ICNSS_EVENT_PENDING			2989
 
@@ -181,6 +176,7 @@
 struct icnss_event_pd_service_down_data {
 	bool crashed;
 	bool fw_rejuvenate;
+	bool wdog_bite;
 };
 
 struct icnss_driver_event {
@@ -205,6 +201,7 @@
 	ICNSS_PD_RESTART,
 	ICNSS_MSA0_ASSIGNED,
 	ICNSS_WLFW_EXISTS,
+	ICNSS_WDOG_BITE,
 };
 
 struct ce_irq_list {
@@ -212,6 +209,38 @@
 	irqreturn_t (*handler)(int, void *);
 };
 
+struct icnss_vreg_info {
+	struct regulator *reg;
+	const char *name;
+	u32 min_v;
+	u32 max_v;
+	u32 load_ua;
+	unsigned long settle_delay;
+	bool required;
+};
+
+struct icnss_clk_info {
+	struct clk *handle;
+	const char *name;
+	u32 freq;
+	bool required;
+};
+
+static struct icnss_vreg_info icnss_vreg_info[] = {
+	{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+#define ICNSS_VREG_INFO_SIZE		ARRAY_SIZE(icnss_vreg_info)
+
+static struct icnss_clk_info icnss_clk_info[] = {
+	{NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ICNSS_CLK_INFO_SIZE		ARRAY_SIZE(icnss_clk_info)
+
 struct icnss_stats {
 	struct {
 		uint32_t posted;
@@ -265,6 +294,7 @@
 	uint32_t rejuvenate_ack_req;
 	uint32_t rejuvenate_ack_resp;
 	uint32_t rejuvenate_ack_err;
+	uint32_t trigger_recovery;
 };
 
 #define MAX_NO_OF_MAC_ADDR 4
@@ -284,6 +314,8 @@
 	struct platform_device *pdev;
 	struct icnss_driver_ops *ops;
 	struct ce_irq_list ce_irq_list[ICNSS_MAX_IRQ_REGISTRATIONS];
+	struct icnss_vreg_info vreg_info[ICNSS_VREG_INFO_SIZE];
+	struct icnss_clk_info clk_info[ICNSS_CLK_INFO_SIZE];
 	u32 ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
 	phys_addr_t mem_base_pa;
 	void __iomem *mem_base_va;
@@ -310,8 +342,9 @@
 	u32 pwr_pin_result;
 	u32 phy_io_pin_result;
 	u32 rf_pin_result;
+	uint32_t nr_mem_region;
 	struct icnss_mem_region_info
-		icnss_mem_region[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+		mem_region[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
 	struct dentry *root_dentry;
 	spinlock_t on_off_lock;
 	struct icnss_stats stats;
@@ -334,14 +367,24 @@
 	struct ramdump_device *msa0_dump_dev;
 	bool is_wlan_mac_set;
 	struct icnss_wlan_mac_addr wlan_mac_addr;
+	bool bypass_s1_smmu;
 } *penv;
 
+#ifdef CONFIG_ICNSS_DEBUG
+static void icnss_ignore_qmi_timeout(bool ignore)
+{
+	ignore_qmi_timeout = ignore;
+}
+#else
+static void icnss_ignore_qmi_timeout(bool ignore) { }
+#endif
+
 static void icnss_pm_stay_awake(struct icnss_priv *priv)
 {
 	if (atomic_inc_return(&priv->pm_count) != 1)
 		return;
 
-	icnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
+	icnss_pr_vdbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
 		     atomic_read(&priv->pm_count));
 
 	pm_stay_awake(&priv->pdev->dev);
@@ -358,7 +401,7 @@
 	if (r != 0)
 		return;
 
-	icnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
+	icnss_pr_vdbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
 		     atomic_read(&priv->pm_count));
 
 	pm_relax(&priv->pdev->dev);
@@ -680,41 +723,220 @@
 	return ret;
 }
 
+static int icnss_vreg_on(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct icnss_vreg_info *vreg_info;
+	int i;
+
+	for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+		vreg_info = &priv->vreg_info[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		icnss_pr_vdbg("Regulator %s being enabled\n", vreg_info->name);
+
+		ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+					    vreg_info->max_v);
+		if (ret) {
+			icnss_pr_err("Regulator %s, can't set voltage: min_v: %u, max_v: %u, ret: %d\n",
+				     vreg_info->name, vreg_info->min_v,
+				     vreg_info->max_v, ret);
+			break;
+		}
+
+		if (vreg_info->load_ua) {
+			ret = regulator_set_load(vreg_info->reg,
+						 vreg_info->load_ua);
+			if (ret < 0) {
+				icnss_pr_err("Regulator %s, can't set load: %u, ret: %d\n",
+					     vreg_info->name,
+					     vreg_info->load_ua, ret);
+				break;
+			}
+		}
+
+		ret = regulator_enable(vreg_info->reg);
+		if (ret) {
+			icnss_pr_err("Regulator %s, can't enable: %d\n",
+				     vreg_info->name, ret);
+			break;
+		}
+
+		if (vreg_info->settle_delay)
+			udelay(vreg_info->settle_delay);
+	}
+
+	if (!ret)
+		return 0;
+
+	for (; i >= 0; i--) {
+		vreg_info = &priv->vreg_info[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		regulator_disable(vreg_info->reg);
+		regulator_set_load(vreg_info->reg, 0);
+		regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+	}
+
+	return ret;
+}
+
+static int icnss_vreg_off(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct icnss_vreg_info *vreg_info;
+	int i;
+
+	for (i = ICNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
+		vreg_info = &priv->vreg_info[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		icnss_pr_vdbg("Regulator %s being disabled\n", vreg_info->name);
+
+		ret = regulator_disable(vreg_info->reg);
+		if (ret)
+			icnss_pr_err("Regulator %s, can't disable: %d\n",
+				     vreg_info->name, ret);
+
+		ret = regulator_set_load(vreg_info->reg, 0);
+		if (ret < 0)
+			icnss_pr_err("Regulator %s, can't set load: %d\n",
+				     vreg_info->name, ret);
+
+		ret = regulator_set_voltage(vreg_info->reg, 0,
+					    vreg_info->max_v);
+		if (ret)
+			icnss_pr_err("Regulator %s, can't set voltage: %d\n",
+				     vreg_info->name, ret);
+	}
+
+	return ret;
+}
+
+static int icnss_clk_init(struct icnss_priv *priv)
+{
+	struct icnss_clk_info *clk_info;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+		clk_info = &priv->clk_info[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		icnss_pr_vdbg("Clock %s being enabled\n", clk_info->name);
+
+		if (clk_info->freq) {
+			ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+			if (ret) {
+				icnss_pr_err("Clock %s, can't set frequency: %u, ret: %d\n",
+					     clk_info->name, clk_info->freq,
+					     ret);
+				break;
+			}
+		}
+
+		ret = clk_prepare_enable(clk_info->handle);
+		if (ret) {
+			icnss_pr_err("Clock %s, can't enable: %d\n",
+				     clk_info->name, ret);
+			break;
+		}
+	}
+
+	if (ret == 0)
+		return 0;
+
+	for (; i >= 0; i--) {
+		clk_info = &priv->clk_info[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		clk_disable_unprepare(clk_info->handle);
+	}
+
+	return ret;
+}
+
+static int icnss_clk_deinit(struct icnss_priv *priv)
+{
+	struct icnss_clk_info *clk_info;
+	int i;
+
+	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+		clk_info = &priv->clk_info[i];
+
+		if (!clk_info->handle)
+			continue;
+
+		icnss_pr_vdbg("Clock %s being disabled\n", clk_info->name);
+
+		clk_disable_unprepare(clk_info->handle);
+	}
+
+	return 0;
+}
+
 static int icnss_hw_power_on(struct icnss_priv *priv)
 {
 	int ret = 0;
-	unsigned long flags;
 
 	icnss_pr_dbg("HW Power on: state: 0x%lx\n", priv->state);
 
-	spin_lock_irqsave(&priv->on_off_lock, flags);
+	spin_lock(&priv->on_off_lock);
 	if (test_bit(ICNSS_POWER_ON, &priv->state)) {
-		spin_unlock_irqrestore(&priv->on_off_lock, flags);
+		spin_unlock(&priv->on_off_lock);
 		return ret;
 	}
 	set_bit(ICNSS_POWER_ON, &priv->state);
-	spin_unlock_irqrestore(&priv->on_off_lock, flags);
+	spin_unlock(&priv->on_off_lock);
 
+	ret = icnss_vreg_on(priv);
+	if (ret)
+		goto out;
+
+	ret = icnss_clk_init(priv);
+	if (ret)
+		goto vreg_off;
+
+	return ret;
+
+vreg_off:
+	icnss_vreg_off(priv);
+out:
+	clear_bit(ICNSS_POWER_ON, &priv->state);
 	return ret;
 }
 
 static int icnss_hw_power_off(struct icnss_priv *priv)
 {
 	int ret = 0;
-	unsigned long flags;
 
 	if (test_bit(HW_ALWAYS_ON, &quirks))
 		return 0;
 
 	icnss_pr_dbg("HW Power off: 0x%lx\n", priv->state);
 
-	spin_lock_irqsave(&priv->on_off_lock, flags);
+	spin_lock(&priv->on_off_lock);
 	if (!test_bit(ICNSS_POWER_ON, &priv->state)) {
-		spin_unlock_irqrestore(&priv->on_off_lock, flags);
+		spin_unlock(&priv->on_off_lock);
 		return ret;
 	}
 	clear_bit(ICNSS_POWER_ON, &priv->state);
-	spin_unlock_irqrestore(&priv->on_off_lock, flags);
+	spin_unlock(&priv->on_off_lock);
+
+	icnss_clk_deinit(priv);
+
+	ret = icnss_vreg_off(priv);
 
 	return ret;
 }
@@ -760,7 +982,7 @@
 }
 EXPORT_SYMBOL(icnss_power_off);
 
-static int icnss_map_msa_permissions(struct icnss_priv *priv, u32 index)
+static int icnss_map_msa_permissions(struct icnss_mem_region_info *mem_region)
 {
 	int ret = 0;
 	phys_addr_t addr;
@@ -773,10 +995,10 @@
 	int source_nelems = sizeof(source_vmlist)/sizeof(u32);
 	int dest_nelems = 0;
 
-	addr = priv->icnss_mem_region[index].reg_addr;
-	size = priv->icnss_mem_region[index].size;
+	addr = mem_region->reg_addr;
+	size = mem_region->size;
 
-	if (!priv->icnss_mem_region[index].secure_flag) {
+	if (!mem_region->secure_flag) {
 		dest_vmids[2] = VMID_WLAN_CE;
 		dest_nelems = 3;
 	} else {
@@ -786,19 +1008,20 @@
 	ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
 			      dest_vmids, dest_perms, dest_nelems);
 	if (ret) {
-		icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
-			     index, &addr, size, ret);
+		icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
+			     &addr, size, ret);
 		goto out;
 	}
-	icnss_pr_dbg("Hypervisor map for region %u: source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
-		     index, source_vmlist[0], dest_nelems,
-		     dest_vmids[0], dest_vmids[1], dest_vmids[2]);
+
+	icnss_pr_dbg("Hypervisor map for source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
+		     source_vmlist[0], dest_nelems, dest_vmids[0],
+		     dest_vmids[1], dest_vmids[2]);
 out:
 	return ret;
 
 }
 
-static int icnss_unmap_msa_permissions(struct icnss_priv *priv, u32 index)
+static int icnss_unmap_msa_permissions(struct icnss_mem_region_info *mem_region)
 {
 	int ret = 0;
 	phys_addr_t addr;
@@ -809,9 +1032,10 @@
 	int source_nelems = 0;
 	int dest_nelems = sizeof(dest_vmids)/sizeof(u32);
 
-	addr = priv->icnss_mem_region[index].reg_addr;
-	size = priv->icnss_mem_region[index].size;
-	if (!priv->icnss_mem_region[index].secure_flag) {
+	addr = mem_region->reg_addr;
+	size = mem_region->size;
+
+	if (!mem_region->secure_flag) {
 		source_vmlist[2] = VMID_WLAN_CE;
 		source_nelems = 3;
 	} else {
@@ -822,14 +1046,13 @@
 	ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
 			      dest_vmids, dest_perms, dest_nelems);
 	if (ret) {
-		icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
-			     index, &addr, size, ret);
+		icnss_pr_err("Hyperviser unmap failed for PA=%pa size=%u err=%d\n",
+			     &addr, size, ret);
 		goto out;
 	}
-	icnss_pr_dbg("hypervisor unmap for region %u, source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n",
-		     index, source_nelems,
-		     source_vmlist[0], source_vmlist[1], source_vmlist[2],
-		     dest_vmids[0]);
+	icnss_pr_dbg("Hypervisor unmap for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n",
+		     source_nelems, source_vmlist[0], source_vmlist[1],
+		     source_vmlist[2], dest_vmids[0]);
 out:
 	return ret;
 }
@@ -837,34 +1060,37 @@
 static int icnss_setup_msa_permissions(struct icnss_priv *priv)
 {
 	int ret;
+	int i;
 
 	if (test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
 		return 0;
 
-	ret = icnss_map_msa_permissions(priv, 0);
-	if (ret)
-		return ret;
+	for (i = 0; i < priv->nr_mem_region; i++) {
 
-	ret = icnss_map_msa_permissions(priv, 1);
-	if (ret)
-		goto err_map_msa;
+		ret = icnss_map_msa_permissions(&priv->mem_region[i]);
+		if (ret)
+			goto err_unmap;
+	}
 
 	set_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
 
-	return ret;
+	return 0;
 
-err_map_msa:
-	icnss_unmap_msa_permissions(priv, 0);
+err_unmap:
+	for (i--; i >= 0; i--)
+		icnss_unmap_msa_permissions(&priv->mem_region[i]);
 	return ret;
 }
 
 static void icnss_remove_msa_permissions(struct icnss_priv *priv)
 {
+	int i;
+
 	if (!test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
 		return;
 
-	icnss_unmap_msa_permissions(priv, 0);
-	icnss_unmap_msa_permissions(priv, 1);
+	for (i = 0; i < priv->nr_mem_region; i++)
+		icnss_unmap_msa_permissions(&priv->mem_region[i]);
 
 	clear_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
 }
@@ -915,7 +1141,7 @@
 	icnss_pr_dbg("Receive mem_region_info_len: %d\n",
 		     resp.mem_region_info_len);
 
-	if (resp.mem_region_info_len > 2) {
+	if (resp.mem_region_info_len > QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01) {
 		icnss_pr_err("Invalid memory region length received: %d\n",
 			     resp.mem_region_info_len);
 		ret = -EINVAL;
@@ -923,24 +1149,25 @@
 	}
 
 	penv->stats.msa_info_resp++;
+	penv->nr_mem_region = resp.mem_region_info_len;
 	for (i = 0; i < resp.mem_region_info_len; i++) {
-		penv->icnss_mem_region[i].reg_addr =
+		penv->mem_region[i].reg_addr =
 			resp.mem_region_info[i].region_addr;
-		penv->icnss_mem_region[i].size =
+		penv->mem_region[i].size =
 			resp.mem_region_info[i].size;
-		penv->icnss_mem_region[i].secure_flag =
+		penv->mem_region[i].secure_flag =
 			resp.mem_region_info[i].secure_flag;
 		icnss_pr_dbg("Memory Region: %d Addr: 0x%llx Size: 0x%x Flag: 0x%08x\n",
-			 i, penv->icnss_mem_region[i].reg_addr,
-			 penv->icnss_mem_region[i].size,
-			 penv->icnss_mem_region[i].secure_flag);
+			     i, penv->mem_region[i].reg_addr,
+			     penv->mem_region[i].size,
+			     penv->mem_region[i].secure_flag);
 	}
 
 	return 0;
 
 out:
 	penv->stats.msa_info_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -988,7 +1215,7 @@
 
 out:
 	penv->stats.msa_ready_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -1051,7 +1278,7 @@
 
 out:
 	penv->stats.ind_register_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -1120,7 +1347,7 @@
 
 out:
 	penv->stats.cap_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -1181,7 +1408,7 @@
 
 out:
 	penv->stats.mode_req_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -1231,7 +1458,7 @@
 
 out:
 	penv->stats.cfg_req_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -1284,7 +1511,7 @@
 
 out:
 	penv->stats.ini_req_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -1339,7 +1566,7 @@
 		goto out;
 	}
 
-	if (!resp->data_valid || resp->data_len <= data_len) {
+	if (!resp->data_valid || resp->data_len < data_len) {
 		icnss_pr_err("Athdiag read data is invalid, data_valid = %u, data_len = %u\n",
 			     resp->data_valid, resp->data_len);
 		ret = -EINVAL;
@@ -1450,7 +1677,7 @@
 
 out:
 	priv->stats.rejuvenate_ack_err++;
-	ICNSS_ASSERT(false);
+	ICNSS_QMI_ASSERT();
 	return ret;
 }
 
@@ -1524,7 +1751,7 @@
 	if (!penv || !penv->wlfw_clnt)
 		return;
 
-	icnss_pr_dbg("Receiving Event in work queue context\n");
+	icnss_pr_vdbg("Receiving Event in work queue context\n");
 
 	do {
 	} while ((ret = qmi_recv_msg(penv->wlfw_clnt)) == 0);
@@ -1532,13 +1759,13 @@
 	if (ret != -ENOMSG)
 		icnss_pr_err("Error receiving message: %d\n", ret);
 
-	icnss_pr_dbg("Receiving Event completed\n");
+	icnss_pr_vdbg("Receiving Event completed\n");
 }
 
 static void icnss_qmi_wlfw_clnt_notify(struct qmi_handle *handle,
 			     enum qmi_event_type event, void *notify_priv)
 {
-	icnss_pr_dbg("QMI client notify: %d\n", event);
+	icnss_pr_vdbg("QMI client notify: %d\n", event);
 
 	if (!penv || !penv->wlfw_clnt)
 		return;
@@ -1553,11 +1780,29 @@
 	}
 }
 
+static int icnss_call_driver_uevent(struct icnss_priv *priv,
+				    enum icnss_uevent uevent, void *data)
+{
+	struct icnss_uevent_data uevent_data;
+
+	if (!priv->ops || !priv->ops->uevent)
+		return 0;
+
+	icnss_pr_dbg("Calling driver uevent state: 0x%lx, uevent: %d\n",
+		     priv->state, uevent);
+
+	uevent_data.uevent = uevent;
+	uevent_data.data = data;
+
+	return priv->ops->uevent(&priv->pdev->dev, &uevent_data);
+}
+
 static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
 			  unsigned int msg_id, void *msg,
 			  unsigned int msg_len, void *ind_cb_priv)
 {
 	struct icnss_event_pd_service_down_data *event_data;
+	struct icnss_uevent_fw_down_data fw_down_data;
 
 	if (!penv)
 		return;
@@ -1582,11 +1827,16 @@
 	case QMI_WLFW_REJUVENATE_IND_V01:
 		icnss_pr_dbg("Received Rejuvenate Indication msg_id 0x%x, state: 0x%lx\n",
 			     msg_id, penv->state);
+
+		icnss_ignore_qmi_timeout(true);
 		event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
 		if (event_data == NULL)
 			return;
 		event_data->crashed = true;
 		event_data->fw_rejuvenate = true;
+		fw_down_data.crashed = true;
+		icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_DOWN,
+					 &fw_down_data);
 		icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
 					0, event_data);
 		break;
@@ -1707,6 +1957,9 @@
 	if (!priv->ops || !priv->ops->probe)
 		return 0;
 
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		return -EINVAL;
+
 	icnss_pr_dbg("Calling driver probe state: 0x%lx\n", priv->state);
 
 	icnss_hw_power_on(priv);
@@ -1715,6 +1968,8 @@
 	if (ret < 0) {
 		icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
 			     ret, priv->state);
+		wcnss_prealloc_check_memory_leak();
+		wcnss_pre_alloc_reset();
 		goto out;
 	}
 
@@ -1727,17 +1982,39 @@
 	return ret;
 }
 
+static int icnss_call_driver_shutdown(struct icnss_priv *priv)
+{
+	if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+		goto out;
+
+	if (!priv->ops || !priv->ops->shutdown)
+		goto out;
+
+	icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
+
+	priv->ops->shutdown(&priv->pdev->dev);
+
+out:
+	return 0;
+}
+
 static int icnss_pd_restart_complete(struct icnss_priv *priv)
 {
 	int ret;
 
-	clear_bit(ICNSS_PD_RESTART, &priv->state);
 	icnss_pm_relax(priv);
 
+	if (test_bit(ICNSS_WDOG_BITE, &priv->state)) {
+		icnss_call_driver_shutdown(priv);
+		clear_bit(ICNSS_WDOG_BITE, &priv->state);
+	}
+
+	clear_bit(ICNSS_PD_RESTART, &priv->state);
+
 	if (!priv->ops || !priv->ops->reinit)
 		goto out;
 
-	if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
 		goto call_probe;
 
 	icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
@@ -1774,6 +2051,8 @@
 
 	set_bit(ICNSS_FW_READY, &penv->state);
 
+	icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_READY, NULL);
+
 	icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
 
 	icnss_hw_power_off(penv);
@@ -1820,6 +2099,8 @@
 	if (ret) {
 		icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
 			     ret, penv->state);
+		wcnss_prealloc_check_memory_leak();
+		wcnss_pre_alloc_reset();
 		goto power_off;
 	}
 
@@ -1845,6 +2126,8 @@
 		penv->ops->remove(&penv->pdev->dev);
 
 	clear_bit(ICNSS_DRIVER_PROBED, &penv->state);
+	wcnss_prealloc_check_memory_leak();
+	wcnss_pre_alloc_reset();
 
 	penv->ops = NULL;
 
@@ -1869,27 +2152,39 @@
 	penv->ops->remove(&priv->pdev->dev);
 
 	clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
+	wcnss_prealloc_check_memory_leak();
+	wcnss_pre_alloc_reset();
+
+	icnss_hw_power_off(penv);
 
 	return 0;
 }
 
-static int icnss_call_driver_shutdown(struct icnss_priv *priv)
+static int icnss_fw_crashed(struct icnss_priv *priv,
+			    struct icnss_event_pd_service_down_data *event_data)
 {
-	icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
+	icnss_pr_dbg("FW crashed, state: 0x%lx, wdog_bite: %d\n",
+		     priv->state, event_data->wdog_bite);
 
 	set_bit(ICNSS_PD_RESTART, &priv->state);
 	clear_bit(ICNSS_FW_READY, &priv->state);
 
 	icnss_pm_stay_awake(priv);
 
-	if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
-		return 0;
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_CRASHED, NULL);
 
-	if (!priv->ops || !priv->ops->shutdown)
-		return 0;
+	if (event_data->wdog_bite) {
+		set_bit(ICNSS_WDOG_BITE, &priv->state);
+		goto out;
+	}
 
-	priv->ops->shutdown(&priv->pdev->dev);
+	icnss_call_driver_shutdown(priv);
 
+	if (event_data->fw_rejuvenate)
+		wlfw_rejuvenate_ack_send_sync_msg(priv);
+
+out:
 	return 0;
 }
 
@@ -1900,7 +2195,7 @@
 	struct icnss_event_pd_service_down_data *event_data = data;
 
 	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
-		return 0;
+		goto out;
 
 	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
 		icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
@@ -1910,18 +2205,15 @@
 	}
 
 	if (event_data->crashed)
-		icnss_call_driver_shutdown(priv);
+		icnss_fw_crashed(priv, event_data);
 	else
 		icnss_call_driver_remove(priv);
 
-	if (event_data->fw_rejuvenate)
-		wlfw_rejuvenate_ack_send_sync_msg(priv);
-
 out:
-	ret = icnss_hw_power_off(priv);
-
 	kfree(data);
 
+	icnss_ignore_qmi_timeout(false);
+
 	return ret;
 }
 
@@ -2046,8 +2338,9 @@
 	struct notif_data *notif = data;
 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
 					       modem_ssr_nb);
+	struct icnss_uevent_fw_down_data fw_down_data;
 
-	icnss_pr_dbg("Modem-Notify: event %lu\n", code);
+	icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
 
 	if (code == SUBSYS_AFTER_SHUTDOWN &&
 		notif->crashed == CRASH_STATUS_ERR_FATAL) {
@@ -2063,7 +2356,10 @@
 	if (test_bit(ICNSS_PDR_ENABLED, &priv->state))
 		return NOTIFY_OK;
 
-	icnss_pr_info("Modem went down, state: %lx\n", priv->state);
+	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
+		      priv->state, notif->crashed);
+
+	icnss_ignore_qmi_timeout(true);
 
 	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
 
@@ -2072,6 +2368,12 @@
 
 	event_data->crashed = notif->crashed;
 
+	if (notif->crashed == CRASH_STATUS_WDOG_BITE)
+		event_data->wdog_bite = true;
+
+	fw_down_data.crashed = !!notif->crashed;
+	icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+
 	icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
 				ICNSS_EVENT_SYNC, event_data);
 
@@ -2135,31 +2437,47 @@
 					       service_notifier_nb);
 	enum pd_subsys_state *state = data;
 	struct icnss_event_pd_service_down_data *event_data;
+	struct icnss_uevent_fw_down_data fw_down_data;
 
-	switch (notification) {
-	case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
-		icnss_pr_info("Service down, data: 0x%p, state: 0x%lx\n", data,
-			      priv->state);
-		event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+	icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
+		     notification, priv->state);
 
-		if (event_data == NULL)
-			return notifier_from_errno(-ENOMEM);
+	if (notification != SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
+		goto done;
 
-		if (state == NULL || *state != ROOT_PD_SHUTDOWN)
-			event_data->crashed = true;
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
 
-		icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
-					ICNSS_EVENT_SYNC, event_data);
-		break;
-	case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
-		icnss_pr_dbg("Service up, state: 0x%lx\n", priv->state);
-		break;
-	default:
-		icnss_pr_dbg("Service state Unknown, notification: 0x%lx, state: 0x%lx\n",
-			     notification, priv->state);
-		return NOTIFY_DONE;
+	if (event_data == NULL)
+		return notifier_from_errno(-ENOMEM);
+
+	if (state == NULL) {
+		event_data->crashed = true;
+		goto event_post;
 	}
 
+	icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx\n",
+		      *state, priv->state);
+
+	switch (*state) {
+	case ROOT_PD_WDOG_BITE:
+		event_data->crashed = true;
+		event_data->wdog_bite = true;
+		break;
+	case ROOT_PD_SHUTDOWN:
+		break;
+	default:
+		event_data->crashed = true;
+		break;
+	}
+
+event_post:
+	icnss_ignore_qmi_timeout(true);
+
+	fw_down_data.crashed = event_data->crashed;
+	icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+	icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+				ICNSS_EVENT_SYNC, event_data);
+done:
 	return NOTIFY_OK;
 }
 
@@ -2265,7 +2583,7 @@
 
 	return 0;
 out:
-	icnss_pr_err("PD restart not enabled: %d\n", ret);
+	icnss_pr_err("Failed to enable PD restart: %d\n", ret);
 	return ret;
 
 }
@@ -2375,7 +2693,7 @@
 		goto out;
 	}
 
-	icnss_pr_dbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+	icnss_pr_vdbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
 
 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
 		icnss_pr_err("Invalid CE ID, ce_id: %d\n", ce_id);
@@ -2401,7 +2719,7 @@
 	irq_entry->irq = irq;
 	irq_entry->handler = handler;
 
-	icnss_pr_dbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
+	icnss_pr_vdbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
 
 	penv->stats.ce_irqs[ce_id].request++;
 out:
@@ -2420,7 +2738,7 @@
 		goto out;
 	}
 
-	icnss_pr_dbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+	icnss_pr_vdbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
 
 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
 		icnss_pr_err("Invalid CE ID to free, ce_id: %d\n", ce_id);
@@ -2454,7 +2772,7 @@
 		return;
 	}
 
-	icnss_pr_dbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+	icnss_pr_vdbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
 		     penv->state);
 
 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
@@ -2478,7 +2796,7 @@
 		return;
 	}
 
-	icnss_pr_dbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+	icnss_pr_vdbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
 		     penv->state);
 
 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
@@ -2878,12 +3196,25 @@
 		goto out;
 	}
 
-	if (!priv->service_notifier[0].handle) {
-		icnss_pr_err("Invalid handle during recovery\n");
+	if (!test_bit(ICNSS_PDR_ENABLED, &priv->state)) {
+		icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
+			     priv->state);
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!priv->service_notifier || !priv->service_notifier[0].handle) {
+		icnss_pr_err("Invalid handle during recovery, state: 0x%lx\n",
+			     priv->state);
 		ret = -EINVAL;
 		goto out;
 	}
 
+	WARN_ON(1);
+	icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
+		      priv->state);
+	priv->stats.trigger_recovery++;
+
 	/*
 	 * Initiate PDR, required only for the first instance
 	 */
@@ -2914,13 +3245,15 @@
 		goto map_fail;
 	}
 
-	ret = iommu_domain_set_attr(mapping->domain,
-				    DOMAIN_ATTR_ATOMIC,
-				    &atomic_ctx);
-	if (ret < 0) {
-		icnss_pr_err("Set atomic_ctx attribute failed, err = %d\n",
-			     ret);
-		goto set_attr_fail;
+	if (!priv->bypass_s1_smmu) {
+		ret = iommu_domain_set_attr(mapping->domain,
+					    DOMAIN_ATTR_ATOMIC,
+					    &atomic_ctx);
+		if (ret < 0) {
+			icnss_pr_err("Set atomic_ctx attribute failed, err = %d\n",
+				     ret);
+			goto set_attr_fail;
+		}
 	}
 
 	ret = iommu_domain_set_attr(mapping->domain,
@@ -2959,6 +3292,114 @@
 	priv->smmu_mapping = NULL;
 }
 
+static int icnss_get_vreg_info(struct device *dev,
+			       struct icnss_vreg_info *vreg_info)
+{
+	int ret = 0;
+	char prop_name[MAX_PROP_SIZE];
+	struct regulator *reg;
+	const __be32 *prop;
+	int len = 0;
+	int i;
+
+	reg = devm_regulator_get_optional(dev, vreg_info->name);
+	if (PTR_ERR(reg) == -EPROBE_DEFER) {
+		icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
+			     vreg_info->name);
+		ret = PTR_ERR(reg);
+		goto out;
+	}
+
+	if (IS_ERR(reg)) {
+		ret = PTR_ERR(reg);
+
+		if (vreg_info->required) {
+			icnss_pr_err("Regulator %s doesn't exist: %d\n",
+				     vreg_info->name, ret);
+			goto out;
+		} else {
+			icnss_pr_dbg("Optional regulator %s doesn't exist: %d\n",
+				     vreg_info->name, ret);
+			goto done;
+		}
+	}
+
+	vreg_info->reg = reg;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+		 "qcom,%s-config", vreg_info->name);
+
+	prop = of_get_property(dev->of_node, prop_name, &len);
+
+	icnss_pr_dbg("Got regulator config, prop: %s, len: %d\n",
+		     prop_name, len);
+
+	if (!prop || len < (2 * sizeof(__be32))) {
+		icnss_pr_dbg("Property %s %s\n", prop_name,
+			     prop ? "invalid format" : "doesn't exist");
+		goto done;
+	}
+
+	for (i = 0; (i * sizeof(__be32)) < len; i++) {
+		switch (i) {
+		case 0:
+			vreg_info->min_v = be32_to_cpup(&prop[0]);
+			break;
+		case 1:
+			vreg_info->max_v = be32_to_cpup(&prop[1]);
+			break;
+		case 2:
+			vreg_info->load_ua = be32_to_cpup(&prop[2]);
+			break;
+		case 3:
+			vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+			break;
+		default:
+			icnss_pr_dbg("Property %s, ignoring value at %d\n",
+				     prop_name, i);
+			break;
+		}
+	}
+
+done:
+	icnss_pr_dbg("Regulator: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+		     vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+		     vreg_info->load_ua, vreg_info->settle_delay);
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static int icnss_get_clk_info(struct device *dev,
+			      struct icnss_clk_info *clk_info)
+{
+	struct clk *handle;
+	int ret = 0;
+
+	handle = devm_clk_get(dev, clk_info->name);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		if (clk_info->required) {
+			icnss_pr_err("Clock %s isn't available: %d\n",
+				     clk_info->name, ret);
+			goto out;
+		} else {
+			icnss_pr_dbg("Ignoring clock %s: %d\n", clk_info->name,
+				     ret);
+			ret = 0;
+			goto out;
+		}
+	}
+
+	icnss_pr_dbg("Clock: %s, freq: %u\n", clk_info->name, clk_info->freq);
+
+	clk_info->handle = handle;
+out:
+	return ret;
+}
+
 static int icnss_fw_debug_show(struct seq_file *s, void *data)
 {
 	struct icnss_priv *priv = s->private;
@@ -2969,6 +3410,7 @@
 	seq_puts(s, "  VAL: 0 (Test mode disable)\n");
 	seq_puts(s, "  VAL: 1 (WLAN FW test)\n");
 	seq_puts(s, "  VAL: 2 (CCPM test)\n");
+	seq_puts(s, "  VAL: 3 (Trigger Recovery)\n");
 
 	seq_puts(s, "\nCMD: dynamic_feature_mask\n");
 	seq_puts(s, "  VAL: (64 bit feature mask)\n");
@@ -3223,6 +3665,9 @@
 		case ICNSS_WLFW_EXISTS:
 			seq_puts(s, "WLAN FW EXISTS");
 			continue;
+		case ICNSS_WDOG_BITE:
+			seq_puts(s, "MODEM WDOG BITE");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -3321,6 +3766,7 @@
 	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
 	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
 	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
+	ICNSS_STATS_DUMP(s, priv, trigger_recovery);
 
 	seq_puts(s, "\n<------------------ PM stats ------------------->\n");
 	ICNSS_STATS_DUMP(s, priv, pm_suspend);
@@ -3666,6 +4112,26 @@
 	if (ret == -EPROBE_DEFER)
 		goto out;
 
+	memcpy(priv->vreg_info, icnss_vreg_info, sizeof(icnss_vreg_info));
+	for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+		ret = icnss_get_vreg_info(dev, &priv->vreg_info[i]);
+
+		if (ret)
+			goto out;
+	}
+
+	memcpy(priv->clk_info, icnss_clk_info, sizeof(icnss_clk_info));
+	for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+		ret = icnss_get_clk_info(dev, &priv->clk_info[i]);
+		if (ret)
+			goto out;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,smmu-s1-bypass"))
+		priv->bypass_s1_smmu = true;
+
+	icnss_pr_dbg("SMMU S1 BYPASS = %d\n", priv->bypass_s1_smmu);
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
 	if (!res) {
 		icnss_pr_err("Memory base not found in DT\n");
@@ -3830,7 +4296,7 @@
 		return -EINVAL;
 	}
 
-	icnss_pr_dbg("PM Suspend, state: 0x%lx\n", priv->state);
+	icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state);
 
 	if (!priv->ops || !priv->ops->pm_suspend ||
 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3859,7 +4325,7 @@
 		return -EINVAL;
 	}
 
-	icnss_pr_dbg("PM resume, state: 0x%lx\n", priv->state);
+	icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state);
 
 	if (!priv->ops || !priv->ops->pm_resume ||
 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3888,7 +4354,7 @@
 		return -EINVAL;
 	}
 
-	icnss_pr_dbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
+	icnss_pr_vdbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
 
 	if (!priv->ops || !priv->ops->suspend_noirq ||
 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3917,7 +4383,7 @@
 		return -EINVAL;
 	}
 
-	icnss_pr_dbg("PM resume_noirq, state: 0x%lx\n", priv->state);
+	icnss_pr_vdbg("PM resume_noirq, state: 0x%lx\n", priv->state);
 
 	if (!priv->ops || !priv->ops->resume_noirq ||
 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3961,26 +4427,6 @@
 	},
 };
 
-#ifdef CONFIG_ICNSS_DEBUG
-static void __init icnss_ipc_log_long_context_init(void)
-{
-	icnss_ipc_log_long_context = ipc_log_context_create(NUM_REG_LOG_PAGES,
-							   "icnss_long", 0);
-	if (!icnss_ipc_log_long_context)
-		icnss_pr_err("Unable to create register log context\n");
-}
-
-static void __exit icnss_ipc_log_long_context_destroy(void)
-{
-	ipc_log_context_destroy(icnss_ipc_log_long_context);
-	icnss_ipc_log_long_context = NULL;
-}
-#else
-
-static void __init icnss_ipc_log_long_context_init(void) { }
-static void __exit icnss_ipc_log_long_context_destroy(void) { }
-#endif
-
 static int __init icnss_initialize(void)
 {
 	icnss_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES,
@@ -3988,7 +4434,10 @@
 	if (!icnss_ipc_log_context)
 		icnss_pr_err("Unable to create log context\n");
 
-	icnss_ipc_log_long_context_init();
+	icnss_ipc_log_long_context = ipc_log_context_create(NUM_LOG_LONG_PAGES,
+						       "icnss_long", 0);
+	if (!icnss_ipc_log_long_context)
+		icnss_pr_err("Unable to create long log context\n");
 
 	return platform_driver_register(&icnss_driver);
 }
@@ -3998,8 +4447,8 @@
 	platform_driver_unregister(&icnss_driver);
 	ipc_log_context_destroy(icnss_ipc_log_context);
 	icnss_ipc_log_context = NULL;
-
-	icnss_ipc_log_long_context_destroy();
+	ipc_log_context_destroy(icnss_ipc_log_long_context);
+	icnss_ipc_log_long_context = NULL;
 }
 
 
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
index 9a9d73b..458e39d 100644
--- a/drivers/soc/qcom/ipc_router_glink_xprt.c
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -43,8 +43,14 @@
 #define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
 #define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
 #define PIL_SUBSYSTEM_NAME_LEN 32
-#define DEFAULT_NUM_INTENTS 5
-#define DEFAULT_RX_INTENT_SIZE 2048
+
+#define MAX_NUM_LO_INTENTS 5
+#define MAX_NUM_MD_INTENTS 3
+#define MAX_NUM_HI_INTENTS 2
+#define LO_RX_INTENT_SIZE 2048
+#define MD_RX_INTENT_SIZE 8192
+#define HI_RX_INTENT_SIZE (17 * 1024)
+
 /**
  * ipc_router_glink_xprt - IPC Router's GLINK XPRT structure
  * @list: IPC router's GLINK XPRT list.
@@ -82,6 +88,9 @@
 	unsigned int xprt_version;
 	unsigned int xprt_option;
 	bool disable_pil_loading;
+	uint32_t cur_lo_intents_cnt;
+	uint32_t cur_md_intents_cnt;
+	uint32_t cur_hi_intents_cnt;
 };
 
 struct ipc_router_glink_xprt_work {
@@ -342,7 +351,7 @@
 	}
 
 	D("%s %zu bytes @ %p\n", __func__, rx_work->iovec_size, rx_work->iovec);
-	if (rx_work->iovec_size <= DEFAULT_RX_INTENT_SIZE)
+	if (rx_work->iovec_size <= HI_RX_INTENT_SIZE)
 		reuse_intent = true;
 
 	pkt = glink_xprt_copy_data(rx_work);
@@ -371,9 +380,14 @@
 				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
 	D("%s: Notified IPC Router of %s OPEN\n",
 	  __func__, glink_xprtp->xprt.name);
-	for (i = 0; i < DEFAULT_NUM_INTENTS; i++)
+	glink_xprtp->cur_lo_intents_cnt = 0;
+	glink_xprtp->cur_md_intents_cnt = 0;
+	glink_xprtp->cur_hi_intents_cnt = 0;
+	for (i = 0; i < MAX_NUM_LO_INTENTS; i++) {
 		glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-				      DEFAULT_RX_INTENT_SIZE);
+				      LO_RX_INTENT_SIZE);
+		glink_xprtp->cur_lo_intents_cnt++;
+	}
 	kfree(xprt_work);
 }
 
@@ -394,13 +408,32 @@
 
 static void glink_xprt_qrx_intent_worker(struct work_struct *work)
 {
+	size_t sz;
 	struct queue_rx_intent_work *qrx_intent_work =
 		container_of(work, struct queue_rx_intent_work, work);
 	struct ipc_router_glink_xprt *glink_xprtp =
 					qrx_intent_work->glink_xprtp;
+	uint32_t *cnt = NULL;
+	int ret;
 
-	glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-			      qrx_intent_work->intent_size);
+	sz = qrx_intent_work->intent_size;
+	if (sz <= MD_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_md_intents_cnt >= MAX_NUM_MD_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = MD_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_md_intents_cnt;
+	} else if (sz <= HI_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_hi_intents_cnt >= MAX_NUM_HI_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = HI_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_hi_intents_cnt;
+	}
+
+	ret = glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+					sz);
+	if (!ret && cnt)
+		(*cnt)++;
+qrx_intent_worker_out:
 	kfree(qrx_intent_work);
 }
 
@@ -470,7 +503,7 @@
 	struct ipc_router_glink_xprt *glink_xprtp =
 		(struct ipc_router_glink_xprt *)priv;
 
-	if (sz <= DEFAULT_RX_INTENT_SIZE)
+	if (sz <= LO_RX_INTENT_SIZE)
 		return true;
 
 	qrx_intent_work = kmalloc(sizeof(struct queue_rx_intent_work),
diff --git a/drivers/soc/qcom/llcc-core.c b/drivers/soc/qcom/llcc-core.c
index 2c9d0a0..3d6b002 100644
--- a/drivers/soc/qcom/llcc-core.c
+++ b/drivers/soc/qcom/llcc-core.c
@@ -20,7 +20,6 @@
 #include <linux/regmap.h>
 
 /* Config registers offsets*/
-#define COMMON_CFG0		0x00030004
 #define DRP_ECC_ERROR_CFG	0x00040000
 
 /* TRP, DRP interrupt register offsets */
@@ -29,52 +28,44 @@
 #define TRP_INTERRUPT_0_ENABLE		0x00020488
 #define DRP_INTERRUPT_ENABLE		0x0004100C
 
-#define DATA_RAM_ECC_ENABLE	0x1
 #define SB_ERROR_THRESHOLD	0x1
 #define SB_ERROR_THRESHOLD_SHIFT	24
 #define SB_DB_TRP_INTERRUPT_ENABLE	0x3
 #define TRP0_INTERRUPT_ENABLE	0x1
 #define DRP0_INTERRUPT_ENABLE	BIT(6)
-#define COMMON_INTERRUPT_0_AMON BIT(8)
 #define SB_DB_DRP_INTERRUPT_ENABLE	0x3
 
-static void qcom_llcc_core_setup(struct regmap *llcc_regmap)
+static void qcom_llcc_core_setup(struct regmap *llcc_regmap, uint32_t b_off)
 {
 	u32 sb_err_threshold;
 
 	/* Enable TRP in instance 2 of common interrupt enable register */
-	regmap_update_bits(llcc_regmap, CMN_INTERRUPT_2_ENABLE,
+	regmap_update_bits(llcc_regmap, b_off + CMN_INTERRUPT_2_ENABLE,
 			   TRP0_INTERRUPT_ENABLE, TRP0_INTERRUPT_ENABLE);
 
 	/* Enable ECC interrupts on Tag Ram */
-	regmap_update_bits(llcc_regmap, TRP_INTERRUPT_0_ENABLE,
+	regmap_update_bits(llcc_regmap, b_off + TRP_INTERRUPT_0_ENABLE,
 		SB_DB_TRP_INTERRUPT_ENABLE, SB_DB_TRP_INTERRUPT_ENABLE);
 
-	/* Enable ECC for for data ram */
-	regmap_update_bits(llcc_regmap, COMMON_CFG0,
-				DATA_RAM_ECC_ENABLE, DATA_RAM_ECC_ENABLE);
-
 	/* Enable SB error for Data RAM */
 	sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
-	regmap_write(llcc_regmap, DRP_ECC_ERROR_CFG, sb_err_threshold);
+	regmap_write(llcc_regmap, b_off + DRP_ECC_ERROR_CFG, sb_err_threshold);
 
 	/* Enable DRP in instance 2 of common interrupt enable register */
-	regmap_update_bits(llcc_regmap, CMN_INTERRUPT_2_ENABLE,
+	regmap_update_bits(llcc_regmap, b_off + CMN_INTERRUPT_2_ENABLE,
 			   DRP0_INTERRUPT_ENABLE, DRP0_INTERRUPT_ENABLE);
 
 	/* Enable ECC interrupts on Data Ram */
-	regmap_write(llcc_regmap, DRP_INTERRUPT_ENABLE,
+	regmap_write(llcc_regmap, b_off + DRP_INTERRUPT_ENABLE,
 		     SB_DB_DRP_INTERRUPT_ENABLE);
-
-	/* Enable AMON interrupt in the common interrupt register */
-	regmap_update_bits(llcc_regmap, CMN_INTERRUPT_0_ENABLE,
-			COMMON_INTERRUPT_0_AMON, COMMON_INTERRUPT_0_AMON);
 }
 
 static int qcom_llcc_core_probe(struct platform_device *pdev)
 {
 	struct regmap *llcc_regmap;
 	struct device *dev = &pdev->dev;
+	u32 b_off = 0;
+	int ret = 0;
 
 	llcc_regmap = syscon_node_to_regmap(dev->of_node);
 
@@ -83,7 +74,14 @@
 		return PTR_ERR(llcc_regmap);
 	}
 
-	qcom_llcc_core_setup(llcc_regmap);
+	ret = of_property_read_u32(dev->of_node,
+			"qcom,llcc-broadcast-off", &b_off);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to read qcom,llcc-broadcast-off\n");
+		return -EINVAL;
+	}
+
+	qcom_llcc_core_setup(llcc_regmap, b_off);
 
 	return 0;
 }
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 501b902..5ca0941 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -44,13 +44,12 @@
 
 #define CACHE_LINE_SIZE_SHIFT 6
 #define SIZE_PER_LLCC_SHIFT   2
+
 #define MAX_CAP_TO_BYTES(n) (n * 1024)
 #define LLCC_TRP_ACT_CTRLn(n) (n * 0x1000)
 #define LLCC_TRP_STATUSn(n)   (4 + n * 0x1000)
 #define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + 0x8 * n)
 #define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + 0x8 * n)
-#define LLCC_TRP_PCB_ACT       0x23204
-#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x23200
 
 /**
  * Driver data for llcc
@@ -65,6 +64,7 @@
 	struct mutex slice_mutex;
 	u32 llcc_config_data_sz;
 	u32 max_slices;
+	u32 b_off;
 	unsigned long *llcc_slice_map;
 };
 
@@ -174,8 +174,8 @@
 	u32 slice_status;
 	unsigned long timeout;
 
-	act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
-	status_reg = LLCC_TRP_STATUSn(sid);
+	act_ctrl_reg = drv->b_off + LLCC_TRP_ACT_CTRLn(sid);
+	status_reg = drv->b_off + LLCC_TRP_STATUSn(sid);
 
 	regmap_write(drv->llcc_map, act_ctrl_reg, act_ctrl_reg_val);
 
@@ -320,20 +320,19 @@
 	u32 attr0_cfg;
 	u32 attr1_val;
 	u32 attr0_val;
-	u32 pcb = 0;
-	u32 cad = 0;
 	u32 max_cap_cacheline;
 	u32 sz;
 	const struct llcc_slice_config *llcc_table;
 	struct llcc_drv_data *drv = platform_get_drvdata(pdev);
 	struct llcc_slice_desc desc;
+	u32 b_off = drv->b_off;
 
 	sz = drv->llcc_config_data_sz;
 	llcc_table = drv->slice_data;
 
 	for (i = 0; i < sz; i++) {
-		attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
-		attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
+		attr1_cfg = b_off + LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
+		attr0_cfg = b_off + LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
 
 		attr1_val = llcc_table[i].cache_mode;
 		attr1_val |= (llcc_table[i].probe_target_ways <<
@@ -358,14 +357,6 @@
 		regmap_write(drv->llcc_map, attr1_cfg, attr1_val);
 		regmap_write(drv->llcc_map, attr0_cfg, attr0_val);
 
-		/* Write the retain on power collapse bit for each scid */
-		pcb |= llcc_table[i].retain_on_pc << llcc_table[i].slice_id;
-		regmap_write(drv->llcc_map, LLCC_TRP_PCB_ACT, pcb);
-
-		/* Disable capacity alloc */
-		cad |= llcc_table[i].dis_cap_alloc << llcc_table[i].slice_id;
-		regmap_write(drv->llcc_map, LLCC_TRP_SCID_DIS_CAP_ALLOC, cad);
-
 		/* Make sure that the SCT is programmed before activating */
 		mb();
 
@@ -398,7 +389,15 @@
 	rc = of_property_read_u32(pdev->dev.of_node, "max-slices",
 				  &drv_data->max_slices);
 	if (rc) {
-		dev_info(&pdev->dev, "Invalid max-slices dt entry\n");
+		dev_err(&pdev->dev, "Invalid max-slices dt entry\n");
+		devm_kfree(&pdev->dev, drv_data);
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.parent->of_node,
+			"qcom,llcc-broadcast-off", &drv_data->b_off);
+	if (rc) {
+		dev_err(&pdev->dev, "Invalid qcom,broadcast-off entry\n");
 		devm_kfree(&pdev->dev, drv_data);
 		return rc;
 	}
diff --git a/drivers/soc/qcom/msm-core.c b/drivers/soc/qcom/msm-core.c
index fa3ba1d..de2a1ce 100644
--- a/drivers/soc/qcom/msm-core.c
+++ b/drivers/soc/qcom/msm-core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -52,7 +52,7 @@
 #define NUM_OF_CORNERS 10
 #define DEFAULT_SCALING_FACTOR 1
 
-#define ALLOCATE_2D_ARRAY(type) (\
+#define ALLOCATE_2D_ARRAY(type) \
 static type **allocate_2d_array_##type(int idx)\
 {\
 	int i;\
@@ -77,15 +77,13 @@
 		kfree(ptr[i]);\
 	kfree(ptr);\
 	return ERR_PTR(-ENOMEM);\
-})
+}
 
 struct cpu_activity_info {
 	int cpu;
 	int mpidr;
 	long temp;
 	int sensor_id;
-	struct sensor_threshold hi_threshold;
-	struct sensor_threshold low_threshold;
 	struct cpu_static_info *sp;
 };
 
@@ -109,7 +107,6 @@
 static struct cpu_activity_info activity[NR_CPUS];
 DEFINE_PER_CPU(struct cpu_pstate_pwr *, ptable);
 static struct cpu_pwr_stats cpu_stats[NR_CPUS];
-static uint32_t scaling_factor;
 ALLOCATE_2D_ARRAY(uint32_t);
 
 static int poll_ms;
@@ -123,79 +120,11 @@
 static int max_throttling_temp = 80; /* in C */
 module_param_named(throttling_temp, max_throttling_temp, int, 0664);
 
-/*
- * Cannot be called from an interrupt context
- */
-static void set_and_activate_threshold(uint32_t sensor_id,
-	struct sensor_threshold *threshold)
-{
-	if (sensor_set_trip(sensor_id, threshold)) {
-		pr_err("%s: Error in setting trip %d\n",
-			KBUILD_MODNAME, threshold->trip);
-		return;
-	}
-
-	if (sensor_activate_trip(sensor_id, threshold, true)) {
-		sensor_cancel_trip(sensor_id, threshold);
-		pr_err("%s: Error in enabling trip %d\n",
-			KBUILD_MODNAME, threshold->trip);
-		return;
-	}
-}
-
-static void set_threshold(struct cpu_activity_info *cpu_node)
-{
-	if (cpu_node->sensor_id < 0)
-		return;
-
-	/*
-	 * Before operating on the threshold structure which is used by
-	 * thermal core ensure that the sensor is disabled to prevent
-	 * incorrect operations on the sensor list maintained by thermal code.
-	 */
-	sensor_activate_trip(cpu_node->sensor_id,
-			&cpu_node->hi_threshold, false);
-	sensor_activate_trip(cpu_node->sensor_id,
-			&cpu_node->low_threshold, false);
-
-	cpu_node->hi_threshold.temp = (cpu_node->temp + high_hyst_temp) *
-					scaling_factor;
-	cpu_node->low_threshold.temp = (cpu_node->temp - low_hyst_temp) *
-					scaling_factor;
-
-	/*
-	 * Set the threshold only if we are below the hotplug limit
-	 * Adding more work at this high temperature range, seems to
-	 * fail hotplug notifications.
-	 */
-	if (cpu_node->hi_threshold.temp < (CPU_HOTPLUG_LIMIT * scaling_factor))
-		set_and_activate_threshold(cpu_node->sensor_id,
-			&cpu_node->hi_threshold);
-
-	set_and_activate_threshold(cpu_node->sensor_id,
-		&cpu_node->low_threshold);
-}
-
 static void samplequeue_handle(struct work_struct *work)
 {
 	complete(&sampling_completion);
 }
 
-/* May be called from an interrupt context */
-static void core_temp_notify(enum thermal_trip_type type,
-		int temp, void *data)
-{
-	struct cpu_activity_info *cpu_node =
-		(struct cpu_activity_info *) data;
-
-	trace_temp_notification(cpu_node->sensor_id,
-		type, temp, cpu_node->temp);
-
-	cpu_node->temp = temp / scaling_factor;
-
-	complete(&sampling_completion);
-}
-
 static void repopulate_stats(int cpu)
 {
 	int i;
@@ -226,7 +155,6 @@
 	int cpu;
 	static long prev_temp[NR_CPUS];
 	struct cpu_activity_info *cpu_node;
-	int temp;
 
 	if (disabled)
 		return;
@@ -238,11 +166,6 @@
 		if (cpu_node->sensor_id < 0)
 			continue;
 
-		if (cpu_node->temp == prev_temp[cpu]) {
-			sensor_get_temp(cpu_node->sensor_id, &temp);
-			cpu_node->temp = temp / scaling_factor;
-		}
-
 		prev_temp[cpu] = cpu_node->temp;
 
 		/*
@@ -276,7 +199,7 @@
 	int cpu, num_of_freqs;
 	struct cpufreq_frequency_table *table;
 
-	table = cpufreq_frequency_get_table(policy->cpu);
+	table = policy->freq_table;
 	if (!table) {
 		pr_err("Couldn't get freq table for cpu%d\n",
 				policy->cpu);
@@ -319,12 +242,6 @@
 			cpu_node = &activity[cpu];
 			if (prev_temp[cpu] != cpu_node->temp) {
 				prev_temp[cpu] = cpu_node->temp;
-				set_threshold(cpu_node);
-				trace_temp_threshold(cpu, cpu_node->temp,
-					cpu_node->hi_threshold.temp /
-					scaling_factor,
-					cpu_node->low_threshold.temp /
-					scaling_factor);
 			}
 		}
 		if (!poll_ms)
@@ -484,9 +401,9 @@
 		return -EINVAL;
 
 	get_user(cluster, &argp->cluster);
-	mpidr = (argp->cluster << (MAX_CORES_PER_CLUSTER *
+	mpidr = (cluster << (MAX_CORES_PER_CLUSTER *
 			MAX_NUM_OF_CLUSTERS));
-	cpumask = argp->cpumask;
+	get_user(cpumask, &argp->cpumask);
 
 	switch (cmd) {
 	case EA_LEAKAGE:
@@ -551,16 +468,6 @@
 	return 0;
 }
 
-static inline void init_sens_threshold(struct sensor_threshold *threshold,
-		enum thermal_trip_type trip, long temp,
-		void *data)
-{
-	threshold->trip = trip;
-	threshold->temp = temp;
-	threshold->data = data;
-	threshold->notify = (void *)core_temp_notify;
-}
-
 static int msm_core_stats_init(struct device *dev, int cpu)
 {
 	int i;
@@ -695,71 +602,6 @@
 	return ret;
 }
 
-static int msm_core_tsens_init(struct device_node *node, int cpu)
-{
-	int ret = 0;
-	char *key = NULL;
-	struct device_node *phandle;
-	const char *sensor_type = NULL;
-	struct cpu_activity_info *cpu_node = &activity[cpu];
-	int temp;
-
-	if (!node)
-		return -ENODEV;
-
-	key = "sensor";
-	phandle = of_parse_phandle(node, key, 0);
-	if (!phandle) {
-		pr_info("%s: No sensor mapping found for the core\n",
-				__func__);
-		/* Do not treat this as error as some targets might have
-		 * temperature notification only in userspace.
-		 * Use default temperature for the core. Userspace might
-		 * update the temperature once it is up.
-		 */
-		cpu_node->sensor_id = -ENODEV;
-		cpu_node->temp = DEFAULT_TEMP;
-		return 0;
-	}
-
-	key = "qcom,sensor-name";
-	ret = of_property_read_string(phandle, key,
-				&sensor_type);
-	if (ret) {
-		pr_err("%s: Cannot read tsens id\n", __func__);
-		return ret;
-	}
-
-	cpu_node->sensor_id = sensor_get_id((char *)sensor_type);
-	if (cpu_node->sensor_id < 0)
-		return cpu_node->sensor_id;
-
-	key = "qcom,scaling-factor";
-	ret = of_property_read_u32(phandle, key,
-				&scaling_factor);
-	if (ret) {
-		pr_info("%s: Cannot read tsens scaling factor\n", __func__);
-		scaling_factor = DEFAULT_SCALING_FACTOR;
-	}
-
-	ret = sensor_get_temp(cpu_node->sensor_id, &temp);
-	if (ret)
-		return ret;
-
-	cpu_node->temp = temp / scaling_factor;
-
-	init_sens_threshold(&cpu_node->hi_threshold,
-			THERMAL_TRIP_CONFIGURABLE_HI,
-			(cpu_node->temp + high_hyst_temp) * scaling_factor,
-			(void *)cpu_node);
-	init_sens_threshold(&cpu_node->low_threshold,
-			THERMAL_TRIP_CONFIGURABLE_LOW,
-			(cpu_node->temp - low_hyst_temp) * scaling_factor,
-			(void *)cpu_node);
-
-	return ret;
-}
-
 static int msm_core_mpidr_init(struct device_node *phandle)
 {
 	int ret = 0;
@@ -846,11 +688,6 @@
 		for_each_possible_cpu(cpu) {
 			if (activity[cpu].sensor_id < 0)
 				continue;
-
-			sensor_activate_trip(activity[cpu].sensor_id,
-				&activity[cpu].hi_threshold, false);
-			sensor_activate_trip(activity[cpu].sensor_id,
-				&activity[cpu].low_threshold, false);
 		}
 		break;
 	default:
@@ -893,8 +730,6 @@
 	int ret = 0;
 	unsigned long cpu = 0;
 	struct device_node *child_node = NULL;
-	struct device_node *ea_node = NULL;
-	char *key = NULL;
 	int mpidr;
 
 	for_each_possible_cpu(cpu) {
@@ -907,23 +742,8 @@
 		if (mpidr < 0)
 			return mpidr;
 
-		if (cpu >= num_possible_cpus())
-			continue;
-
 		activity[cpu].mpidr = mpidr;
 
-		key = "qcom,ea";
-		ea_node = of_parse_phandle(child_node, key, 0);
-		if (!ea_node) {
-			pr_err("%s Couldn't find the ea_node for cpu%lu\n",
-				__func__, cpu);
-			return -ENODEV;
-		}
-
-		ret = msm_core_tsens_init(ea_node, cpu);
-		if (ret)
-			return ret;
-
 		if (!activity[cpu].sp->table)
 			continue;
 
@@ -972,55 +792,11 @@
 	}
 }
 
-static int uio_init(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct uio_info *info = NULL;
-	struct resource *clnt_res = NULL;
-	u32 ea_mem_size = 0;
-	phys_addr_t ea_mem_pyhsical = 0;
-
-	clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!clnt_res) {
-		pr_err("resource not found\n");
-		return -ENODEV;
-	}
-
-	info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	ea_mem_size = resource_size(clnt_res);
-	ea_mem_pyhsical = clnt_res->start;
-
-	if (ea_mem_size == 0) {
-		pr_err("msm-core: memory size is zero");
-		return -EINVAL;
-	}
-
-	/* Setup device */
-	info->name = clnt_res->name;
-	info->version = "1.0";
-	info->mem[0].addr = ea_mem_pyhsical;
-	info->mem[0].size = ea_mem_size;
-	info->mem[0].memtype = UIO_MEM_PHYS;
-
-	ret = uio_register_device(&pdev->dev, info);
-	if (ret) {
-		pr_err("uio register failed ret=%d", ret);
-		return ret;
-	}
-	dev_set_drvdata(&pdev->dev, info);
-
-	return 0;
-}
-
 static int msm_core_dev_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 	char *key = NULL;
 	struct device_node *node;
-	int cpu;
 	struct uio_info *info;
 
 	if (!pdev)
@@ -1049,10 +825,6 @@
 	key = "qcom,throttling-temp";
 	ret = of_property_read_u32(node, key, &max_throttling_temp);
 
-	ret = uio_init(pdev);
-	if (ret)
-		return ret;
-
 	ret = msm_core_freq_init();
 	if (ret)
 		goto failed;
@@ -1071,9 +843,6 @@
 	if (ret)
 		goto failed;
 
-	for_each_possible_cpu(cpu)
-		set_threshold(&activity[cpu]);
-
 	INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle);
 	schedule_delayed_work(&sampling_work, msecs_to_jiffies(0));
 	cpufreq_register_notifier(&cpu_policy, CPUFREQ_POLICY_NOTIFIER);
@@ -1096,11 +865,6 @@
 	for_each_possible_cpu(cpu) {
 		if (activity[cpu].sensor_id < 0)
 			continue;
-
-		sensor_cancel_trip(activity[cpu].sensor_id,
-				&activity[cpu].hi_threshold);
-		sensor_cancel_trip(activity[cpu].sensor_id,
-				&activity[cpu].low_threshold);
 	}
 	free_dyn_memory();
 	misc_deregister(&msm_core_device);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index 8c6deb1..c977d1b 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -572,7 +572,7 @@
 		bcm_dev->lnode_list[lnode_idx].lnode_query_ab[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].sum_query_ab *
 					(uint64_t)bcm_dev->bcmdev->width,
-				cur_dev->node_info->agg_params.num_aggports,
+				cur_dev->node_info->agg_params.num_aggports *
 				cur_dev->node_info->agg_params.buswidth);
 
 		for (i = 0; i < bcm_dev->num_lnodes; i++) {
@@ -1298,7 +1298,7 @@
 					struct msm_bus_tcs_usecase *tcs_usecase)
 {
 	int lnode, src, dest, cur_idx;
-	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw;
 	int i, ret = 0;
 	struct msm_bus_scale_pdata *pdata;
 	struct device *src_dev;
@@ -1339,8 +1339,8 @@
 					curr_bw, curr_clk);
 		}
 
-		ret = query_path(src_dev, dest, req_clk, req_bw, slp_clk,
-			slp_bw, curr_clk, curr_bw, lnode);
+		ret = query_path(src_dev, dest, req_clk, req_bw, 0,
+			0, curr_clk, curr_bw, lnode);
 
 		if (ret) {
 			MSM_BUS_ERR("%s: Query path failed! %d ctx %d\n",
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
index 7a0fbc5..4911cf2 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_core.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -288,7 +288,7 @@
 	struct device **src_devs;
 };
 
-uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
+uint64_t msm_bus_div64(uint64_t num, unsigned int base);
 int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
 void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
 struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index e2ad422..beb5c2b 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -17,7 +17,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-//#include <soc/qcom/rpm-smd.h>
 #include <soc/qcom/cmd-db.h>
 #include <soc/qcom/rpmh.h>
 #include <soc/qcom/tcs.h>
@@ -36,11 +35,6 @@
 #define BCM_TCS_CMD_VOTE_Y_SHFT		0
 #define BCM_TCS_CMD_VOTE_Y_MASK		0xFFFC000
 
-#define VCD_MAX_CNT			10
-
-#define RSC_HLOS_DRV_ID			2
-#define RSC_DISP_DRV_ID			0
-
 #define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
 	(((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\
 	((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\
@@ -49,10 +43,8 @@
 
 static int msm_bus_dev_init_qos(struct device *dev, void *data);
 
-struct list_head bcm_clist_inorder[VCD_MAX_CNT];
-struct list_head bcm_query_list_inorder[VCD_MAX_CNT];
-static struct rpmh_client *mbox_apps;
-static struct rpmh_client *mbox_disp;
+static struct list_head bcm_query_list_inorder[VCD_MAX_CNT];
+static struct msm_bus_node_device_type *cur_rsc;
 
 struct bcm_db {
 	uint32_t unit_size;
@@ -304,7 +296,8 @@
 				int *n_sleep,
 				struct tcs_cmd *cmdlist_active,
 				struct tcs_cmd *cmdlist_wake,
-				struct tcs_cmd *cmdlist_sleep)
+				struct tcs_cmd *cmdlist_sleep,
+				struct list_head *cur_bcm_clist)
 {
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 	int i = 0;
@@ -318,13 +311,13 @@
 
 	for (i = 0; i < VCD_MAX_CNT; i++) {
 		last_tcs = -1;
-		if (list_empty(&bcm_clist_inorder[i]))
+		if (list_empty(&cur_bcm_clist[i]))
 			continue;
-		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
-			if (cur_bcm->dirty) {
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+			if (cur_bcm->updated) {
 				if (last_tcs != -1 &&
 					list_is_last(&cur_bcm->link,
-						&bcm_clist_inorder[i])) {
+						&cur_bcm_clist[i])) {
 					cmdlist_active[last_tcs].data |=
 						BCM_TCS_CMD_COMMIT_MASK;
 					cmdlist_active[last_tcs].complete
@@ -335,7 +328,7 @@
 			n_active[idx]++;
 			commit = false;
 			if (list_is_last(&cur_bcm->link,
-						&bcm_clist_inorder[i])) {
+						&cur_bcm_clist[i])) {
 				commit = true;
 				idx++;
 			}
@@ -344,7 +337,7 @@
 				cur_bcm->node_bw[ACTIVE_CTX].max_ab, commit);
 			k++;
 			last_tcs = k;
-			cur_bcm->dirty = true;
+			cur_bcm->updated = true;
 		}
 	}
 
@@ -355,9 +348,9 @@
 	idx = 0;
 	for (i = 0; i < VCD_MAX_CNT; i++) {
 		last_tcs = -1;
-		if (list_empty(&bcm_clist_inorder[i]))
+		if (list_empty(&cur_bcm_clist[i]))
 			continue;
-		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
 			commit = false;
 			if ((cur_bcm->node_bw[DUAL_CTX].max_ab ==
 				cur_bcm->node_bw[ACTIVE_CTX].max_ab) &&
@@ -365,7 +358,7 @@
 				cur_bcm->node_bw[ACTIVE_CTX].max_ib)) {
 				if (last_tcs != -1 &&
 					list_is_last(&cur_bcm->link,
-					&bcm_clist_inorder[i])) {
+					&cur_bcm_clist[i])) {
 					cmdlist_wake[k].data |=
 						BCM_TCS_CMD_COMMIT_MASK;
 					cmdlist_sleep[k].data |=
@@ -379,7 +372,7 @@
 			last_tcs = k;
 			n_sleep[idx]++;
 			if (list_is_last(&cur_bcm->link,
-						&bcm_clist_inorder[i])) {
+						&cur_bcm_clist[i])) {
 				commit = true;
 				idx++;
 			}
@@ -445,10 +438,18 @@
 	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
 	cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->dirty)
-		list_add_tail(&cur_bcm->link, &bcm_clist_inorder[cur_vcd]);
-	else
-		cur_bcm->dirty = false;
+	if (!cur_bcm->node_info->num_rsc_devs)
+		goto exit_bcm_clist_add;
+
+	if (!cur_rsc)
+		cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+
+	if (!cur_bcm->dirty) {
+		list_add_tail(&cur_bcm->link,
+					&cur_rsc->rscdev->bcm_clist[cur_vcd]);
+		cur_bcm->dirty = true;
+	}
+	cur_bcm->updated = false;
 
 exit_bcm_clist_add:
 	return ret;
@@ -480,7 +481,7 @@
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
-		goto exit_bcm_clist_add;
+		goto exit_bcm_clist_clean;
 
 	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
 
@@ -492,7 +493,7 @@
 		list_del_init(&cur_bcm->link);
 	}
 
-exit_bcm_clist_add:
+exit_bcm_clist_clean:
 	return ret;
 }
 
@@ -506,7 +507,6 @@
 
 	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
 
-	MSM_BUS_ERR("%s: removing bcm %d\n", __func__, cur_bcm->node_info->id);
 	cur_bcm->query_dirty = false;
 	list_del_init(&cur_bcm->query_link);
 
@@ -525,6 +525,7 @@
 	struct tcs_cmd *cmdlist_wake = NULL;
 	struct tcs_cmd *cmdlist_sleep = NULL;
 	struct rpmh_client *cur_mbox = NULL;
+	struct list_head *cur_bcm_clist = NULL;
 	int *n_active = NULL;
 	int *n_wake = NULL;
 	int *n_sleep = NULL;
@@ -534,6 +535,7 @@
 	int cnt_sleep = 0;
 	int i = 0;
 
+
 	list_for_each_entry_safe(node, node_tmp, clist, link) {
 		if (unlikely(node->node_info->defer_qos))
 			msm_bus_dev_init_qos(&node->dev, NULL);
@@ -541,10 +543,13 @@
 		bcm_clist_add(node);
 	}
 
+	cur_mbox = cur_rsc->rscdev->mbox;
+	cur_bcm_clist = cur_rsc->rscdev->bcm_clist;
+
 	for (i = 0; i < VCD_MAX_CNT; i++) {
-		if (list_empty(&bcm_clist_inorder[i]))
+		if (list_empty(&cur_bcm_clist[i]))
 			continue;
-		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
 			if ((cur_bcm->node_bw[DUAL_CTX].max_ab !=
 				cur_bcm->node_bw[ACTIVE_CTX].max_ab) ||
 				(cur_bcm->node_bw[DUAL_CTX].max_ib !=
@@ -552,18 +557,13 @@
 				cnt_sleep++;
 				cnt_wake++;
 			}
-			if (!cur_bcm->dirty)
+			if (!cur_bcm->updated)
 				cnt_active++;
-			if (!cur_mbox) {
-				if (cur_bcm->bcmdev->drv_id == RSC_HLOS_DRV_ID)
-					cur_mbox = mbox_apps;
-				else
-					cur_mbox = mbox_disp;
-			}
 		}
 		cnt_vcd++;
 	}
 
+	MSM_BUS_DBG("%s: cmd_gen\n", __func__);
 	n_active = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
 	n_wake = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
 	n_sleep = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
@@ -577,17 +577,13 @@
 								GFP_KERNEL);
 	}
 	bcm_cnt = tcs_cmd_list_gen(n_active, n_wake, n_sleep, cmdlist_active,
-					cmdlist_wake, cmdlist_sleep);
+				cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
 
 	ret = rpmh_invalidate(cur_mbox);
-
-	ret = rpmh_write_passthru(cur_mbox, RPMH_ACTIVE_ONLY_STATE,
+	ret = rpmh_write_passthru(cur_mbox, cur_rsc->rscdev->req_state,
 						cmdlist_active, n_active);
-	if (cur_mbox == mbox_apps)
-		ret = rpmh_write_passthru(cur_mbox, RPMH_WAKE_ONLY_STATE,
-						cmdlist_wake, n_wake);
-	else
-		ret = rpmh_write_passthru(cur_mbox, RPMH_AWAKE_STATE,
+
+	ret = rpmh_write_passthru(cur_mbox, RPMH_WAKE_ONLY_STATE,
 						cmdlist_wake, n_wake);
 
 	ret = rpmh_write_passthru(cur_mbox, RPMH_SLEEP_STATE,
@@ -599,6 +595,7 @@
 		list_del_init(&node->link);
 	}
 
+	cur_rsc = NULL;
 	kfree(cmdlist_active);
 	kfree(cmdlist_wake);
 	kfree(cmdlist_sleep);
@@ -950,7 +947,6 @@
 
 	// Add way to count # of VCDs, initialize LL
 	for (i = 0; i < VCD_MAX_CNT; i++) {
-		INIT_LIST_HEAD(&bcm_clist_inorder[i]);
 		INIT_LIST_HEAD(&bcm_query_list_inorder[i]);
 	}
 
@@ -958,7 +954,44 @@
 	return ret;
 }
 
+static int msm_bus_rsc_init(struct platform_device *pdev,
+			struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_rsc_device_type *rscdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+	int i = 0;
 
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		ret = -ENXIO;
+		goto exit_rsc_init;
+	}
+
+	rscdev = devm_kzalloc(dev, sizeof(struct msm_bus_rsc_device_type),
+								GFP_KERNEL);
+	if (!rscdev) {
+		ret = -ENOMEM;
+		goto exit_rsc_init;
+	}
+
+	node_dev->rscdev = rscdev;
+	rscdev->req_state = pdata->rscdev->req_state;
+	rscdev->mbox = rpmh_get_byname(pdev, node_dev->node_info->name);
+
+	if (IS_ERR_OR_NULL(rscdev->mbox)) {
+		MSM_BUS_ERR("%s: Failed to get mbox:%s", __func__,
+						node_dev->node_info->name);
+	}
+
+	// Add way to count # of VCDs, initialize LL
+	for (i = 0; i < VCD_MAX_CNT; i++)
+		INIT_LIST_HEAD(&rscdev->bcm_clist[i]);
+
+exit_rsc_init:
+	return ret;
+}
 
 static int msm_bus_init_clk(struct device *bus_dev,
 				struct msm_bus_node_device_type *pdata)
@@ -1059,10 +1092,12 @@
 	node_info->num_connections = pdata_node_info->num_connections;
 	node_info->num_blist = pdata_node_info->num_blist;
 	node_info->num_bcm_devs = pdata_node_info->num_bcm_devs;
+	node_info->num_rsc_devs = pdata_node_info->num_rsc_devs;
 	node_info->num_qports = pdata_node_info->num_qports;
 	node_info->virt_dev = pdata_node_info->virt_dev;
 	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
 	node_info->is_bcm_dev = pdata_node_info->is_bcm_dev;
+	node_info->is_rsc_dev = pdata_node_info->is_rsc_dev;
 	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
 	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
 	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
@@ -1176,6 +1211,30 @@
 		pdata_node_info->bcm_dev_ids,
 		sizeof(int) * pdata_node_info->num_bcm_devs);
 
+	node_info->rsc_devs = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_devs) {
+		MSM_BUS_ERR("%s:rsc dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->rsc_dev_ids = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_dev_ids) {
+		MSM_BUS_ERR("%s:rsc dev ids alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->rsc_devs);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->rsc_dev_ids,
+		pdata_node_info->rsc_dev_ids,
+		sizeof(int) * pdata_node_info->num_rsc_devs);
+
 	node_info->qport = devm_kzalloc(bus_dev,
 			sizeof(int) * pdata_node_info->num_qports,
 			GFP_KERNEL);
@@ -1268,6 +1327,7 @@
 {
 	struct msm_bus_node_device_type *bus_node = NULL;
 	struct msm_bus_node_device_type *bcm_node = NULL;
+	struct msm_bus_node_device_type *rsc_node = NULL;
 	int ret = 0;
 	int j;
 	struct msm_bus_node_device_type *fab;
@@ -1281,7 +1341,8 @@
 
 	/* Setup parent bus device for this node */
 	if (!bus_node->node_info->is_fab_dev &&
-		!bus_node->node_info->is_bcm_dev) {
+		!bus_node->node_info->is_bcm_dev &&
+		!bus_node->node_info->is_rsc_dev) {
 		struct device *bus_parent_device =
 			bus_find_device(&msm_bus_type, NULL,
 				(void *)&bus_node->node_info->bus_device_id,
@@ -1351,6 +1412,22 @@
 		bcm_node->bcmdev->num_bus_devs++;
 	}
 
+	for (j = 0; j < bus_node->node_info->num_rsc_devs; j++) {
+		bus_node->node_info->rsc_devs[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->rsc_dev_ids[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->rsc_devs[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->rsc_dev_ids[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+		rsc_node = to_msm_bus_node(bus_node->node_info->rsc_devs[j]);
+	}
+
 exit_setup_dev_conn:
 	return ret;
 }
@@ -1454,13 +1531,22 @@
 				goto exit_device_probe;
 			}
 		}
-		if (pdata->info[i].node_info->is_bcm_dev)
+		if (pdata->info[i].node_info->is_bcm_dev) {
 			ret = msm_bus_bcm_init(node_dev, &pdata->info[i]);
 			if (ret) {
 				MSM_BUS_ERR("%s: Error intializing bcm %d",
 					__func__, pdata->info[i].node_info->id);
 				goto exit_device_probe;
 			}
+		}
+		if (pdata->info[i].node_info->is_rsc_dev) {
+			ret = msm_bus_rsc_init(pdev, node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing rsc %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
 	}
 
 	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
@@ -1481,18 +1567,6 @@
 	msm_bus_arb_setops_adhoc(&arb_ops);
 	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
 
-	mbox_apps = rpmh_get_byname(pdev, "apps_rsc");
-	if (IS_ERR_OR_NULL(mbox_apps)) {
-		MSM_BUS_ERR("%s: apps mbox failure", __func__);
-		return PTR_ERR(mbox_apps);
-	}
-
-	mbox_disp = rpmh_get_byname(pdev, "disp_rsc");
-	if (IS_ERR_OR_NULL(mbox_disp)) {
-		MSM_BUS_ERR("%s: disp mbox failure", __func__);
-		return PTR_ERR(mbox_disp);
-	}
-
 	devm_kfree(&pdev->dev, pdata->info);
 	devm_kfree(&pdev->dev, pdata);
 exit_device_probe:
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
index c417ebe..5710bca 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
@@ -90,6 +90,37 @@
 	return NULL;
 }
 
+static struct msm_bus_rsc_device_type *get_rsc_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_rsc_device_type *rsc_dev;
+	int ret;
+
+	rsc_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_rsc_device_type),
+			GFP_KERNEL);
+	if (!rsc_dev) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for rsc_dev\n");
+		goto rsc_dev_err;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,req_state",
+			&rsc_dev->req_state);
+	if (ret) {
+		dev_dbg(&pdev->dev, "req_state missing, using default\n");
+		rsc_dev->req_state = 2;
+	}
+
+	return rsc_dev;
+
+rsc_dev_err:
+	devm_kfree(&pdev->dev, rsc_dev);
+	rsc_dev = 0;
+	return NULL;
+}
+
 static struct msm_bus_bcm_device_type *get_bcm_device_info(
 		struct device_node *dev_node,
 		struct platform_device *pdev)
@@ -113,11 +144,6 @@
 		goto bcm_dev_err;
 	}
 
-	ret = of_property_read_u32(dev_node, "qcom,drv-id",
-			&bcm_dev->drv_id);
-	if (ret)
-		dev_dbg(&pdev->dev, "drv-id is missing\n");
-
 	return bcm_dev;
 
 bcm_dev_err:
@@ -359,6 +385,7 @@
 	struct device_node *con_node;
 	struct device_node *bus_dev;
 	struct device_node *bcm_dev;
+	struct device_node *rsc_dev;
 
 	node_info = devm_kzalloc(&pdev->dev,
 			sizeof(struct msm_bus_node_info_type),
@@ -456,13 +483,34 @@
 					node_info->id);
 			goto node_info_err;
 		}
-		dev_err(&pdev->dev, "found bcm device. Node %d BCM:%d\n",
-				node_info->id, node_info->bcm_dev_ids[0]);
-
 		of_node_put(bcm_dev);
 	}
 
+	if (of_get_property(dev_node, "qcom,rscs", &size)) {
+		node_info->num_rsc_devs = size / sizeof(int);
+		node_info->rsc_dev_ids = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+	} else {
+		node_info->num_rsc_devs = 0;
+		node_info->rsc_devs = 0;
+	}
+
+	for (i = 0; i < node_info->num_rsc_devs; i++) {
+		rsc_dev = of_parse_phandle(dev_node, "qcom,rscs", i);
+		if (IS_ERR_OR_NULL(rsc_dev))
+			goto node_info_err;
+
+		if (of_property_read_u32(rsc_dev, "cell-id",
+				&node_info->rsc_dev_ids[i])){
+			dev_err(&pdev->dev, "Can't find rsc device. Node %d",
+					node_info->id);
+			goto node_info_err;
+		}
+		of_node_put(rsc_dev);
+	}
+
 	node_info->is_bcm_dev = of_property_read_bool(dev_node, "qcom,bcm-dev");
+	node_info->is_rsc_dev = of_property_read_bool(dev_node, "qcom,rsc-dev");
 	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
 	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
 
@@ -505,6 +553,18 @@
 		}
 	}
 
+	if (node_device->node_info->is_rsc_dev) {
+
+		node_device->rscdev = get_rsc_device_info(dev_node, pdev);
+
+		if (IS_ERR_OR_NULL(node_device->rscdev)) {
+			dev_err(&pdev->dev,
+				"Error: RSC device info missing\n");
+			devm_kfree(&pdev->dev, node_device->node_info);
+			return -ENODATA;
+		}
+	}
+
 	if (node_device->node_info->is_fab_dev) {
 		struct device_node *qos_clk_node;
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index 3b5eabd..f415735 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -20,6 +20,8 @@
 #include <linux/msm_bus_rules.h>
 #include "msm_bus_core.h"
 
+#define VCD_MAX_CNT 16
+
 struct msm_bus_node_device_type;
 
 struct link_node {
@@ -64,6 +66,12 @@
 	uint32_t vrail_used;
 };
 
+struct msm_bus_rsc_device_type {
+	struct rpmh_client *mbox;
+	struct list_head bcm_clist[VCD_MAX_CNT];
+	int req_state;
+};
+
 struct msm_bus_bcm_device_type {
 	const char *name;
 	uint32_t width;
@@ -128,16 +136,20 @@
 	unsigned int num_connections;
 	unsigned int num_blist;
 	unsigned int num_bcm_devs;
+	unsigned int num_rsc_devs;
 	bool is_fab_dev;
 	bool virt_dev;
 	bool is_bcm_dev;
+	bool is_rsc_dev;
 	bool is_traversed;
 	unsigned int *connections;
 	unsigned int *black_listed_connections;
 	unsigned int *bcm_dev_ids;
+	unsigned int *rsc_dev_ids;
 	struct device **dev_connections;
 	struct device **black_connections;
 	struct device **bcm_devs;
+	struct device **rsc_devs;
 	int bcm_req_idx;
 	unsigned int bus_device_id;
 	struct device *bus_device;
@@ -151,6 +163,7 @@
 	struct msm_bus_node_info_type *node_info;
 	struct msm_bus_fab_device_type *fabdev;
 	struct msm_bus_bcm_device_type *bcmdev;
+	struct msm_bus_rsc_device_type *rscdev;
 	int num_lnodes;
 	struct link_node *lnode_list;
 	struct nodebw node_bw[NUM_CTX];
@@ -164,6 +177,7 @@
 	struct device_node *of_node;
 	struct device dev;
 	bool dirty;
+	bool updated;
 	bool query_dirty;
 	struct list_head dev_link;
 	struct list_head devlist;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
index 5b5159d..03042fa 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rules.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -588,7 +588,7 @@
 static bool __rule_unregister(int num_rules, struct bus_rule_type *rule,
 					struct notifier_block *nb)
 {
-	int i;
+	int i = 0;
 	struct rule_node_info *node = NULL;
 	struct rule_node_info *node_tmp = NULL;
 	struct rules_def *node_rule;
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 38d29e4..2471d27 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -502,13 +502,21 @@
 				struct queue_rx_intent_work, work);
 	struct glink_pkt_dev *devp = work_item->devp;
 
-	if (!devp || !devp->handle) {
+	if (!devp) {
+		GLINK_PKT_ERR("%s: Invalid device\n", __func__);
+		kfree(work_item);
+		return;
+	}
+	mutex_lock(&devp->ch_lock);
+	if (!devp->handle) {
 		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		mutex_unlock(&devp->ch_lock);
 		kfree(work_item);
 		return;
 	}
 
 	ret = glink_queue_rx_intent(devp->handle, devp, work_item->intent_size);
+	mutex_unlock(&devp->ch_lock);
 	GLINK_PKT_INFO("%s: Triggered with size[%zu] ret[%d]\n",
 				__func__, work_item->intent_size, ret);
 	if (ret)
@@ -664,8 +672,15 @@
 	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
 
 	ret = copy_to_user(buf, pkt->data, pkt->size);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_to_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, pkt->size);
+		spin_lock_irqsave(&devp->pkt_list_lock, flags);
+		list_add_tail(&pkt->list, &devp->pkt_list);
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		return -EFAULT;
+	}
 
 	ret = pkt->size;
 	glink_rx_done(devp->handle, pkt->data, false);
@@ -739,8 +754,13 @@
 	}
 
 	ret = copy_from_user(data, buf, count);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_from_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, count);
+		kfree(data);
+		return -EFAULT;
+	}
 
 	ret = glink_tx(devp->handle, data, data, count, GLINK_TX_REQ_INTENT);
 	if (ret) {
@@ -1038,6 +1058,27 @@
 }
 
 /**
+ * pop_rx_pkt() - return the first packet from the rx pkt_list
+ * @devp:	pointer to the G-Link packet device.
+ *
+ * This function returns the first item from the rx pkt_list, or NULL if the
+ * list is empty.
+ */
+struct glink_rx_pkt *pop_rx_pkt(struct glink_pkt_dev *devp)
+{
+	unsigned long flags;
+	struct glink_rx_pkt *pkt = NULL;
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	if (!list_empty(&devp->pkt_list)) {
+		pkt = list_first_entry(&devp->pkt_list,
+				struct glink_rx_pkt, list);
+		list_del(&pkt->list);
+	}
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+	return pkt;
+}
+
+/**
  * glink_pkt_release() - release operation on glink_pkt device
  * inode:	Pointer to the inode structure.
  * file:	Pointer to the file structure.
@@ -1051,6 +1092,7 @@
 	int ret = 0;
 	struct glink_pkt_dev *devp = file->private_data;
 	unsigned long flags;
+	struct glink_rx_pkt *pkt;
 
 	GLINK_PKT_INFO("%s() on dev id:%d by [%s] ref_cnt[%d]\n",
 			__func__, devp->i, current->comm, devp->ref_cnt);
@@ -1059,9 +1101,14 @@
 		devp->ref_cnt--;
 
 	if (devp->handle && devp->ref_cnt == 0) {
+		while ((pkt = pop_rx_pkt(devp))) {
+			glink_rx_done(devp->handle, pkt->data, false);
+			kfree(pkt);
+		}
 		wake_up(&devp->ch_read_wait_queue);
 		wake_up_interruptible(&devp->ch_opened_wait_queue);
 		ret = glink_close(devp->handle);
+		devp->handle = NULL;
 		if (ret)  {
 			GLINK_PKT_ERR("%s: close failed ret[%d]\n",
 						__func__, ret);
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index c2fb37b..1bbd751 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -79,6 +79,7 @@
 static void *smem_ramdump_dev;
 static DEFINE_MUTEX(spinlock_init_lock);
 static DEFINE_SPINLOCK(smem_init_check_lock);
+static struct device *smem_dev;
 static int smem_module_inited;
 static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
 static DEFINE_MUTEX(smem_module_init_notifier_lock);
@@ -374,7 +375,7 @@
 	uint32_t a_hdr_size;
 	int rc;
 
-	SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
+	SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
 					flags, skip_init_check, use_rspinlock);
 
 	if (!skip_init_check && !smem_initialized_check())
@@ -817,7 +818,7 @@
 void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
 							unsigned int flags)
 {
-	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);
+	SMEM_DBG("%s(%u, %u, %u)\n", __func__, id, to_proc, flags);
 
 	/*
 	 * Handle the circular dependency between SMEM and software implemented
@@ -1084,7 +1085,8 @@
 	void *handle;
 	struct restart_notifier_block *nb;
 
-	smem_ramdump_dev = create_ramdump_device("smem", NULL);
+	if (smem_dev)
+		smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
 	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
 		LOG_ERR("%s: Unable to create smem ramdump device.\n",
 			__func__);
@@ -1509,7 +1511,7 @@
 		SMEM_INFO("smem security enabled\n");
 		smem_init_security();
 	}
-
+	smem_dev = &pdev->dev;
 	probe_done = true;
 
 	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 03a6204..11e1b4d 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -928,7 +928,8 @@
 					desc->attrs);
 			priv->region = NULL;
 		}
-		pil_clear_segment(desc);
+		if (desc->clear_fw_region && priv->region_start)
+			pil_clear_segment(desc);
 		pil_release_mmap(desc);
 	}
 	return ret;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 752a6ce..af7249b 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
  * This defaults to iounmap if not specified.
  * @shutdown_fail: Set if PIL op for shutting down subsystem fails.
  * @modem_ssr: true if modem is restarting, false if booting for first time.
+ * @clear_fw_region: Clear the firmware region if loading fails.
  * @subsys_vmid: memprot id for the subsystem.
  */
 struct pil_desc {
@@ -54,6 +55,7 @@
 	void *map_data;
 	bool shutdown_fail;
 	bool modem_ssr;
+	bool clear_fw_region;
 	u32 subsys_vmid;
 };
 
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index fb4d0ea..d9d6c72 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -637,6 +637,7 @@
 	if (ret)
 		return ERR_PTR(ret);
 
+	desc->clear_fw_region = false;
 	desc->dev = &pdev->dev;
 
 	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
diff --git a/drivers/soc/qcom/qdsp6v2/adsp-loader.c b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
index 1bde1bf..d90267e 100644
--- a/drivers/soc/qcom/qdsp6v2/adsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014, 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
 #include <linux/qdsp6v2/apr.h>
 #include <linux/of_device.h>
 #include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
 #include <soc/qcom/subsystem_restart.h>
 
 #define Q6_PIL_GET_DELAY_MS 100
@@ -44,12 +46,13 @@
 	NULL,
 };
 
+static struct work_struct adsp_ldr_work;
 static struct platform_device *adsp_private;
 static void adsp_loader_unload(struct platform_device *pdev);
 
-static void adsp_loader_do(struct platform_device *pdev)
+static void adsp_load_fw(struct work_struct *adsp_ldr_work)
 {
-
+	struct platform_device *pdev = adsp_private;
 	struct adsp_loader_private *priv = NULL;
 
 	const char *adsp_dt = "qcom,adsp-state";
@@ -146,6 +149,10 @@
 	dev_err(&pdev->dev, "%s: Q6 image loading failed\n", __func__);
 }
 
+static void adsp_loader_do(struct platform_device *pdev)
+{
+	schedule_work(&adsp_ldr_work);
+}
 
 static ssize_t adsp_boot_store(struct kobject *kobj,
 	struct kobj_attribute *attr,
@@ -272,6 +279,8 @@
 		return ret;
 	}
 
+	INIT_WORK(&adsp_ldr_work, adsp_load_fw);
+
 	return 0;
 }
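The change above defers the actual ADSP image load from the sysfs store handler to the system workqueue, so the writer returns immediately while the load runs later in process context. A minimal, generic sketch of this deferral pattern follows; the names (loader_work, do_slow_firmware_load, boot_store) are illustrative, not the driver's own.

#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>

static struct work_struct loader_work;

/* Stub standing in for the real, potentially slow, firmware load. */
static void do_slow_firmware_load(void)
{
}

static void loader_work_fn(struct work_struct *work)
{
	/* Runs later in process context on the shared system workqueue. */
	do_slow_firmware_load();
}

static ssize_t boot_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	/* Defer the heavy lifting instead of blocking the sysfs writer. */
	schedule_work(&loader_work);
	return count;
}

static int __init loader_example_init(void)
{
	INIT_WORK(&loader_work, loader_work_fn);
	return 0;
}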
 
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index 40a539e..40aac6a 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -17,7 +17,6 @@
 #include <linux/uaccess.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
-#include <linux/list.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/errno.h>
@@ -33,16 +32,10 @@
 #define APR_MAXIMUM_NUM_OF_RETRIES 2
 
 struct apr_tx_buf {
-	struct list_head list;
 	struct apr_pkt_priv pkt_priv;
 	char buf[APR_MAX_BUF];
 };
 
-struct apr_buf_list {
-	struct list_head list;
-	spinlock_t lock;
-};
-
 struct link_state {
 	uint32_t dest;
 	void *handle;
@@ -51,7 +44,6 @@
 };
 
 static struct link_state link_state[APR_DEST_MAX];
-static struct apr_buf_list buf_list;
 
 static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
 	{
@@ -67,43 +59,36 @@
 static struct apr_svc_ch_dev
 	apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
 
-static struct apr_tx_buf *apr_get_free_buf(int len)
+static struct apr_tx_buf *apr_alloc_buf(int len)
 {
-	struct apr_tx_buf *tx_buf;
-	unsigned long flags;
 
 	if (len > APR_MAX_BUF) {
 		pr_err("%s: buf too large [%d]\n", __func__, len);
 		return ERR_PTR(-EINVAL);
 	}
 
-	spin_lock_irqsave(&buf_list.lock, flags);
-	if (list_empty(&buf_list.list)) {
-		spin_unlock_irqrestore(&buf_list.lock, flags);
-		pr_err("%s: No buf available\n", __func__);
-		return ERR_PTR(-ENOMEM);
+	return kzalloc(sizeof(struct apr_tx_buf), GFP_ATOMIC);
+}
+
+static void apr_free_buf(const void *ptr)
+{
+
+	struct apr_pkt_priv *apr_pkt_priv = (struct apr_pkt_priv *)ptr;
+	struct apr_tx_buf *tx_buf;
+
+	if (!apr_pkt_priv) {
+		pr_err("%s: Invalid apr_pkt_priv\n", __func__);
+		return;
 	}
 
-	tx_buf = list_first_entry(&buf_list.list, struct apr_tx_buf, list);
-	list_del(&tx_buf->list);
-	spin_unlock_irqrestore(&buf_list.lock, flags);
-
-	return tx_buf;
+	if (apr_pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
+		tx_buf = container_of((void *)apr_pkt_priv,
+				      struct apr_tx_buf, pkt_priv);
+		pr_debug("%s: Freeing buffer %pK", __func__, tx_buf);
+		kfree(tx_buf);
+	}
 }
 
-static void apr_buf_add_tail(const void *buf)
-{
-	struct apr_tx_buf *list;
-	unsigned long flags;
-
-	if (!buf)
-		return;
-
-	spin_lock_irqsave(&buf_list.lock, flags);
-	list = container_of((void *)buf, struct apr_tx_buf, buf);
-	list_add_tail(&list->list, &buf_list.list);
-	spin_unlock_irqrestore(&buf_list.lock, flags);
-}
 
 static int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
 			   struct apr_pkt_priv *pkt_priv, int len)
@@ -129,14 +114,14 @@
 {
 	int rc = 0, retries = 0;
 	void *pkt_data = NULL;
-	struct apr_tx_buf *tx_buf;
+	struct apr_tx_buf *tx_buf = NULL;
 	struct apr_pkt_priv *pkt_priv_ptr = pkt_priv;
 
 	if (!apr_ch->handle || !pkt_priv)
 		return -EINVAL;
 
 	if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
-		tx_buf = apr_get_free_buf(len);
+		tx_buf = apr_alloc_buf(len);
 		if (IS_ERR_OR_NULL(tx_buf)) {
 			rc = -EINVAL;
 			goto exit;
@@ -159,7 +144,7 @@
 	if (rc < 0) {
 		pr_err("%s: Unable to send the packet, rc:%d\n", __func__, rc);
 		if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER)
-			apr_buf_add_tail(pkt_data);
+			kfree(tx_buf);
 	}
 exit:
 	return rc;
@@ -188,39 +173,17 @@
 static void apr_tal_notify_tx_abort(void *handle, const void *priv,
 				    const void *pkt_priv)
 {
-	struct apr_pkt_priv *apr_pkt_priv_ptr =
-				(struct apr_pkt_priv *)pkt_priv;
-	struct apr_tx_buf *list_node;
-
-	if (!apr_pkt_priv_ptr) {
-		pr_err("%s: Invalid pkt_priv\n", __func__);
-		return;
-	}
-
-	pr_debug("%s: tx_abort received for apr_pkt_priv_ptr:%pK\n",
-		 __func__, apr_pkt_priv_ptr);
-
-	if (apr_pkt_priv_ptr->pkt_owner == APR_PKT_OWNER_DRIVER) {
-		list_node = container_of(apr_pkt_priv_ptr,
-					 struct apr_tx_buf, pkt_priv);
-		apr_buf_add_tail(list_node->buf);
-	}
+	pr_debug("%s: tx_abort received for pkt_priv:%pK\n",
+		 __func__, pkt_priv);
+	apr_free_buf(pkt_priv);
 }
 
 void apr_tal_notify_tx_done(void *handle, const void *priv,
 			    const void *pkt_priv, const void *ptr)
 {
-	struct apr_pkt_priv *apr_pkt_priv = (struct apr_pkt_priv *)pkt_priv;
-
-	if (!pkt_priv || !ptr) {
-		pr_err("%s: Invalid pkt_priv or ptr\n", __func__);
-		return;
-	}
-
-	pr_debug("%s: tx_done received\n", __func__);
-
-	if (apr_pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER)
-		apr_buf_add_tail(ptr);
+	pr_debug("%s: tx_done received for pkt_priv:%pK\n",
+		 __func__, pkt_priv);
+	apr_free_buf(pkt_priv);
 }
 
 bool apr_tal_notify_rx_intent_req(void *handle, const void *priv,
@@ -254,6 +217,7 @@
 	 */
 	pr_debug("%s: remote queued an intent\n", __func__);
 	apr_ch->if_remote_intent_ready = true;
+	wake_up(&apr_ch->wait);
 }
 
 void apr_tal_notify_state(void *handle, const void *priv, unsigned int event)
@@ -456,8 +420,6 @@
 static int __init apr_tal_init(void)
 {
 	int i, j, k;
-	struct apr_tx_buf *buf;
-	struct list_head *ptr, *next;
 
 	for (i = 0; i < APR_DL_MAX; i++) {
 		for (j = 0; j < APR_DEST_MAX; j++) {
@@ -473,21 +435,6 @@
 	for (i = 0; i < APR_DEST_MAX; i++)
 		init_waitqueue_head(&link_state[i].wait);
 
-	spin_lock_init(&buf_list.lock);
-	INIT_LIST_HEAD(&buf_list.list);
-	for (i = 0; i < APR_NUM_OF_TX_BUF; i++) {
-		buf = kzalloc(sizeof(struct apr_tx_buf), GFP_KERNEL);
-		if (!buf) {
-			pr_err("%s: Unable to allocate tx buf\n", __func__);
-			goto tx_buf_alloc_fail;
-		}
-
-		INIT_LIST_HEAD(&buf->list);
-		spin_lock(&buf_list.lock);
-		list_add_tail(&buf->list, &buf_list.list);
-		spin_unlock(&buf_list.lock);
-	}
-
 	link_state[APR_DEST_MODEM].link_state = GLINK_LINK_STATE_DOWN;
 	link_state[APR_DEST_MODEM].handle =
 		glink_register_link_state_cb(&mpss_link_info, NULL);
@@ -501,13 +448,5 @@
 		pr_err("%s: Unable to register lpass link state\n", __func__);
 
 	return 0;
-
-tx_buf_alloc_fail:
-	list_for_each_safe(ptr, next, &buf_list.list) {
-		buf = list_entry(ptr, struct apr_tx_buf, list);
-		list_del(&buf->list);
-		kfree(buf);
-	}
-	return -ENOMEM;
 }
 device_initcall(apr_tal_init);
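The switch from a preallocated buffer list to per-packet kzalloc() works because pkt_priv is embedded in the allocation handed to glink_tx(), so the tx_done/tx_abort callbacks can walk back to the enclosing buffer with container_of() and free it. A reduced sketch of that embed-and-recover idiom, reusing the apr_pkt_priv and APR_MAX_BUF definitions from this file; the real apr_free_buf() additionally checks pkt_owner before freeing.

#include <linux/kernel.h>
#include <linux/slab.h>

struct example_tx_buf {
	struct apr_pkt_priv pkt_priv;	/* passed to glink_tx() as pkt_priv */
	char buf[APR_MAX_BUF];
};

static void example_free_buf(const void *pkt_priv)
{
	/* Recover the enclosing allocation from the embedded member. */
	struct example_tx_buf *tx_buf =
		container_of((void *)pkt_priv, struct example_tx_buf,
			     pkt_priv);

	kfree(tx_buf);
}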
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index a2b0f0e..414c123 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -394,8 +394,8 @@
 	int ret;
 
 	list_for_each_safe(ptr, next, &client_list) {
-		client_data = list_entry(ptr,
-			struct client_data, list);
+		client_data = list_entry(ptr, struct client_data, list);
+
 		ret = audio_notifer_reg_client(client_data);
 		if (ret < 0)
 			pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
@@ -518,9 +518,8 @@
 		goto done;
 	}
 	mutex_lock(&notifier_mutex);
-	list_for_each_safe(ptr, next, &client_data->list) {
-		client_data = list_entry(ptr, struct client_data,
-					list);
+	list_for_each_safe(ptr, next, &client_list) {
+		client_data = list_entry(ptr, struct client_data, list);
 		if (!strcmp(client_name, client_data->client_name)) {
 			ret2 = audio_notifer_dereg_client(client_data);
 			if (ret2 < 0) {
diff --git a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
index 9bb4eb0..70977d3 100644
--- a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
@@ -19,6 +19,8 @@
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
 #include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
 #include <soc/qcom/subsystem_restart.h>
 
 #define BOOT_CMD 1
@@ -47,10 +49,12 @@
 
 static u32 cdsp_state = CDSP_SUBSYS_DOWN;
 static struct platform_device *cdsp_private;
+static struct work_struct cdsp_ldr_work;
 static void cdsp_loader_unload(struct platform_device *pdev);
 
-static int cdsp_loader_do(struct platform_device *pdev)
+static void cdsp_load_fw(struct work_struct *cdsp_ldr_work)
 {
+	struct platform_device *pdev = cdsp_private;
 	struct cdsp_loader_private *priv = NULL;
 
 	int rc = 0;
@@ -99,14 +103,17 @@
 		}
 
 		dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__);
-		return rc;
+		return;
 	}
 
 fail:
 	dev_err(&pdev->dev, "%s: CDSP image loading failed\n", __func__);
-	return rc;
 }
 
+static void cdsp_loader_do(struct platform_device *pdev)
+{
+	schedule_work(&cdsp_ldr_work);
+}
 
 static ssize_t cdsp_boot_store(struct kobject *kobj,
 	struct kobj_attribute *attr,
@@ -124,7 +131,7 @@
 		pr_debug("%s: going to call cdsp_loader_do\n", __func__);
 		cdsp_loader_do(cdsp_private);
 	} else if (boot == IMAGE_UNLOAD_CMD) {
-		pr_debug("%s: going to call adsp_unloader\n", __func__);
+		pr_debug("%s: going to call cdsp_unloader\n", __func__);
 		cdsp_loader_unload(cdsp_private);
 	}
 	return count;
@@ -236,6 +243,8 @@
 		return ret;
 	}
 
+	INIT_WORK(&cdsp_ldr_work, cdsp_load_fw);
+
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c
index 07e8991..f3b1b83 100644
--- a/drivers/soc/qcom/qdsp6v2/voice_svc.c
+++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c
@@ -42,6 +42,12 @@
 	struct list_head response_queue;
 	wait_queue_head_t response_wait;
 	spinlock_t response_lock;
+	/*
+	 * This mutex ensures responses are processed in sequential order and
+	 * that no two threads access and free the same response at the same
+	 * time.
+	 */
+	struct mutex response_mutex_lock;
 };
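The new response_mutex_lock is meant to nest outside the existing response_lock spinlock: the mutex serializes whole read/release transactions and may be held across sleeping calls such as copy_to_user(), while the spinlock only protects list manipulation against the APR callback. A condensed fragment showing the intended nesting; prtd, arg and size are as in the surrounding read path, and example_resp_node is a hypothetical name for the queued element.

	struct example_resp_node *resp;
	unsigned long flags;
	int ret = 0;

	mutex_lock(&prtd->response_mutex_lock);		/* outer, sleepable */

	spin_lock_irqsave(&prtd->response_lock, flags);	/* inner, list only */
	resp = list_first_entry_or_null(&prtd->response_queue,
					struct example_resp_node, list);
	if (resp)
		list_del(&resp->list);
	spin_unlock_irqrestore(&prtd->response_lock, flags);

	if (resp) {
		/* Safe to sleep here: only the mutex is held. */
		if (copy_to_user(arg, &resp->resp, size))
			ret = -EFAULT;
		kfree(resp);
	}

	mutex_unlock(&prtd->response_mutex_lock);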
 
 struct apr_data {
@@ -361,6 +367,9 @@
 	struct voice_svc_prvt *prtd;
 	struct voice_svc_write_msg *data = NULL;
 	uint32_t cmd;
+	struct voice_svc_register *register_data = NULL;
+	struct voice_svc_cmd_request *request_data = NULL;
+	uint32_t request_payload_size;
 
 	pr_debug("%s\n", __func__);
 
@@ -409,12 +418,19 @@
 		 */
 		if (count == (sizeof(struct voice_svc_write_msg) +
 			      sizeof(struct voice_svc_register))) {
-			ret = process_reg_cmd(
-			(struct voice_svc_register *)data->payload, prtd);
+			register_data =
+				(struct voice_svc_register *)data->payload;
+			if (register_data == NULL) {
+				pr_err("%s: register data is NULL", __func__);
+				ret = -EINVAL;
+				goto done;
+			}
+			ret = process_reg_cmd(register_data, prtd);
 			if (!ret)
 				ret = count;
 		} else {
-			pr_err("%s: invalid payload size\n", __func__);
+			pr_err("%s: invalid data payload size for register command\n",
+				__func__);
 			ret = -EINVAL;
 			goto done;
 		}
@@ -423,19 +439,40 @@
 		/*
 		 * Check that count reflects the expected size to ensure
 		 * sufficient memory was allocated. Since voice_svc_cmd_request
-		 * has a variable size, check the minimum value count must be.
+		 * has a variable size, first check that count meets the minimum
+		 * needed to parse the request, then check that it is large
+		 * enough to also hold the request payload.
 		 */
 		if (count >= (sizeof(struct voice_svc_write_msg) +
 			      sizeof(struct voice_svc_cmd_request))) {
-		ret = voice_svc_send_req(
-			(struct voice_svc_cmd_request *)data->payload, prtd);
-		if (!ret)
-			ret = count;
-	} else {
-		pr_err("%s: invalid payload size\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
+			request_data =
+				(struct voice_svc_cmd_request *)data->payload;
+			if (request_data == NULL) {
+				pr_err("%s: request data is NULL", __func__);
+				ret = -EINVAL;
+				goto done;
+			}
+
+			request_payload_size = request_data->payload_size;
+
+			if (count >= (sizeof(struct voice_svc_write_msg) +
+				      sizeof(struct voice_svc_cmd_request) +
+				      request_payload_size)) {
+				ret = voice_svc_send_req(request_data, prtd);
+				if (!ret)
+					ret = count;
+			} else {
+				pr_err("%s: invalid request payload size\n",
+					__func__);
+				ret = -EINVAL;
+				goto done;
+			}
+		} else {
+			pr_err("%s: invalid data payload size for request command\n",
+				__func__);
+			ret = -EINVAL;
+			goto done;
+		}
 		break;
 	default:
 		pr_debug("%s: Invalid command: %u\n", __func__, cmd);
@@ -466,6 +503,7 @@
 		goto done;
 	}
 
+	mutex_lock(&prtd->response_mutex_lock);
 	spin_lock_irqsave(&prtd->response_lock, spin_flags);
 
 	if (list_empty(&prtd->response_queue)) {
@@ -479,7 +517,7 @@
 			pr_debug("%s: Read timeout\n", __func__);
 
 			ret = -ETIMEDOUT;
-			goto done;
+			goto unlock;
 		} else if (ret > 0 && !list_empty(&prtd->response_queue)) {
 			pr_debug("%s: Interrupt received for response\n",
 				 __func__);
@@ -487,7 +525,7 @@
 			pr_debug("%s: Interrupted by SIGNAL %d\n",
 				 __func__, ret);
 
-			goto done;
+			goto unlock;
 		}
 
 		spin_lock_irqsave(&prtd->response_lock, spin_flags);
@@ -506,7 +544,7 @@
 		       __func__, count, size);
 
 		ret = -ENOMEM;
-		goto done;
+		goto unlock;
 	}
 
 	if (!access_ok(VERIFY_WRITE, arg, size)) {
@@ -514,7 +552,7 @@
 		       __func__);
 
 		ret = -EPERM;
-		goto done;
+		goto unlock;
 	}
 
 	ret = copy_to_user(arg, &resp->resp,
@@ -524,7 +562,7 @@
 		pr_err("%s: copy_to_user failed %d\n", __func__, ret);
 
 		ret = -EPERM;
-		goto done;
+		goto unlock;
 	}
 
 	spin_lock_irqsave(&prtd->response_lock, spin_flags);
@@ -538,6 +576,8 @@
 
 	ret = count;
 
+unlock:
+	mutex_unlock(&prtd->response_mutex_lock);
 done:
 	return ret;
 }
@@ -591,6 +631,7 @@
 	INIT_LIST_HEAD(&prtd->response_queue);
 	init_waitqueue_head(&prtd->response_wait);
 	spin_lock_init(&prtd->response_lock);
+	mutex_init(&prtd->response_mutex_lock);
 	file->private_data = (void *)prtd;
 
 	/* Current APR implementation doesn't support session based
@@ -641,6 +682,7 @@
 			pr_err("%s: Failed to dereg MVM %d\n", __func__, ret);
 	}
 
+	mutex_lock(&prtd->response_mutex_lock);
 	spin_lock_irqsave(&prtd->response_lock, spin_flags);
 
 	while (!list_empty(&prtd->response_queue)) {
@@ -654,6 +696,9 @@
 	}
 
 	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+	mutex_unlock(&prtd->response_mutex_lock);
+
+	mutex_destroy(&prtd->response_mutex_lock);
 
 	kfree(file->private_data);
 	file->private_data = NULL;
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 9c3f9431..0b952a4 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -101,6 +101,7 @@
 		.ei_array	= NULL,
 	},
 };
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
 
 struct elem_info qmi_error_resp_type_v01_ei[] = {
 	{
diff --git a/drivers/soc/qcom/qpnp-pbs.c b/drivers/soc/qcom/qpnp-pbs.c
new file mode 100644
index 0000000..287c8a2
--- /dev/null
+++ b/drivers/soc/qcom/qpnp-pbs.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"PBS: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/qpnp/qpnp-pbs.h>
+
+#define QPNP_PBS_DEV_NAME "qcom,qpnp-pbs"
+
+#define PBS_CLIENT_TRIG_CTL		0x42
+#define PBS_CLIENT_SW_TRIG_BIT		BIT(7)
+#define PBS_CLIENT_SCRATCH1		0x50
+#define PBS_CLIENT_SCRATCH2		0x51
+
+static LIST_HEAD(pbs_dev_list);
+static DEFINE_MUTEX(pbs_list_lock);
+
+struct qpnp_pbs {
+	struct platform_device	*pdev;
+	struct device		*dev;
+	struct device_node	*dev_node;
+	struct regmap		*regmap;
+	struct mutex		pbs_lock;
+	struct list_head	link;
+
+	u32			base;
+};
+
+static int qpnp_pbs_read(struct qpnp_pbs *pbs, u32 address,
+					u8 *val, int count)
+{
+	int rc = 0;
+	struct platform_device *pdev = pbs->pdev;
+
+	rc = regmap_bulk_read(pbs->regmap, address, val, count);
+	if (rc)
+		pr_err("Failed to read address=0x%02x sid=0x%02x rc=%d\n",
+			address, to_spmi_device(pdev->dev.parent)->usid, rc);
+
+	return rc;
+}
+
+static int qpnp_pbs_write(struct qpnp_pbs *pbs, u16 address,
+					u8 *val, int count)
+{
+	int rc = 0;
+	struct platform_device *pdev = pbs->pdev;
+
+	rc = regmap_bulk_write(pbs->regmap, address, val, count);
+	if (rc < 0)
+		pr_err("Failed to write address =0x%02x sid=0x%02x rc=%d\n",
+			  address, to_spmi_device(pdev->dev.parent)->usid, rc);
+	else
+		pr_debug("Wrote 0x%02X to addr 0x%04x\n", *val, address);
+
+	return rc;
+}
+
+static int qpnp_pbs_masked_write(struct qpnp_pbs *pbs, u16 address,
+						   u8 mask, u8 val)
+{
+	int rc;
+
+	rc = regmap_update_bits(pbs->regmap, address, mask, val);
+	if (rc < 0)
+		pr_err("Failed to write address 0x%04X, rc = %d\n",
+					address, rc);
+	else
+		pr_debug("Wrote 0x%02X to addr 0x%04X\n",
+			val, address);
+
+	return rc;
+}
+
+static struct qpnp_pbs *get_pbs_client_node(struct device_node *dev_node)
+{
+	struct qpnp_pbs *pbs;
+
+	mutex_lock(&pbs_list_lock);
+	list_for_each_entry(pbs, &pbs_dev_list, link) {
+		if (dev_node == pbs->dev_node) {
+			mutex_unlock(&pbs_list_lock);
+			return pbs;
+		}
+	}
+
+	mutex_unlock(&pbs_list_lock);
+	return ERR_PTR(-EINVAL);
+}
+
+static int qpnp_pbs_wait_for_ack(struct qpnp_pbs *pbs, u8 bit_pos)
+{
+	int rc = 0;
+	u16 retries = 2000, dly = 1000;
+	u8 val;
+
+	while (retries) {
+		rc = qpnp_pbs_read(pbs, pbs->base +
+					PBS_CLIENT_SCRATCH2, &val, 1);
+		if (rc < 0) {
+			pr_err("Failed to read register %x rc = %d\n",
+						PBS_CLIENT_SCRATCH2, rc);
+			return rc;
+		}
+
+		if (val == 0xFF) {
+			/* PBS error - clear SCRATCH2 register */
+			val = 0;
+			rc = qpnp_pbs_write(pbs, pbs->base +
+					PBS_CLIENT_SCRATCH2, &val, 1);
+			if (rc < 0) {
+				pr_err("Failed to clear register %x rc=%d\n",
+						PBS_CLIENT_SCRATCH2, rc);
+				return rc;
+			}
+
+			pr_err("NACK from PBS for bit %d\n", bit_pos);
+			return -EINVAL;
+		}
+
+		if (val & BIT(bit_pos)) {
+			pr_debug("PBS sequence for bit %d executed!\n",
+						 bit_pos);
+			break;
+		}
+
+		usleep_range(dly, dly + 100);
+		retries--;
+	}
+
+	if (!retries) {
+		pr_err("Timeout for PBS ACK/NACK for bit %d\n", bit_pos);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/**
+ * qpnp_pbs_trigger_event - Trigger the PBS RAM sequence
+ * @dev_node: Device node of the requesting PBS client.
+ * @bitmap: Bitmap of the PBS sequence bits to be triggered.
+ *
+ * This function is used by a client driver to request execution of a PBS
+ * RAM sequence.
+ *
+ * The PBS trigger sequence involves
+ * 1. Setting the PBS sequence bit in PBS_CLIENT_SCRATCH1.
+ * 2. Initiating the SW PBS trigger.
+ * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for the
+ *    completion of the sequence.
+ * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute.
+ *
+ * Returns 0 if the PBS RAM sequence executed successfully, < 0 on error.
+ */
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap)
+{
+	struct qpnp_pbs *pbs;
+	int rc = 0;
+	u16 bit_pos = 0;
+	u8 val, mask  = 0;
+
+	if (!dev_node)
+		return -EINVAL;
+
+	if (!bitmap) {
+		pr_err("Invalid bitmap passed by client\n");
+		return -EINVAL;
+	}
+
+	pbs = get_pbs_client_node(dev_node);
+	if (IS_ERR_OR_NULL(pbs)) {
+		pr_err("Unable to find the PBS dev_node\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&pbs->pbs_lock);
+	rc = qpnp_pbs_read(pbs, pbs->base + PBS_CLIENT_SCRATCH2, &val, 1);
+	if (rc < 0) {
+		pr_err("read register %x failed rc = %d\n",
+					PBS_CLIENT_SCRATCH2, rc);
+		goto out;
+	}
+
+	if (val == 0xFF) {
+		/* PBS error - clear SCRATCH2 register */
+		val = 0;
+		rc = qpnp_pbs_write(pbs, pbs->base + PBS_CLIENT_SCRATCH2,
+				    &val, 1);
+		if (rc < 0) {
+			pr_err("Failed to clear register %x rc=%d\n",
+						PBS_CLIENT_SCRATCH2, rc);
+			goto out;
+		}
+	}
+
+	for (bit_pos = 0; bit_pos < 8; bit_pos++) {
+		if (bitmap & BIT(bit_pos)) {
+			/*
+			 * Clear the PBS sequence bit position in
+			 * PBS_CLIENT_SCRATCH2 mask register.
+			 */
+			rc = qpnp_pbs_masked_write(pbs, pbs->base +
+					 PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+			if (rc < 0) {
+				pr_err("Failed to clear %x reg bit rc=%d\n",
+						PBS_CLIENT_SCRATCH2, rc);
+				goto error;
+			}
+
+			/*
+			 * Set the PBS sequence bit position in
+			 * PBS_CLIENT_SCRATCH1 register.
+			 */
+			val = mask = BIT(bit_pos);
+			rc = qpnp_pbs_masked_write(pbs, pbs->base +
+						PBS_CLIENT_SCRATCH1, mask, val);
+			if (rc < 0) {
+				pr_err("Failed to set %x reg bit rc=%d\n",
+						PBS_CLIENT_SCRATCH1, rc);
+				goto error;
+			}
+
+			/* Initiate the SW trigger */
+			val = mask = PBS_CLIENT_SW_TRIG_BIT;
+			rc = qpnp_pbs_masked_write(pbs, pbs->base +
+						PBS_CLIENT_TRIG_CTL, mask, val);
+			if (rc < 0) {
+				pr_err("Failed to write register %x rc=%d\n",
+						PBS_CLIENT_TRIG_CTL, rc);
+				goto error;
+			}
+
+			rc = qpnp_pbs_wait_for_ack(pbs, bit_pos);
+			if (rc < 0) {
+				pr_err("Error during wait_for_ack\n");
+				goto error;
+			}
+
+			/*
+			 * Clear the PBS sequence bit position in
+			 * PBS_CLIENT_SCRATCH1 register.
+			 */
+			rc = qpnp_pbs_masked_write(pbs, pbs->base +
+					PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0);
+			if (rc < 0) {
+				pr_err("Failed to clear %x reg bit rc=%d\n",
+						PBS_CLIENT_SCRATCH1, rc);
+				goto error;
+			}
+
+			/*
+			 * Clear the PBS sequence bit position in
+			 * PBS_CLIENT_SCRATCH2 mask register.
+			 */
+			rc = qpnp_pbs_masked_write(pbs, pbs->base +
+					PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+			if (rc < 0) {
+				pr_err("Failed to clear %x reg bit rc=%d\n",
+						PBS_CLIENT_SCRATCH2, rc);
+				goto error;
+			}
+
+		}
+	}
+
+error:
+	/* Clear all the requested bitmap */
+	rc = qpnp_pbs_masked_write(pbs, pbs->base + PBS_CLIENT_SCRATCH1,
+						bitmap, 0);
+	if (rc < 0)
+		pr_err("Failed to clear %x reg bit rc=%d\n",
+					PBS_CLIENT_SCRATCH1, rc);
+out:
+	mutex_unlock(&pbs->pbs_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pbs_trigger_event);
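A hedged sketch of how a client driver might invoke this, assuming the client's device tree node carries a phandle to the qcom,qpnp-pbs node; the qcom,pbs-client property name and the BIT(0) sequence bit are illustrative only, not mandated by this driver.

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/qpnp/qpnp-pbs.h>

static int example_run_pbs(struct device *dev)
{
	struct device_node *pbs_node;
	int rc;

	/* Hypothetical phandle property pointing at the PBS device node. */
	pbs_node = of_parse_phandle(dev->of_node, "qcom,pbs-client", 0);
	if (!pbs_node)
		return -ENODEV;

	/* Request execution of the sequence wired to SCRATCH1 bit 0. */
	rc = qpnp_pbs_trigger_event(pbs_node, BIT(0));

	of_node_put(pbs_node);
	return rc;
}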
+
+static int qpnp_pbs_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 val = 0;
+	struct qpnp_pbs *pbs;
+
+	pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL);
+	if (!pbs)
+		return -ENOMEM;
+
+	pbs->pdev = pdev;
+	pbs->dev = &pdev->dev;
+	pbs->dev_node = pdev->dev.of_node;
+	pbs->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pbs->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &val);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+
+	pbs->base = val;
+	mutex_init(&pbs->pbs_lock);
+
+	dev_set_drvdata(&pdev->dev, pbs);
+
+	mutex_lock(&pbs_list_lock);
+	list_add(&pbs->link, &pbs_dev_list);
+	mutex_unlock(&pbs_list_lock);
+
+	return 0;
+}
+
+static const struct of_device_id qpnp_pbs_match_table[] = {
+	{ .compatible = QPNP_PBS_DEV_NAME },
+	{}
+};
+
+static struct platform_driver qpnp_pbs_driver = {
+	.driver	= {
+		.name		= QPNP_PBS_DEV_NAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= qpnp_pbs_match_table,
+	},
+	.probe	= qpnp_pbs_probe,
+};
+
+static int __init qpnp_pbs_init(void)
+{
+	return platform_driver_register(&qpnp_pbs_driver);
+}
+arch_initcall(qpnp_pbs_init);
+
+static void __exit qpnp_pbs_exit(void)
+{
+	platform_driver_unregister(&qpnp_pbs_driver);
+}
+module_exit(qpnp_pbs_exit);
+
+MODULE_DESCRIPTION("QPNP PBS DRIVER");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_PBS_DEV_NAME);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 996ce64..aeecf29 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -454,7 +454,7 @@
  * @n: The array of count of elements in each batch, 0 terminated.
  *
  * Write a request to the mailbox controller without caching. If the request
- * state is ACTIVE_ONLY, then the requests are treated as completion requests
+ * state is ACTIVE or AWAKE, then the requests are treated as completion requests
  * and sent to the controller immediately. The function waits until all the
  * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
  * request is sent as fire-n-forget and no ack is expected.
@@ -468,7 +468,8 @@
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
 	int count = 0;
-	int ret, i = 0;
+	int ret, i, j, k;
+	bool complete_set;
 
 	if (rpmh_standalone)
 		return 0;
@@ -479,6 +480,27 @@
 	if (count >= RPMH_MAX_REQ_IN_BATCH)
 		return -EINVAL;
 
+	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
+		/*
+		 * Ensure the 'complete' bit is set for at least one command in
+		 * each set for active/awake requests.
+		 */
+		for (i = 0, k = 0; i < count; k += n[i], i++) {
+			complete_set = false;
+			for (j = 0; j < n[i]; j++) {
+				if (cmd[k + j].complete) {
+					complete_set = true;
+					break;
+				}
+			}
+			if (!complete_set) {
+				dev_err(rc->dev, "No completion set for batch\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Create async request batches */
 	for (i = 0; i < count; i++) {
 		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i], false);
 		if (IS_ERR_OR_NULL(rpm_msg[i]))
@@ -488,11 +510,11 @@
 		cmd += n[i];
 	}
 
-	if (state == RPMH_ACTIVE_ONLY_STATE) {
+	/* Send if Active or Awake and wait for the whole set to complete */
+	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
 		might_sleep();
 		atomic_set(&wait_count, count);
 		for (i = 0; i < count; i++) {
-			rpm_msg[i]->msg.is_complete = true;
 			/* Bypass caching and write to mailbox directly */
 			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
 			if (ret < 0)
@@ -501,6 +523,7 @@
 		return wait_event_interruptible(waitq,
 					atomic_read(&wait_count) == 0);
 	} else {
+		/* Send Sleep requests to the controller, expect no response */
 		for (i = 0; i < count; i++) {
 			ret = mbox_send_controller_data(rc->chan,
 						&rpm_msg[i]->msg);
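With the new validation, an active or awake batch handed to rpmh_write_passthru() must mark at least one command per set as a completion request. A caller-side sketch that satisfies the check; the addresses and data are placeholders, the addr/data field names are assumed from the tcs_cmd definition used by this driver, and rc is the rpmh_client obtained at probe time.

	struct tcs_cmd cmd[3] = {
		{ .addr = 0x1000, .data = 0x1, .complete = true },	/* set 0 */
		{ .addr = 0x1004, .data = 0x0, .complete = false },	/* set 0 */
		{ .addr = 0x2000, .data = 0x1, .complete = true },	/* set 1 */
	};
	int n[] = { 2, 1, 0 };	/* two sets (2 and 1 commands), 0-terminated */
	int ret;

	ret = rpmh_write_passthru(rc, RPMH_ACTIVE_ONLY_STATE, cmd, n);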
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index b2627f2..f1e7347 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -365,28 +365,19 @@
 			int source_nelems, int *dest_vmids,
 			int *dest_perms, int dest_nelems)
 {
-	struct sg_table *table;
+	struct sg_table table;
 	int ret;
 
-	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	ret = sg_alloc_table(&table, 1, GFP_KERNEL);
 	if (ret)
-		goto err1;
+		return ret;
 
-	sg_set_page(table->sgl, phys_to_page(addr), size, 0);
+	sg_set_page(table.sgl, phys_to_page(addr), size, 0);
 
-	ret = hyp_assign_table(table, source_vm_list, source_nelems, dest_vmids,
-						dest_perms, dest_nelems);
-	if (ret)
-		goto err2;
+	ret = hyp_assign_table(&table, source_vm_list, source_nelems,
+			       dest_vmids, dest_perms, dest_nelems);
 
-	return ret;
-err2:
-	sg_free_table(table);
-err1:
-	kfree(table);
+	sg_free_table(&table);
 	return ret;
 }
 
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 2f578c5..b40d678 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -111,6 +111,7 @@
 			qmi_handle_create(service_locator_clnt_notify, NULL);
 	if (!service_locator.clnt_handle) {
 		service_locator.clnt_handle = NULL;
+		complete_all(&service_locator.service_available);
 		mutex_unlock(&service_locator.service_mutex);
 		pr_err("Service locator QMI client handle alloc failed!\n");
 		return;
@@ -123,6 +124,7 @@
 	if (rc) {
 		qmi_handle_destroy(service_locator.clnt_handle);
 		service_locator.clnt_handle = NULL;
+		complete_all(&service_locator.service_available);
 		mutex_unlock(&service_locator.service_mutex);
 		pr_err("Unable to connnect to service rc:%d\n", rc);
 		return;
@@ -138,6 +140,7 @@
 	mutex_lock(&service_locator.service_mutex);
 	qmi_handle_destroy(service_locator.clnt_handle);
 	service_locator.clnt_handle = NULL;
+	complete_all(&service_locator.service_available);
 	mutex_unlock(&service_locator.service_mutex);
 	pr_info("Connection with service locator lost\n");
 }
@@ -263,10 +266,12 @@
 		if (!domains_read) {
 			db_rev_count = pd->db_rev_count = resp->db_rev_count;
 			pd->total_domains = resp->total_domains;
-			if (!pd->total_domains && resp->domain_list_len) {
-				pr_err("total domains not set\n");
-				pd->total_domains = resp->domain_list_len;
+			if (!resp->total_domains) {
+				pr_err("No matching domains found\n");
+				rc = -EIO;
+				goto out;
 			}
+
 			pd->domain_list = kmalloc(
 					sizeof(struct servreg_loc_entry_v01) *
 					resp->total_domains, GFP_KERNEL);
@@ -283,6 +288,10 @@
 			rc = -EAGAIN;
 			goto out;
 		}
+		if (resp->domain_list_len > resp->total_domains) {
+			/* Always read total_domains from the response msg */
+			resp->domain_list_len = resp->total_domains;
+		}
 		/* Copy the response*/
 		store_get_domain_list_response(pd, resp, domains_read);
 		domains_read += resp->domain_list_len;
@@ -369,6 +378,7 @@
 	if (!pqw) {
 		rc = -ENOMEM;
 		pr_err("Allocation failed\n");
+		kfree(pqcd);
 		goto err;
 	}
 	pqw->notifier = locator_nb;
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 9e2416c..fca1c68 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -99,6 +99,7 @@
  */
 struct qmi_client_info {
 	int instance_id;
+	enum pd_subsys_state subsys_state;
 	struct work_struct svc_arrive;
 	struct work_struct svc_exit;
 	struct work_struct svc_rcv_msg;
@@ -436,7 +437,7 @@
 {
 	struct qmi_client_info *data = container_of(work,
 					struct qmi_client_info, svc_exit);
-	root_service_service_exit(data, ROOT_PD_DOWN);
+	root_service_service_exit(data, data->subsys_state);
 }
 
 static int service_event_notify(struct notifier_block *this,
@@ -453,6 +454,7 @@
 		break;
 	case QMI_SERVER_EXIT:
 		pr_debug("Root PD service DOWN\n");
+		data->subsys_state = ROOT_PD_DOWN;
 		queue_work(data->svc_event_wq, &data->svc_exit);
 		break;
 	default:
@@ -468,7 +470,6 @@
 	struct qmi_client_info *info = container_of(this,
 					struct qmi_client_info, ssr_notifier);
 	struct notif_data *notif = data;
-	enum pd_subsys_state state;
 
 	switch (code) {
 	case	SUBSYS_BEFORE_SHUTDOWN:
@@ -476,16 +477,16 @@
 						notif->crashed);
 		switch (notif->crashed) {
 		case CRASH_STATUS_ERR_FATAL:
-			state = ROOT_PD_ERR_FATAL;
+			info->subsys_state = ROOT_PD_ERR_FATAL;
 			break;
 		case CRASH_STATUS_WDOG_BITE:
-			state = ROOT_PD_WDOG_BITE;
+			info->subsys_state = ROOT_PD_WDOG_BITE;
 			break;
 		default:
-			state = ROOT_PD_SHUTDOWN;
+			info->subsys_state = ROOT_PD_SHUTDOWN;
 			break;
 		}
-		root_service_service_exit(info, state);
+		queue_work(info->svc_event_wq, &info->svc_exit);
 		break;
 	default:
 		break;
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 0063ae1..b5e3814 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1035,6 +1035,7 @@
 	d->desc.ops = &pil_ops_trusted;
 
 	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+	d->desc.clear_fw_region = true;
 
 	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
 					&proxy_timeout);
@@ -1053,7 +1054,7 @@
 									rc);
 			return rc;
 		}
-		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
+		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE_0);
 	}
 
 	rc = pil_desc_init(&d->desc);
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index a087ad6..f4c7779 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -153,10 +153,12 @@
 	struct sysmon_qmi_data *data = container_of(work,
 					struct sysmon_qmi_data, svc_arrive);
 
+	mutex_lock(&sysmon_lock);
 	/* Create a Local client port for QMI communication */
 	data->clnt_handle = qmi_handle_create(sysmon_clnt_notify, work);
 	if (!data->clnt_handle) {
 		pr_err("QMI client handle alloc failed for %s\n", data->name);
+		mutex_unlock(&sysmon_lock);
 		return;
 	}
 
@@ -167,6 +169,7 @@
 								data->name);
 		qmi_handle_destroy(data->clnt_handle);
 		data->clnt_handle = NULL;
+		mutex_unlock(&sysmon_lock);
 		return;
 	}
 	pr_info("Connection established between QMI handle and %s's SSCTL service\n"
@@ -177,6 +180,7 @@
 	if (rc < 0)
 		pr_warn("%s: Could not register the indication callback\n",
 								data->name);
+	mutex_unlock(&sysmon_lock);
 }
 
 static void sysmon_clnt_svc_exit(struct work_struct *work)
@@ -184,8 +188,10 @@
 	struct sysmon_qmi_data *data = container_of(work,
 					struct sysmon_qmi_data, svc_exit);
 
+	mutex_lock(&sysmon_lock);
 	qmi_handle_destroy(data->clnt_handle);
 	data->clnt_handle = NULL;
+	mutex_unlock(&sysmon_lock);
 }
 
 static void sysmon_clnt_recv_msg(struct work_struct *work)
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 758f627..034ddd3 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -25,8 +25,10 @@
 #include "sound/wcd-dsp-glink.h"
 
 #define WDSP_GLINK_DRIVER_NAME "wcd-dsp-glink"
-#define WDSP_MAX_WRITE_SIZE (512 * 1024)
+#define WDSP_MAX_WRITE_SIZE (256 * 1024)
 #define WDSP_MAX_READ_SIZE (4 * 1024)
+#define WDSP_MAX_NO_OF_INTENTS (20)
+#define WDSP_MAX_NO_OF_CHANNELS (10)
 
 #define MINOR_NUMBER_COUNT 1
 #define WDSP_EDGE "wdsp"
@@ -532,15 +534,30 @@
 	payload = (u8 *)pkt->payload;
 	no_of_channels = pkt->no_of_channels;
 
+	if (no_of_channels > WDSP_MAX_NO_OF_CHANNELS) {
+		dev_info(wpriv->dev, "%s: no_of_channels = %d is limited to %d\n",
+			 __func__, no_of_channels, WDSP_MAX_NO_OF_CHANNELS);
+		no_of_channels = WDSP_MAX_NO_OF_CHANNELS;
+	}
 	ch = kcalloc(no_of_channels, sizeof(struct wdsp_glink_ch *),
 		     GFP_KERNEL);
 	if (!ch) {
 		ret = -ENOMEM;
 		goto done;
 	}
+	wpriv->ch = ch;
+	wpriv->no_of_channels = no_of_channels;
 
 	for (i = 0; i < no_of_channels; i++) {
 		ch_cfg = (struct wdsp_glink_ch_cfg *)payload;
+
+		if (ch_cfg->no_of_intents > WDSP_MAX_NO_OF_INTENTS) {
+			dev_err(wpriv->dev, "%s: Invalid no_of_intents = %d\n",
+				__func__, ch_cfg->no_of_intents);
+			ret = -EINVAL;
+			goto err_ch_mem;
+		}
+
 		ch_cfg_size = sizeof(struct wdsp_glink_ch_cfg) +
 					(sizeof(u32) * ch_cfg->no_of_intents);
 		ch_size = sizeof(struct wdsp_glink_ch) +
@@ -564,8 +581,6 @@
 		INIT_WORK(&ch[i]->lcl_ch_cls_wrk, wdsp_glink_lcl_ch_cls_wrk);
 		init_waitqueue_head(&ch[i]->ch_connect_wait);
 	}
-	wpriv->ch = ch;
-	wpriv->no_of_channels = no_of_channels;
 
 	INIT_WORK(&wpriv->ch_open_cls_wrk, wdsp_glink_ch_open_cls_wrk);
 
@@ -746,15 +761,17 @@
 		goto done;
 	}
 
-	dev_dbg(wpriv->dev, "%s: count = %zd\n", __func__, count);
-
-	if (count > WDSP_MAX_WRITE_SIZE) {
-		dev_info(wpriv->dev, "%s: count = %zd is more than WDSP_MAX_WRITE_SIZE\n",
+	if ((count < sizeof(struct wdsp_write_pkt)) ||
+	    (count > WDSP_MAX_WRITE_SIZE)) {
+		dev_err(wpriv->dev, "%s: Invalid count = %zd\n",
 			__func__, count);
-		count = WDSP_MAX_WRITE_SIZE;
+		ret = -EINVAL;
+		goto done;
 	}
 
-	tx_buf_size = count + sizeof(struct wdsp_glink_tx_buf);
+	dev_dbg(wpriv->dev, "%s: count = %zd\n", __func__, count);
+
+	tx_buf_size = WDSP_MAX_WRITE_SIZE + sizeof(struct wdsp_glink_tx_buf);
 	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
 	if (!tx_buf) {
 		ret = -ENOMEM;
@@ -772,6 +789,13 @@
 	wpkt = (struct wdsp_write_pkt *)tx_buf->buf;
 	switch (wpkt->pkt_type) {
 	case WDSP_REG_PKT:
+		if (count <= (sizeof(struct wdsp_write_pkt) +
+			      sizeof(struct wdsp_reg_pkt))) {
+			dev_err(wpriv->dev, "%s: Invalid reg pkt size = %zd\n",
+				__func__, count);
+			ret = -EINVAL;
+			goto free_buf;
+		}
 		ret = wdsp_glink_ch_info_init(wpriv,
 					(struct wdsp_reg_pkt *)wpkt->payload);
 		if (ret < 0)
@@ -794,6 +818,13 @@
 		kfree(tx_buf);
 		break;
 	case WDSP_CMD_PKT:
+		if (count <= (sizeof(struct wdsp_write_pkt) +
+			      sizeof(struct wdsp_cmd_pkt))) {
+			dev_err(wpriv->dev, "%s: Invalid cmd pkt size = %zd\n",
+				__func__, count);
+			ret = -EINVAL;
+			goto free_buf;
+		}
 		mutex_lock(&wpriv->glink_mutex);
 		if (wpriv->glink_state.link_state == GLINK_LINK_STATE_DOWN) {
 			mutex_unlock(&wpriv->glink_mutex);
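The pattern behind these checks is to treat every count that originates in the userspace packet as untrusted: reject or clamp it before it sizes an allocation or a copy. A generic sketch of that two-stage validation, with hypothetical limit and structure names.

#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MAX_ITEMS	20	/* hypothetical upper bound */

struct example_pkt {
	u32 no_of_items;
	u32 items[];			/* flexible array sized by no_of_items */
};

static int example_validate(const struct example_pkt *pkt, size_t count)
{
	/* The fixed header must fit before no_of_items can be trusted. */
	if (count < sizeof(*pkt))
		return -EINVAL;

	/* Reject counts that would oversize the allocation or the copy. */
	if (pkt->no_of_items > EXAMPLE_MAX_ITEMS)
		return -EINVAL;

	if (count < sizeof(*pkt) + pkt->no_of_items * sizeof(u32))
		return -EINVAL;

	return 0;
}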
diff --git a/drivers/soundwire/swr-wcd-ctrl.c b/drivers/soundwire/swr-wcd-ctrl.c
index ea886c7..7e33e8b 100644
--- a/drivers/soundwire/swr-wcd-ctrl.c
+++ b/drivers/soundwire/swr-wcd-ctrl.c
@@ -64,6 +64,8 @@
 	{6, 10, 7800},		/* UC15: 2*(Spkr + SB + VI) */
 	{2, 3, 3600},		/* UC16: Spkr + VI */
 	{4, 6, 7200},		/* UC17: 2*(Spkr + VI) */
+	{3, 7, 4200},		/* UC18: Spkr + Comp + VI */
+	{6, 14, 8400},		/* UC19: 2*(Spkr + Comp + VI) */
 };
 #define MAX_USECASE	ARRAY_SIZE(uc)
 
@@ -178,6 +180,21 @@
 		{7, 6, 0},
 		{15, 10, 0},
 	},
+	/* UC 18 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{15, 7, 0},
+	},
+	/* UC 19 */
+	{
+		{7, 1, 0},
+		{31, 2, 0},
+		{15, 7, 0},
+		{7, 6, 0},
+		{31, 18, 0},
+		{15, 10, 0},
+	},
 };
 
 enum {
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 4c86197..403c799 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -119,30 +119,30 @@
 	int div = 0;
 	int idx;
 	struct se_geni_rsc *rsc = &mas->spi_rsc;
-	int ret = 0;
 	u32 clk_sel = geni_read_reg(mas->base, SE_GENI_CLK_SEL);
 	u32 m_clk_cfg = geni_read_reg(mas->base, GENI_SER_M_CLK_CFG);
+	int ret;
 
 	clk_sel &= ~CLK_SEL_MSK;
 	m_clk_cfg &= ~CLK_DIV_MSK;
 
 	idx = get_sclk(speed_hz, &sclk_freq);
-	if (idx < 0) {
-		ret = -EINVAL;
-		goto spi_clk_cfg_exit;
-	}
-	div = (sclk_freq / (SPI_OVERSAMPLING / speed_hz));
+	if (idx < 0)
+		return -EINVAL;
+
+	div = ((sclk_freq / SPI_OVERSAMPLING) / speed_hz);
+	if (!div)
+		return -EINVAL;
 
 	clk_sel |= (idx & CLK_SEL_MSK);
 	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
 	ret = clk_set_rate(rsc->se_clk, sclk_freq);
 	if (ret)
-		goto spi_clk_cfg_exit;
+		return ret;
 
 	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
 	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
-spi_clk_cfg_exit:
-	return ret;
+	return 0;
 }
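The regrouping matters because of integer arithmetic: the old expression sclk_freq / (SPI_OVERSAMPLING / speed_hz) divides by zero whenever speed_hz exceeds the oversampling constant, which is the common case for SPI rates expressed in Hz. As an illustration only (the numbers are placeholders, not the driver's actual constants): with sclk_freq = 19.2 MHz, an oversampling factor of 32 and speed_hz = 100 kHz, the new grouping yields div = (19200000 / 32) / 100000 = 6, while a 1 MHz request yields div = 0 and is now rejected by the !div check instead of programming a zero divider.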
 
 static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
@@ -195,7 +195,8 @@
 
 	ret = do_spi_clk_cfg(mas->cur_speed_hz, mas);
 	if (ret) {
-		dev_err(&spi_mas->dev, "Err setting clks ret %d\n", ret);
+		dev_err(&spi_mas->dev, "Err setting clks ret(%d) for %d\n",
+							ret, mas->cur_speed_hz);
 		goto prepare_message_exit;
 	}
 	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d93..ef1c8c1 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@
 
 	buf = kzalloc(12, GFP_KERNEL);
 	if (!buf)
-		return;
+		goto out_free;
 
 	memset(cdb, 0, MAX_COMMAND_SIZE);
 	cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@
 	 * If MODE_SENSE still returns zero, set the default value to 1024.
 	 */
 	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
 	if (!sdev->sector_size)
 		sdev->sector_size = 1024;
-out_free:
+
 	kfree(buf);
 }
 
@@ -314,9 +315,10 @@
 				sd->lun, sd->queue_depth);
 	}
 
-	dev->dev_attrib.hw_block_size = sd->sector_size;
+	dev->dev_attrib.hw_block_size =
+		min_not_zero((int)sd->sector_size, 512);
 	dev->dev_attrib.hw_max_sectors =
-		min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
 	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
 
 	/*
@@ -339,8 +341,10 @@
 	/*
 	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
 	 */
-	if (sd->type == TYPE_TAPE)
+	if (sd->type == TYPE_TAPE) {
 		pscsi_tape_read_blocksize(dev, sd);
+		dev->dev_attrib.hw_block_size = sd->sector_size;
+	}
 	return 0;
 }
 
@@ -406,7 +410,7 @@
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
 	__releases(sh->host_lock)
 {
 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@
 	return 0;
 }
 
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-		struct scsi_device *sd)
-	__releases(sh->host_lock)
-{
-	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-	struct Scsi_Host *sh = sd->host;
-	int ret;
-
-	spin_unlock_irq(sh->host_lock);
-	ret = pscsi_add_device_to_list(dev, sd);
-	if (ret)
-		return ret;
-
-	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-		sd->channel, sd->id, sd->lun);
-	return 0;
-}
-
 static int pscsi_configure_device(struct se_device *dev)
 {
 	struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@
 		case TYPE_DISK:
 			ret = pscsi_create_type_disk(dev, sd);
 			break;
-		case TYPE_ROM:
-			ret = pscsi_create_type_rom(dev, sd);
-			break;
 		default:
-			ret = pscsi_create_type_other(dev, sd);
+			ret = pscsi_create_type_nondisk(dev, sd);
 			break;
 		}
 
@@ -611,8 +590,7 @@
 		else if (pdv->pdv_lld_host)
 			scsi_host_put(pdv->pdv_lld_host);
 
-		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-			scsi_device_put(sd);
+		scsi_device_put(sd);
 
 		pdv->pdv_sd = NULL;
 	}
@@ -1069,7 +1047,6 @@
 	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
 		return pdv->pdv_bd->bd_part->nr_sects;
 
-	dump_stack();
 	return 0;
 }
 
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index aabd660..a53fb23 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1104,9 +1104,15 @@
 			return ret;
 		break;
 	case VERIFY:
+	case VERIFY_16:
 		size = 0;
-		sectors = transport_get_sectors_10(cdb);
-		cmd->t_task_lba = transport_lba_32(cdb);
+		if (cdb[0] == VERIFY) {
+			sectors = transport_get_sectors_10(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
 		cmd->execute_cmd = sbc_emulate_noop;
 		goto check_lba;
 	case REZERO_UNIT:
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 2fda339..2db473a 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -118,6 +118,16 @@
 	help
 	  Enable this to let the user space manage the platform thermals.
 
+config THERMAL_GOV_LOW_LIMITS
+	bool "Low limits mitigation governor"
+	help
+	  Enable this to manage platform thermal limits using the low
+	  limits governor.
+
+	  This governor monitors a thermal zone for readings that fall
+	  below a trip threshold and triggers a floor mitigation when
+	  they do.
+
 config THERMAL_GOV_POWER_ALLOCATOR
 	bool "Power allocator thermal governor"
 	help
@@ -454,6 +464,16 @@
 	  the sensor. Also able to set threshold temperature for both hot and cold
 	  and update when a threshold is reached.
 
+config THERMAL_TSENS
+	tristate "Qualcomm Technologies Inc. TSENS Temperature driver"
+	depends on THERMAL
+	help
+	  This enables the thermal sysfs driver for the TSENS device. It shows
+	  up in Sysfs as a thermal zone with multiple trip points. Also able
+	  to set threshold temperature for both warm and cool and update
+	  thermal userspace client when a threshold is reached. Warm/Cool
+	  temperature thresholds can be set independently for each sensor.
+
 menu "Qualcomm thermal drivers"
 depends on (ARCH_QCOM && OF) || COMPILE_TEST
 source "drivers/thermal/qcom/Kconfig"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d9489a7..2faed7f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -14,6 +14,7 @@
 thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG)	+= gov_bang_bang.o
 thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE)	+= step_wise.o
 thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE)	+= user_space.o
+thermal_sys-$(CONFIG_THERMAL_GOV_LOW_LIMITS) += gov_low_limits.o
 thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR)	+= power_allocator.o
 
 # cpufreq cooling
@@ -57,3 +58,4 @@
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
 obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
 obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
+obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 9ce0e9e..a6245d5 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -30,6 +30,8 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
+#include <linux/sched.h>
+#include <linux/of_device.h>
 
 #include <trace/events/thermal.h>
 
@@ -45,6 +47,7 @@
  *	level 0 --> 1st Max Freq
  *	level 1 --> 2nd Max Freq
  *	...
+ *	level n --> core isolated
  */
 
 /**
@@ -70,8 +73,12 @@
  *	cooling	devices.
  * @clipped_freq: integer value representing the absolute value of the clipped
  *	frequency.
- * @max_level: maximum cooling level. One less than total number of valid
- *	cpufreq frequencies.
+ * @cpufreq_floor_state: integer value representing the frequency floor state
+ *	of cpufreq cooling devices.
+ * @floor_freq: integer value representing the absolute value of the floor
+ *	frequency.
+ * @max_level: maximum cooling level. [0..max_level-1: <freq>
+ *	max_level: Core unavailable]
  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
  * @node: list_head to link all cpufreq_cooling_device together.
  * @last_load: load measured by the latest call to cpufreq_get_requested_power()
@@ -92,6 +99,8 @@
 	struct thermal_cooling_device *cool_dev;
 	unsigned int cpufreq_state;
 	unsigned int clipped_freq;
+	unsigned int cpufreq_floor_state;
+	unsigned int floor_freq;
 	unsigned int max_level;
 	unsigned int *freq_table;	/* In descending order */
 	struct cpumask allowed_cpus;
@@ -103,6 +112,7 @@
 	int dyn_power_table_entries;
 	struct device *cpu_dev;
 	get_static_t plat_get_static_power;
+	struct cpu_cooling_ops *plat_ops;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -162,7 +172,7 @@
 {
 	unsigned long level;
 
-	for (level = 0; level <= cpufreq_dev->max_level; level++) {
+	for (level = 0; level < cpufreq_dev->max_level; level++) {
 		if (freq == cpufreq_dev->freq_table[level])
 			return level;
 
@@ -218,7 +228,7 @@
 				    unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
-	unsigned long clipped_freq;
+	unsigned long clipped_freq, floor_freq;
 	struct cpufreq_cooling_device *cpufreq_dev;
 
 	if (event != CPUFREQ_ADJUST)
@@ -239,11 +249,16 @@
 		 *
 		 * But, if clipped_freq is greater than policy->max, we don't
 		 * need to do anything.
+		 *
+		 * Similarly, if policy minimum set by the user is less than
+		 * the floor_frequency, then adjust the policy->min.
 		 */
 		clipped_freq = cpufreq_dev->clipped_freq;
+		floor_freq = cpufreq_dev->floor_freq;
 
-		if (policy->max > clipped_freq)
-			cpufreq_verify_within_limits(policy, 0, clipped_freq);
+		if (policy->max > clipped_freq || policy->min < floor_freq)
+			cpufreq_verify_within_limits(policy, floor_freq,
+						     clipped_freq);
 		break;
 	}
 	mutex_unlock(&cooling_list_lock);
@@ -491,6 +506,58 @@
 }
 
 /**
+ * cpufreq_get_min_state - callback function to get the device floor state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the cooling device floor.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * floor state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_get_min_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+
+	*state = cpufreq_device->cpufreq_floor_state;
+
+	return 0;
+}
+
+/**
+ * cpufreq_set_min_state - callback function to set the device floor state.
+ * @cdev: thermal cooling device pointer.
+ * @state: set this variable to the current cooling state.
+ *
+ * Callback for the thermal cooling device to change the cpufreq
+ * floor state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_set_min_state(struct thermal_cooling_device *cdev,
+				 unsigned long state)
+{
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+	unsigned int floor_freq;
+
+	if (state > cpufreq_device->max_level)
+		state = cpufreq_device->max_level;
+
+	if (cpufreq_device->cpufreq_floor_state == state)
+		return 0;
+
+	floor_freq = cpufreq_device->freq_table[state];
+	cpufreq_device->cpufreq_floor_state = state;
+	cpufreq_device->floor_freq = floor_freq;
+
+	cpufreq_update_policy(cpu);
+
+	return 0;
+}
+
+/**
  * cpufreq_get_cur_state - callback function to get the current cooling state.
  * @cdev: thermal cooling device pointer.
  * @state: fill this variable with the current cooling state.
@@ -535,11 +602,27 @@
 	if (cpufreq_device->cpufreq_state == state)
 		return 0;
 
+	/* If state is the last, isolate the CPU */
+	if (state == cpufreq_device->max_level)
+		return sched_isolate_cpu(cpu);
+	else if (state < cpufreq_device->max_level)
+		sched_unisolate_cpu(cpu);
+
 	clip_freq = cpufreq_device->freq_table[state];
 	cpufreq_device->cpufreq_state = state;
 	cpufreq_device->clipped_freq = clip_freq;
 
-	cpufreq_update_policy(cpu);
+	/*
+	 * Check if the device has a platform mitigation function that can
+	 * handle the CPU freq mitigation; if not, notify the cpufreq framework.
+	 */
+	if (cpufreq_device->plat_ops) {
+		if (cpufreq_device->plat_ops->ceil_limit)
+			cpufreq_device->plat_ops->ceil_limit(cpu,
+						clip_freq);
+	} else {
+		cpufreq_update_policy(cpu);
+	}
 
 	return 0;
 }
@@ -745,6 +828,8 @@
 	.get_max_state = cpufreq_get_max_state,
 	.get_cur_state = cpufreq_get_cur_state,
 	.set_cur_state = cpufreq_set_cur_state,
+	.set_min_state = cpufreq_set_min_state,
+	.get_min_state = cpufreq_get_min_state,
 };
 
 static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
@@ -783,6 +868,9 @@
  * @capacitance: dynamic power coefficient for these cpus
  * @plat_static_func: function to calculate the static power consumed by these
  *                    cpus (optional)
+ * @plat_ops: platform specific mitigation ops that apply the limit by
+ *            changing the frequencies (optional). By default, the cpufreq
+ *            framework will be notified of the new limits.
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -795,7 +883,8 @@
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
 			const struct cpumask *clip_cpus, u32 capacitance,
-			get_static_t plat_static_func)
+			get_static_t plat_static_func,
+			struct cpu_cooling_ops *plat_ops)
 {
 	struct cpufreq_policy *policy;
 	struct thermal_cooling_device *cool_dev;
@@ -848,7 +937,9 @@
 	cpufreq_for_each_valid_entry(pos, table)
 		cpufreq_dev->max_level++;
 
-	cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+	/* Last level will indicate the core will be isolated. */
+	cpufreq_dev->max_level++;
+	cpufreq_dev->freq_table = kzalloc(sizeof(*cpufreq_dev->freq_table) *
 					  cpufreq_dev->max_level, GFP_KERNEL);
 	if (!cpufreq_dev->freq_table) {
 		cool_dev = ERR_PTR(-ENOMEM);
@@ -874,6 +965,8 @@
 		cooling_ops = &cpufreq_cooling_ops;
 	}
 
+	cpufreq_dev->plat_ops = plat_ops;
+
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
 	if (ret) {
 		cool_dev = ERR_PTR(ret);
@@ -881,7 +974,7 @@
 	}
 
 	/* Fill freq-table in descending order of frequencies */
-	for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+	for (i = 0, freq = -1; i < cpufreq_dev->max_level; i++) {
 		freq = find_next_max(table, freq);
 		cpufreq_dev->freq_table[i] = freq;
 
@@ -901,6 +994,9 @@
 		goto remove_idr;
 
 	cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
+	cpufreq_dev->floor_freq =
+		cpufreq_dev->freq_table[cpufreq_dev->max_level];
+	cpufreq_dev->cpufreq_floor_state = cpufreq_dev->max_level;
 	cpufreq_dev->cool_dev = cool_dev;
 
 	mutex_lock(&cooling_cpufreq_lock);
@@ -949,7 +1045,7 @@
 struct thermal_cooling_device *
 cpufreq_cooling_register(const struct cpumask *clip_cpus)
 {
-	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
@@ -973,7 +1069,7 @@
 	if (!np)
 		return ERR_PTR(-EINVAL);
 
-	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 
@@ -1003,11 +1099,34 @@
 			       get_static_t plat_static_func)
 {
 	return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
-				plat_static_func);
+				plat_static_func, NULL);
 }
 EXPORT_SYMBOL(cpufreq_power_cooling_register);
 
 /**
+ * cpufreq_platform_cooling_register() - create cpufreq cooling device with
+ * additional platform specific mitigation function.
+ *
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @plat_ops: the platform mitigation functions that will be called instead of
+ * cpufreq, if provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+				struct cpu_cooling_ops *plat_ops)
+{
+	struct device_node *cpu_node;
+
+	cpu_node = of_cpu_device_node_get(cpumask_first(clip_cpus));
+	return __cpufreq_cooling_register(cpu_node, clip_cpus, 0, NULL,
+						plat_ops);
+}
+EXPORT_SYMBOL(cpufreq_platform_cooling_register);
+
+/**
  * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
  * @np:	a valid struct device_node to the cooling device device tree node
  * @clip_cpus:	cpumask of cpus where the frequency constraints will happen
@@ -1040,7 +1159,7 @@
 		return ERR_PTR(-EINVAL);
 
 	return __cpufreq_cooling_register(np, clip_cpus, capacitance,
-				plat_static_func);
+				plat_static_func, NULL);
 }
 EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
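The hunks above add a frequency floor (min state) interface and a
cpufreq_platform_cooling_register() entry point that lets a platform driver
apply the frequency limit itself instead of going through
cpufreq_update_policy(). A minimal usage sketch follows; it assumes the
series also adds the prototype and struct cpu_cooling_ops to
include/linux/cpu_cooling.h, and the ceil_limit signature is inferred from
the plat_ops->ceil_limit(cpu, clip_freq) call site, so treat both as
assumptions rather than the final API.

#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Assumed callback signature, inferred from the call site above. */
static int demo_ceil_limit(int cpu, unsigned int freq_khz)
{
	/* Ask the platform/firmware to cap this CPU at freq_khz. */
	pr_debug("demo: cap CPU%d at %u kHz\n", cpu, freq_khz);
	return 0;
}

static struct cpu_cooling_ops demo_plat_ops = {
	.ceil_limit = demo_ceil_limit,
};

static struct thermal_cooling_device *demo_cdev;

static int demo_register_cooling(void)
{
	/* Cooling device for CPU0 only; clip_cpus may also be a whole cluster. */
	demo_cdev = cpufreq_platform_cooling_register(cpumask_of(0),
						      &demo_plat_ops);
	return PTR_ERR_OR_ZERO(demo_cdev);
}

Once registered, the thermal framework drives set_cur_state() for frequency
caps and the new set_min_state() callback for frequency floors on this
cooling device.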
 
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 81631b1..b2990c1 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -51,6 +51,7 @@
 	struct thermal_cooling_device *cdev;
 	struct devfreq *devfreq;
 	unsigned long cooling_state;
+	unsigned long cooling_min_state;
 	u32 *power_table;
 	u32 *freq_table;
 	size_t freq_table_size;
@@ -96,13 +97,15 @@
 /**
  * partition_enable_opps() - disable all opps above a given state
  * @dfc:	Pointer to devfreq we are operating on
- * @cdev_state:	cooling device state we're setting
+ * @cdev_max_state: Max cooling device state we're setting
+ * @cdev_min_state: Min cooling device state we're setting
  *
  * Go through the OPPs of the device, enabling all OPPs until
  * @cdev_state and disabling those frequencies above it.
  */
 static int partition_enable_opps(struct devfreq_cooling_device *dfc,
-				 unsigned long cdev_state)
+				 unsigned long cdev_max_state,
+				 unsigned long cdev_min_state)
 {
 	int i;
 	struct device *dev = dfc->devfreq->dev.parent;
@@ -111,7 +114,8 @@
 		struct dev_pm_opp *opp;
 		int ret = 0;
 		unsigned int freq = dfc->freq_table[i];
-		bool want_enable = i >= cdev_state ? true : false;
+		bool want_enable = (i >= cdev_max_state) &&
+				      (i <= cdev_min_state) ? true : false;
 
 		rcu_read_lock();
 		opp = dev_pm_opp_find_freq_exact(dev, freq, !want_enable);
@@ -144,6 +148,41 @@
 	return 0;
 }
 
+static int devfreq_cooling_get_min_state(struct thermal_cooling_device *cdev,
+					 unsigned long *state)
+{
+	struct devfreq_cooling_device *dfc = cdev->devdata;
+
+	*state = dfc->cooling_min_state;
+
+	return 0;
+}
+
+static int devfreq_cooling_set_min_state(struct thermal_cooling_device *cdev,
+					 unsigned long state)
+{
+	struct devfreq_cooling_device *dfc = cdev->devdata;
+	struct devfreq *df = dfc->devfreq;
+	struct device *dev = df->dev.parent;
+	int ret;
+
+	if (state == dfc->cooling_min_state)
+		return 0;
+
+	dev_dbg(dev, "Setting cooling min state %lu\n", state);
+
+	if (state >= dfc->freq_table_size)
+		state = dfc->freq_table_size - 1;
+
+	ret = partition_enable_opps(dfc, dfc->cooling_state, state);
+	if (ret)
+		return ret;
+
+	dfc->cooling_min_state = state;
+
+	return 0;
+}
+
 static int devfreq_cooling_get_cur_state(struct thermal_cooling_device *cdev,
 					 unsigned long *state)
 {
@@ -170,7 +209,7 @@
 	if (state >= dfc->freq_table_size)
 		return -EINVAL;
 
-	ret = partition_enable_opps(dfc, state);
+	ret = partition_enable_opps(dfc, state, dfc->cooling_min_state);
 	if (ret)
 		return ret;
 
@@ -361,6 +400,8 @@
 	.get_max_state = devfreq_cooling_get_max_state,
 	.get_cur_state = devfreq_cooling_get_cur_state,
 	.set_cur_state = devfreq_cooling_set_cur_state,
+	.get_min_state = devfreq_cooling_get_min_state,
+	.set_min_state = devfreq_cooling_set_min_state,
 };
 
 /**
@@ -499,6 +540,7 @@
 	if (err)
 		goto free_tables;
 
+	dfc->cooling_min_state = dfc->freq_table_size - 1;
 	snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);
 
 	cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
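For reference, an illustrative userspace-style sketch (not kernel code) of
the OPP window that partition_enable_opps() now computes: with freq_table[]
in descending order, cdev_max_state disables the faster OPPs and the new
cdev_min_state disables the slower ones.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Descending frequency table, index 0 is the fastest OPP (kHz). */
	const unsigned int freq_table[] = { 800000, 600000, 400000, 200000 };
	const unsigned long cdev_max_state = 1;	/* cap: disable OPPs faster than 600 MHz */
	const unsigned long cdev_min_state = 2;	/* floor: disable OPPs slower than 400 MHz */
	unsigned long i;

	for (i = 0; i < 4; i++) {
		bool want_enable = (i >= cdev_max_state) && (i <= cdev_min_state);

		printf("OPP[%lu] = %u kHz -> %s\n", i, freq_table[i],
		       want_enable ? "enabled" : "disabled");
	}
	return 0;
}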
diff --git a/drivers/thermal/gov_low_limits.c b/drivers/thermal/gov_low_limits.c
new file mode 100644
index 0000000..cf2dbc4
--- /dev/null
+++ b/drivers/thermal/gov_low_limits.c
@@ -0,0 +1,130 @@
+/*
+ *  Copyright (C) 2012 Intel Corp
+ *  Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
+ *  Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/thermal.h>
+#include <trace/events/thermal.h>
+
+#include "thermal_core.h"
+
+static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+{
+	int trip_temp, trip_hyst;
+	enum thermal_trip_type trip_type;
+	struct thermal_instance *instance;
+	bool throttle;
+	int old_target;
+
+	tz->ops->get_trip_temp(tz, trip, &trip_temp);
+	tz->ops->get_trip_type(tz, trip, &trip_type);
+	if (tz->ops->get_trip_hyst) {
+		tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
+		trip_hyst = trip_temp + trip_hyst;
+	} else {
+		trip_hyst = trip_temp;
+	}
+
+	mutex_lock(&tz->lock);
+
+	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+		if (instance->trip != trip)
+			continue;
+
+		if ((tz->temperature <= trip_temp) ||
+			(instance->target != THERMAL_NO_TARGET
+				&& tz->temperature <= trip_hyst))
+			throttle = true;
+		else
+			throttle = false;
+
+		dev_dbg(&tz->device,
+			"Trip%d[type=%d,temp=%d,hyst=%d],throttle=%d\n",
+			trip, trip_type, trip_temp, trip_hyst, throttle);
+
+		old_target = instance->target;
+		instance->target = (throttle) ? instance->upper
+					: THERMAL_NO_TARGET;
+		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+					old_target, (int)instance->target);
+
+		if (old_target == instance->target)
+			continue;
+
+		if (old_target == THERMAL_NO_TARGET &&
+				instance->target != THERMAL_NO_TARGET) {
+			trace_thermal_zone_trip(tz, trip, trip_type);
+			tz->passive += 1;
+		} else if (old_target != THERMAL_NO_TARGET &&
+				instance->target == THERMAL_NO_TARGET) {
+			tz->passive -= 1;
+		}
+
+		instance->cdev->updated = false; /* cdev needs update */
+	}
+
+	mutex_unlock(&tz->lock);
+}
+
+/**
+ * low_limits_throttle - throttles devices associated with the given zone
+ * @tz - thermal_zone_device
+ * @trip - the trip point
+ *
+ * Throttling Logic: If the sensor reading goes below a trip point, the
+ * pre-defined mitigation will be applied for the cooling device.
+ * If the sensor reading goes above the trip hysteresis, the
+ * mitigation will be removed.
+ */
+static int low_limits_throttle(struct thermal_zone_device *tz, int trip)
+{
+	struct thermal_instance *instance;
+
+	thermal_zone_trip_update(tz, trip);
+
+	mutex_lock(&tz->lock);
+
+	list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+		thermal_cdev_update(instance->cdev);
+
+	mutex_unlock(&tz->lock);
+
+	return 0;
+}
+
+static struct thermal_governor thermal_gov_low_limits_floor = {
+	.name		= "low_limits_floor",
+	.throttle	= low_limits_throttle,
+	.min_state_throttle = 1,
+};
+
+static struct thermal_governor thermal_gov_low_limits_cap = {
+	.name		= "low_limits_cap",
+	.throttle	= low_limits_throttle,
+};
+
+int thermal_gov_low_limits_register(void)
+{
+	thermal_register_governor(&thermal_gov_low_limits_cap);
+	return thermal_register_governor(&thermal_gov_low_limits_floor);
+}
+
+void thermal_gov_low_limits_unregister(void)
+{
+	thermal_unregister_governor(&thermal_gov_low_limits_cap);
+	thermal_unregister_governor(&thermal_gov_low_limits_floor);
+}
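A compact restatement of the trip decision implemented by
thermal_zone_trip_update() above, written as a standalone C helper for
illustration only: mitigation engages once the reading drops to the trip
value and is released only after the reading rises back above trip plus
hysteresis.

#include <stdbool.h>

/* temp, trip_temp and hyst are in the sensor's units (mC, mV, mA, ...). */
static bool low_limits_should_throttle(int temp, int trip_temp, int hyst,
				       bool currently_throttling)
{
	if (temp <= trip_temp)
		return true;
	if (currently_throttling && temp <= trip_temp + hyst)
		return true;
	return false;
}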
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
new file mode 100644
index 0000000..2013e7e
--- /dev/null
+++ b/drivers/thermal/msm-tsens.c
@@ -0,0 +1,288 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+LIST_HEAD(tsens_device_list);
+
+static int tsens_get_temp(struct tsens_sensor *s, int *temp)
+{
+	struct tsens_device *tmdev = s->tmdev;
+
+	return tmdev->ops->get_temp(s, temp);
+}
+
+static int tsens_set_trip_temp(struct tsens_sensor *s, int trip, int temp)
+{
+	struct tsens_device *tmdev = s->tmdev;
+
+	if (tmdev->ops->set_trip_temp)
+		return tmdev->ops->set_trip_temp(s, trip, temp);
+
+	return 0;
+}
+
+static int tsens_init(struct tsens_device *tmdev)
+{
+	if (tmdev->ops->hw_init)
+		return tmdev->ops->hw_init(tmdev);
+
+	return 0;
+}
+
+static int tsens_register_interrupts(struct tsens_device *tmdev)
+{
+	if (tmdev->ops->interrupts_reg)
+		return tmdev->ops->interrupts_reg(tmdev);
+
+	return 0;
+}
+
+static const struct of_device_id tsens_table[] = {
+	{	.compatible = "qcom,msm8996-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,msm8953-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,msm8998-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,msmhamster-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,sdm660-tsens",
+		.data = &data_tsens23xx,
+	},
+	{	.compatible = "qcom,sdm630-tsens",
+		.data = &data_tsens23xx,
+	},
+	{	.compatible = "qcom,sdm845-tsens",
+		.data = &data_tsens24xx,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, tsens_table);
+
+static struct thermal_zone_of_device_ops tsens_tm_thermal_zone_ops = {
+	.get_temp = tsens_get_temp,
+	.set_trip_temp = tsens_set_trip_temp,
+};
+
+static int get_device_tree_data(struct platform_device *pdev,
+				struct tsens_device *tmdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 *hw_id, *client_id;
+	u32 rc = 0, i, tsens_num_sensors = 0;
+	int tsens_len;
+	const struct of_device_id *id;
+	const struct tsens_data *data;
+	struct resource *res_tsens_mem, *res_mem = NULL;
+
+	if (!of_match_node(tsens_table, of_node)) {
+		pr_err("Need to read SoC specific fuse map\n");
+		return -ENODEV;
+	}
+
+	id = of_match_node(tsens_table, of_node);
+	if (id == NULL) {
+		pr_err("can not find tsens_table of_node\n");
+		return -ENODEV;
+	}
+
+	data = id->data;
+	/* Sensor count was read from the "qcom,sensors" property in probe */
+	tsens_num_sensors = tmdev->num_sensors;
+	hw_id = devm_kzalloc(&pdev->dev,
+		tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!hw_id)
+		return -ENOMEM;
+
+	client_id = devm_kzalloc(&pdev->dev,
+		tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!client_id)
+		return -ENOMEM;
+
+	tmdev->ops = data->ops;
+	tmdev->ctrl_data = data;
+	tmdev->pdev = pdev;
+
+	if (!tmdev->ops || !tmdev->ops->hw_init || !tmdev->ops->get_temp) {
+		pr_err("Invalid ops\n");
+		return -EINVAL;
+	}
+
+	/* TSENS register region */
+	res_tsens_mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "tsens_physical");
+	if (!res_tsens_mem) {
+		pr_err("Could not get tsens physical address resource\n");
+		return -EINVAL;
+	}
+
+	tsens_len = res_tsens_mem->end - res_tsens_mem->start + 1;
+
+	res_mem = request_mem_region(res_tsens_mem->start,
+				tsens_len, res_tsens_mem->name);
+	if (!res_mem) {
+		pr_err("Request tsens physical memory region failed\n");
+		return -EINVAL;
+	}
+
+	tmdev->tsens_addr = ioremap(res_mem->start, tsens_len);
+	if (!tmdev->tsens_addr) {
+		pr_err("Failed to IO map TSENS registers.\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node,
+		"qcom,sensor-id", hw_id, tsens_num_sensors);
+	if (rc) {
+		pr_debug("Default sensor id mapping\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].hw_id = i;
+	} else {
+		pr_debug("Use specified sensor id mapping\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].hw_id = hw_id[i];
+	}
+
+	rc = of_property_read_u32_array(of_node,
+		"qcom,client-id", client_id, tsens_num_sensors);
+	if (rc) {
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].id = i;
+		pr_debug("Default client id mapping\n");
+	} else {
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].id = client_id[i];
+		pr_debug("Use specified client id mapping\n");
+	}
+
+	return 0;
+}
+
+static int tsens_thermal_zone_register(struct tsens_device *tmdev)
+{
+	int rc = 0, i = 0;
+
+	for (i = 0; i < tmdev->num_sensors; i++) {
+		tmdev->sensor[i].tmdev = tmdev;
+		tmdev->sensor[i].tzd = devm_thermal_zone_of_sensor_register(
+					&tmdev->pdev->dev, i, &tmdev->sensor[i],
+					&tsens_tm_thermal_zone_ops);
+		if (IS_ERR(tmdev->sensor[i].tzd)) {
+			pr_err("Error registering sensor:%d\n", i);
+			continue;
+		}
+	}
+
+	return rc;
+}
+
+static int tsens_tm_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+int tsens_tm_probe(struct platform_device *pdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct tsens_device *tmdev = NULL;
+	u32 tsens_num_sensors = 0;
+	int rc;
+
+	if (!(pdev->dev.of_node))
+		return -ENODEV;
+
+	rc = of_property_read_u32(of_node,
+			"qcom,sensors", &tsens_num_sensors);
+	if (rc || (!tsens_num_sensors)) {
+		dev_err(&pdev->dev, "missing sensors\n");
+		return -ENODEV;
+	}
+
+	tmdev = devm_kzalloc(&pdev->dev,
+			sizeof(struct tsens_device) +
+			tsens_num_sensors *
+			sizeof(struct tsens_sensor),
+			GFP_KERNEL);
+	if (tmdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	tmdev->num_sensors = tsens_num_sensors;
+
+	rc = get_device_tree_data(pdev, tmdev);
+	if (rc) {
+		pr_err("Error reading TSENS DT\n");
+		return rc;
+	}
+
+	rc = tsens_init(tmdev);
+	if (rc)
+		return rc;
+
+	rc = tsens_thermal_zone_register(tmdev);
+	if (rc) {
+		pr_err("Error registering the thermal zone\n");
+		return rc;
+	}
+
+	rc = tsens_register_interrupts(tmdev);
+	if (rc < 0) {
+		pr_err("TSENS interrupt register failed:%d\n", rc);
+		return rc;
+	}
+
+	list_add_tail(&tmdev->list, &tsens_device_list);
+	platform_set_drvdata(pdev, tmdev);
+
+	return rc;
+}
+
+static struct platform_driver tsens_tm_driver = {
+	.probe = tsens_tm_probe,
+	.remove = tsens_tm_remove,
+	.driver = {
+		.name = "msm-tsens",
+		.owner = THIS_MODULE,
+		.of_match_table = tsens_table,
+	},
+};
+
+int __init tsens_tm_init_driver(void)
+{
+	return platform_driver_register(&tsens_tm_driver);
+}
+subsys_initcall(tsens_tm_init_driver);
+
+static void __exit tsens_tm_deinit(void)
+{
+	platform_driver_unregister(&tsens_tm_driver);
+}
+module_exit(tsens_tm_deinit);
+
+MODULE_ALIAS("platform:" TSENS_DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
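For orientation, a hedged sketch of what a per-SoC backend such as
tsens2xxx.c is expected to supply, based only on how msm-tsens.c invokes
tmdev->ops above. The real structure names and member lists live in
tsens.h, which is not part of this hunk, so the types below are assumptions
for illustration only.

struct tsens_sensor;
struct tsens_device;

struct tsens_ops {			/* assumed name, see tsens.h */
	int (*hw_init)(struct tsens_device *tmdev);
	int (*get_temp)(struct tsens_sensor *s, int *temp);
	int (*set_trip_temp)(struct tsens_sensor *s, int trip, int temp);
	int (*interrupts_reg)(struct tsens_device *tmdev);
};

static int demo_hw_init(struct tsens_device *tmdev)
{
	/* Enable the controller and program the interrupt masks. */
	return 0;
}

static int demo_get_temp(struct tsens_sensor *s, int *temp)
{
	/* Decode the per-sensor status register into millidegrees Celsius. */
	*temp = 40000;
	return 0;
}

static const struct tsens_ops demo_tsens_ops = {
	.hw_init  = demo_hw_init,
	.get_temp = demo_get_temp,
};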
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index d04ec3b..b337ad7 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -31,11 +31,16 @@
 #include <linux/export.h>
 #include <linux/string.h>
 #include <linux/thermal.h>
+#include <linux/list.h>
 
 #include "thermal_core.h"
 
-/***   Private data structures to represent thermal device tree data ***/
+#define for_each_tz_sibling(pos, head)                                         \
+	for (pos = list_first_entry((head), struct __thermal_zone, list);\
+		&(pos->list) != (head);                                  \
+		pos = list_next_entry(pos, list))
 
+/***   Private data structures to represent thermal device tree data ***/
 /**
  * struct __thermal_bind_param - a match between trip and cooling device
  * @cooling_device: a pointer to identify the referred cooling device
@@ -54,18 +59,36 @@
 };
 
 /**
+ * struct __sensor_param - Holds individual sensor data
+ * @sensor_data: sensor driver private data passed as input argument
+ * @ops: sensor driver ops
+ * @trip_high: last trip high value programmed in the sensor driver
+ * @trip_low: last trip low value programmed in the sensor driver
+ * @lock: mutex lock acquired before updating the trip temperatures
+ * @first_tz: list head pointing the first thermal zone
+ */
+struct __sensor_param {
+	void *sensor_data;
+	const struct thermal_zone_of_device_ops *ops;
+	int trip_high, trip_low;
+	struct mutex lock;
+	struct list_head first_tz;
+};
+
+/**
  * struct __thermal_zone - internal representation of a thermal zone
  * @mode: current thermal zone device mode (enabled/disabled)
  * @passive_delay: polling interval while passive cooling is activated
  * @polling_delay: zone polling interval
  * @slope: slope of the temperature adjustment curve
  * @offset: offset of the temperature adjustment curve
+ * @tzd: thermal zone device pointer for this sensor
  * @ntrips: number of trip points
  * @trips: an array of trip points (0..ntrips - 1)
  * @num_tbps: number of thermal bind params
  * @tbps: an array of thermal bind params (0..num_tbps - 1)
- * @sensor_data: sensor private data used while reading temperature and trend
- * @ops: set of callbacks to handle the thermal zone based on DT
+ * @list: sibling thermal zone pointer
+ * @senps: sensor related parameters
  */
 
 struct __thermal_zone {
@@ -74,6 +97,7 @@
 	int polling_delay;
 	int slope;
 	int offset;
+	struct thermal_zone_device *tzd;
 
 	/* trip data */
 	int ntrips;
@@ -83,11 +107,14 @@
 	int num_tbps;
 	struct __thermal_bind_params *tbps;
 
+	struct list_head list;
 	/* sensor interface */
-	void *sensor_data;
-	const struct thermal_zone_of_device_ops *ops;
+	struct __sensor_param *senps;
 };
 
+static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
+		unsigned int trip_type_mask, int *low, int *high);
+
 /***   DT thermal zone device callbacks   ***/
 
 static int of_thermal_get_temp(struct thermal_zone_device *tz,
@@ -95,21 +122,36 @@
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	if (!data->ops->get_temp)
+	if (!data->senps || !data->senps->ops->get_temp)
 		return -EINVAL;
 
-	return data->ops->get_temp(data->sensor_data, temp);
+	return data->senps->ops->get_temp(data->senps->sensor_data, temp);
 }
 
 static int of_thermal_set_trips(struct thermal_zone_device *tz,
-				int low, int high)
+				int inp_low, int inp_high)
 {
 	struct __thermal_zone *data = tz->devdata;
+	int high = INT_MAX, low = INT_MIN, ret = 0;
 
-	if (!data->ops || !data->ops->set_trips)
+	if (!data->senps || !data->senps->ops->set_trips)
 		return -EINVAL;
 
-	return data->ops->set_trips(data->sensor_data, low, high);
+	mutex_lock(&data->senps->lock);
+	of_thermal_aggregate_trip_types(tz, GENMASK(THERMAL_TRIP_CRITICAL, 0),
+					&low, &high);
+	if (low == data->senps->trip_low
+		&& high == data->senps->trip_high)
+		goto set_trips_exit;
+
+	data->senps->trip_low = low;
+	data->senps->trip_high = high;
+	ret = data->senps->ops->set_trips(data->senps->sensor_data,
+					  low, high);
+
+set_trips_exit:
+	mutex_unlock(&data->senps->lock);
+	return ret;
 }
 
 /**
@@ -192,7 +234,10 @@
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	return data->ops->set_emul_temp(data->sensor_data, temp);
+	if (!data->senps || !data->senps->ops->set_emul_temp)
+		return -EINVAL;
+
+	return data->senps->ops->set_emul_temp(data->senps->sensor_data, temp);
 }
 
 static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
@@ -200,10 +245,11 @@
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	if (!data->ops->get_trend)
+	if (!data->senps || !data->senps->ops->get_trend)
 		return -EINVAL;
 
-	return data->ops->get_trend(data->sensor_data, trip, trend);
+	return data->senps->ops->get_trend(data->senps->sensor_data,
+					   trip, trend);
 }
 
 static int of_thermal_bind(struct thermal_zone_device *thermal,
@@ -325,10 +371,11 @@
 	if (trip >= data->ntrips || trip < 0)
 		return -EDOM;
 
-	if (data->ops->set_trip_temp) {
+	if (data->senps && data->senps->ops->set_trip_temp) {
 		int ret;
 
-		ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
+		ret = data->senps->ops->set_trip_temp(data->senps->sensor_data,
+						      trip, temp);
 		if (ret)
 			return ret;
 	}
@@ -381,6 +428,89 @@
 	return -EINVAL;
 }
 
+static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
+		unsigned int trip_type_mask, int *low, int *high)
+{
+	int min = INT_MIN;
+	int max = INT_MAX;
+	int tt, th, trip;
+	int temp = tz->temperature;
+	struct thermal_zone_device *zone = NULL;
+	struct __thermal_zone *data = tz->devdata;
+	struct list_head *head;
+	enum thermal_trip_type type = 0;
+
+	head = &data->senps->first_tz;
+	for_each_tz_sibling(data, head) {
+		zone = data->tzd;
+		for (trip = 0; trip < data->ntrips; trip++) {
+			of_thermal_get_trip_type(zone, trip, &type);
+			if (!(BIT(type) & trip_type_mask))
+				continue;
+
+			if (!zone->tzp->tracks_low) {
+				tt = data->trips[trip].temperature;
+				if (tt > temp && tt < max)
+					max = tt;
+				th = tt - data->trips[trip].hysteresis;
+				if (th < temp && th > min)
+					min = th;
+			} else {
+				tt = data->trips[trip].temperature;
+				if (tt < temp && tt > min)
+					min = tt;
+				th = tt + data->trips[trip].hysteresis;
+				if (th > temp && th < max)
+					max = th;
+			}
+		}
+	}
+
+	*high = max;
+	*low = min;
+
+	return 0;
+}
+
+/*
+ * of_thermal_aggregate_trip - aggregate trip temperatures across sibling
+ *				thermal zones.
+ * @tz: pointer to the primary thermal zone.
+ * @type: the thermal trip type to be aggregated upon
+ * @low: the highest trip threshold that is below the current temperature
+ * @high: the lowest trip threshold that is above the current temperature
+ */
+int of_thermal_aggregate_trip(struct thermal_zone_device *tz,
+				enum thermal_trip_type type,
+				int *low, int *high)
+{
+	if (type <= THERMAL_TRIP_CRITICAL)
+		return of_thermal_aggregate_trip_types(tz, BIT(type), low,
+						       high);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(of_thermal_aggregate_trip);
+
+/*
+ * of_thermal_handle_trip - Handle thermal trip from sensors
+ *
+ * @tz: pointer to the primary thermal zone.
+ */
+void of_thermal_handle_trip(struct thermal_zone_device *tz)
+{
+	struct thermal_zone_device *zone;
+	struct __thermal_zone *data = tz->devdata;
+	struct list_head *head;
+
+	head = &data->senps->first_tz;
+	for_each_tz_sibling(data, head) {
+		zone = data->tzd;
+		thermal_zone_device_update(zone, THERMAL_EVENT_UNSPECIFIED);
+	}
+}
+EXPORT_SYMBOL(of_thermal_handle_trip);
+
 static struct thermal_zone_device_ops of_thermal_ops = {
 	.get_mode = of_thermal_get_mode,
 	.set_mode = of_thermal_set_mode,
@@ -400,8 +530,8 @@
 
 static struct thermal_zone_device *
 thermal_zone_of_add_sensor(struct device_node *zone,
-			   struct device_node *sensor, void *data,
-			   const struct thermal_zone_of_device_ops *ops)
+			   struct device_node *sensor,
+			   struct __sensor_param *sens_param)
 {
 	struct thermal_zone_device *tzd;
 	struct __thermal_zone *tz;
@@ -412,12 +542,11 @@
 
 	tz = tzd->devdata;
 
-	if (!ops)
+	if (!sens_param->ops)
 		return ERR_PTR(-EINVAL);
 
 	mutex_lock(&tzd->lock);
-	tz->ops = ops;
-	tz->sensor_data = data;
+	tz->senps = sens_param;
 
 	tzd->ops->get_temp = of_thermal_get_temp;
 	tzd->ops->get_trend = of_thermal_get_trend;
@@ -426,12 +555,13 @@
 	 * The thermal zone core will calculate the window if they have set the
 	 * optional set_trips pointer.
 	 */
-	if (ops->set_trips)
+	if (sens_param->ops->set_trips)
 		tzd->ops->set_trips = of_thermal_set_trips;
 
-	if (ops->set_emul_temp)
+	if (sens_param->ops->set_emul_temp)
 		tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
 
+	list_add_tail(&tz->list, &sens_param->first_tz);
 	mutex_unlock(&tzd->lock);
 
 	return tzd;
@@ -462,11 +592,10 @@
  * 01 - This function must enqueue the new sensor instead of using
  * it as the only source of temperature values.
  *
- * 02 - There must be a way to match the sensor with all thermal zones
- * that refer to it.
- *
  * Return: On success returns a valid struct thermal_zone_device,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
+ * otherwise, it returns a corresponding ERR_PTR(). In case there are multiple
+ * thermal zones referencing the same sensor, the return value will be the
+ * thermal_zone_device pointer of the first thermal zone. Caller must
  * check the return value with help of IS_ERR() helper.
  */
 struct thermal_zone_device *
@@ -475,6 +604,8 @@
 {
 	struct device_node *np, *child, *sensor_np;
 	struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
+	struct thermal_zone_device *first_tzd = NULL;
+	struct __sensor_param *sens_param = NULL;
 
 	np = of_find_node_by_name(NULL, "thermal-zones");
 	if (!np)
@@ -485,6 +616,17 @@
 		return ERR_PTR(-EINVAL);
 	}
 
+	sens_param = kzalloc(sizeof(*sens_param), GFP_KERNEL);
+	if (!sens_param) {
+		of_node_put(np);
+		return ERR_PTR(-ENOMEM);
+	}
+	sens_param->sensor_data = data;
+	sens_param->ops = ops;
+	INIT_LIST_HEAD(&sens_param->first_tz);
+	sens_param->trip_high = INT_MAX;
+	sens_param->trip_low = INT_MIN;
+	mutex_init(&sens_param->lock);
 	sensor_np = of_node_get(dev->of_node);
 
 	for_each_available_child_of_node(np, child) {
@@ -509,21 +651,23 @@
 
 		if (sensor_specs.np == sensor_np && id == sensor_id) {
 			tzd = thermal_zone_of_add_sensor(child, sensor_np,
-							 data, ops);
-			if (!IS_ERR(tzd))
+							 sens_param);
+			if (!IS_ERR(tzd)) {
+				if (!first_tzd)
+					first_tzd = tzd;
 				tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
-
-			of_node_put(sensor_specs.np);
-			of_node_put(child);
-			goto exit;
+			}
 		}
 		of_node_put(sensor_specs.np);
 	}
-exit:
 	of_node_put(sensor_np);
 	of_node_put(np);
 
-	return tzd;
+	if (!first_tzd) {
+		first_tzd = ERR_PTR(-ENODEV);
+		kfree(sens_param);
+	}
+	return first_tzd;
 }
 EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
 
@@ -546,6 +690,8 @@
 				       struct thermal_zone_device *tzd)
 {
 	struct __thermal_zone *tz;
+	struct thermal_zone_device *pos;
+	struct list_head *head;
 
 	if (!dev || !tzd || !tzd->devdata)
 		return;
@@ -556,14 +702,20 @@
 	if (!tz)
 		return;
 
-	mutex_lock(&tzd->lock);
-	tzd->ops->get_temp = NULL;
-	tzd->ops->get_trend = NULL;
-	tzd->ops->set_emul_temp = NULL;
+	head = &tz->senps->first_tz;
+	for_each_tz_sibling(tz, head) {
+		pos = tz->tzd;
+		mutex_lock(&pos->lock);
+		pos->ops->get_temp = NULL;
+		pos->ops->get_trend = NULL;
+		pos->ops->set_emul_temp = NULL;
 
-	tz->ops = NULL;
-	tz->sensor_data = NULL;
-	mutex_unlock(&tzd->lock);
+		list_del(&tz->list);
+		if (list_empty(&tz->senps->first_tz))
+			kfree(tz->senps);
+		tz->senps = NULL;
+		mutex_unlock(&pos->lock);
+	}
 }
 EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
 
@@ -832,6 +984,7 @@
 	if (!tz)
 		return ERR_PTR(-ENOMEM);
 
+	INIT_LIST_HEAD(&tz->list);
 	ret = of_property_read_u32(np, "polling-delay-passive", &prop);
 	if (ret < 0) {
 		pr_err("missing polling-delay-passive property\n");
@@ -975,6 +1128,7 @@
 		struct thermal_zone_params *tzp;
 		int i, mask = 0;
 		u32 prop;
+		const char *governor_name;
 
 		tz = thermal_of_build_thermal_zone(child);
 		if (IS_ERR(tz)) {
@@ -997,6 +1151,11 @@
 		/* No hwmon because there might be hwmon drivers registering */
 		tzp->no_hwmon = true;
 
+		if (!of_property_read_string(child, "thermal-governor",
+						&governor_name))
+			strlcpy(tzp->governor_name, governor_name,
+					THERMAL_NAME_LENGTH);
+
 		if (!of_property_read_u32(child, "sustainable-power", &prop))
 			tzp->sustainable_power = prop;
 
@@ -1007,6 +1166,9 @@
 		tzp->slope = tz->slope;
 		tzp->offset = tz->offset;
 
+		if (of_property_read_bool(child, "tracks-low"))
+			tzp->tracks_low = true;
+
 		zone = thermal_zone_device_register(child->name, tz->ntrips,
 						    mask, tz,
 						    ops, tzp,
@@ -1019,7 +1181,9 @@
 			kfree(ops);
 			of_thermal_free_zone(tz);
 			/* attempting to build remaining zones still */
+			continue;
 		}
+		tz->tzd = zone;
 	}
 	of_node_put(np);
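A short sketch of how a sensor driver is expected to use the reworked
of-thermal interface: register once per hardware sensor and, when the
hardware threshold fires, call of_thermal_handle_trip() so that every
thermal zone sharing the sensor is re-evaluated. The demo_ names are
illustrative, and the header carrying the of_thermal_handle_trip()
prototype is not shown in this hunk, so its location is assumed.

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct demo_sensor {
	struct thermal_zone_device *tzd;
};

static int demo_get_temp(void *data, int *temp)
{
	/* Read the hardware and report millidegrees Celsius. */
	*temp = 45000;
	return 0;
}

static int demo_set_trips(void *data, int low, int high)
{
	/* Program the aggregated low/high window into the hardware. */
	return 0;
}

static const struct thermal_zone_of_device_ops demo_ops = {
	.get_temp  = demo_get_temp,
	.set_trips = demo_set_trips,
};

static irqreturn_t demo_threshold_irq(int irq, void *data)
{
	struct demo_sensor *s = data;

	/* Re-evaluates every zone that references this sensor. */
	of_thermal_handle_trip(s->tzd);
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_sensor *s;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* One registration covers all zones that reference sensor id 0.
	 * (Threshold interrupt registration for demo_threshold_irq omitted.)
	 */
	s->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, s,
						      &demo_ops);
	return PTR_ERR_OR_ZERO(s->tzd);
}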
 
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index be32e5a..473d15a 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -9,3 +9,24 @@
 	  thermal zone device via the mode file results in disabling the sensor.
 	  Also able to set threshold temperature for both hot and cold and update
 	  when a threshold is reached.
+
+config MSM_BCL_PERIPHERAL_CTL
+	bool "BCL driver to control the PMIC BCL peripheral"
+	depends on SPMI && THERMAL_OF
+	help
+	  Say Y here to enable the BCL PMIC peripheral driver. This driver
+	  provides routines to configure and monitor the BCL
+	  PMIC peripheral. It registers the battery current and
+	  voltage sensors with the thermal core framework, accepts
+	  threshold input and notifies the thermal core when a threshold is
+	  reached.
+
+config QTI_THERMAL_LIMITS_DCVS
+	bool "QTI LMH DCVS Driver"
+	depends on THERMAL_OF
+	help
+	  This enables the driver for Limits Management Hardware - DCVS block
+	  for the application processors. The h/w block that is available for
+	  each cluster can be used to perform quick thermal mitigations by
+	  tracking temperatures of the CPUs and taking thermal action in the
+	  hardware without s/w intervention.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 2cc2193..d1a53b0 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,2 +1,4 @@
 obj-$(CONFIG_QCOM_TSENS)	+= qcom_tsens.o
 qcom_tsens-y			+= tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
+obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
+obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o
diff --git a/drivers/thermal/qcom/bcl_peripheral.c b/drivers/thermal/qcom/bcl_peripheral.c
new file mode 100644
index 0000000..55ff770
--- /dev/null
+++ b/drivers/thermal/qcom/bcl_peripheral.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+
+#include "../thermal_core.h"
+
+#define BCL_DRIVER_NAME       "bcl_peripheral"
+#define BCL_VBAT_INT          "bcl-low-vbat"
+#define BCL_VLOW_VBAT_INT     "bcl-very-low-vbat"
+#define BCL_CLOW_VBAT_INT     "bcl-crit-low-vbat"
+#define BCL_IBAT_INT          "bcl-high-ibat"
+#define BCL_VHIGH_IBAT_INT    "bcl-very-high-ibat"
+#define BCL_MONITOR_EN        0x46
+#define BCL_VBAT_MIN          0x5C
+#define BCL_IBAT_MAX          0x5D
+#define BCL_MAX_MIN_CLR       0x48
+#define BCL_IBAT_MAX_CLR      3
+#define BCL_VBAT_MIN_CLR      2
+#define BCL_VBAT_ADC_LOW      0x72
+#define BCL_VBAT_COMP_LOW     0x75
+#define BCL_VBAT_COMP_TLOW    0x76
+#define BCL_IBAT_HIGH         0x78
+#define BCL_IBAT_TOO_HIGH     0x79
+#define BCL_LMH_CFG           0xA3
+#define BCL_CFG               0x6A
+#define LMH_INT_POL_HIGH      0x12
+#define LMH_INT_EN            0x15
+#define BCL_VBAT_SCALING      39000
+#define BCL_IBAT_SCALING      80
+#define BCL_LMH_CFG_VAL       0x3
+#define BCL_CFG_VAL           0x81
+#define LMH_INT_VAL           0x7
+#define BCL_READ_RETRY_LIMIT  3
+#define VAL_CP_REG_BUF_LEN    3
+#define VAL_REG_BUF_OFFSET    0
+#define VAL_CP_REG_BUF_OFFSET 2
+#define BCL_STD_VBAT_NR       9
+#define BCL_VBAT_NO_READING   127
+
+enum bcl_dev_type {
+	BCL_HIGH_IBAT,
+	BCL_VHIGH_IBAT,
+	BCL_LOW_VBAT,
+	BCL_VLOW_VBAT,
+	BCL_CLOW_VBAT,
+	BCL_SOC_MONITOR,
+	BCL_TYPE_MAX,
+};
+
+struct bcl_peripheral_data {
+	int                     irq_num;
+	long int		trip_temp;
+	int                     trip_val;
+	int                     last_val;
+	struct mutex            state_trans_lock;
+	bool			irq_enabled;
+	struct thermal_zone_of_device_ops ops;
+	struct thermal_zone_device *tz_dev;
+};
+
+struct bcl_device {
+	struct regmap			*regmap;
+	uint16_t			fg_bcl_addr;
+	uint16_t			fg_lmh_addr;
+	struct notifier_block		psy_nb;
+	struct work_struct		soc_eval_work;
+	struct bcl_peripheral_data	param[BCL_TYPE_MAX];
+};
+
+static struct bcl_device *bcl_perph;
+static int vbat_low[BCL_STD_VBAT_NR] = {
+		2400, 2500, 2600, 2700, 2800, 2900,
+		3000, 3100, 3200};
+
+static int bcl_read_multi_register(int16_t reg_offset, uint8_t *data, int len)
+{
+	int  ret = 0;
+
+	if (!bcl_perph) {
+		pr_err("BCL device not initialized\n");
+		return -EINVAL;
+	}
+	ret = regmap_bulk_read(bcl_perph->regmap,
+			       (bcl_perph->fg_bcl_addr + reg_offset),
+			       data, len);
+	if (ret < 0) {
+		pr_err("Error reading register %d. err:%d", reg_offset, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int bcl_write_general_register(int16_t reg_offset,
+					uint16_t base, uint8_t data)
+{
+	int  ret = 0;
+	uint8_t *write_buf = &data;
+
+	if (!bcl_perph) {
+		pr_err("BCL device not initialized\n");
+		return -EINVAL;
+	}
+	ret = regmap_write(bcl_perph->regmap, (base + reg_offset), *write_buf);
+	if (ret < 0) {
+		pr_err("Error writing register %d. err:%d\n", reg_offset, ret);
+		return ret;
+	}
+	pr_debug("wrote 0x%02x to 0x%04x\n", data, base + reg_offset);
+
+	return ret;
+}
+
+static int bcl_write_register(int16_t reg_offset, uint8_t data)
+{
+	return bcl_write_general_register(reg_offset,
+			bcl_perph->fg_bcl_addr, data);
+}
+
+static void convert_vbat_to_adc_val(int *val)
+{
+	*val = (*val * 1000) / BCL_VBAT_SCALING;
+}
+
+static void convert_adc_to_vbat_val(int *val)
+{
+	*val = *val * BCL_VBAT_SCALING / 1000;
+}
+
+static void convert_ibat_to_adc_val(int *val)
+{
+	*val = *val / BCL_IBAT_SCALING;
+}
+
+static void convert_adc_to_ibat_val(int *val)
+{
+	*val = *val * BCL_IBAT_SCALING;
+}
+
+static int bcl_set_ibat(void *data, int low, int high)
+{
+	int ret = 0, ibat_ua, thresh_value;
+	int8_t val = 0;
+	int16_t addr;
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	thresh_value = high;
+	if (bat_data->trip_temp == thresh_value)
+		return 0;
+
+	mutex_lock(&bat_data->state_trans_lock);
+	if (bat_data->irq_num && bat_data->irq_enabled) {
+		disable_irq_nosync(bat_data->irq_num);
+		bat_data->irq_enabled = false;
+	}
+	if (thresh_value == INT_MAX) {
+		bat_data->trip_temp = thresh_value;
+		goto set_trip_exit;
+	}
+
+	ibat_ua = thresh_value;
+	convert_ibat_to_adc_val(&thresh_value);
+	val = (int8_t)thresh_value;
+	if (&bcl_perph->param[BCL_HIGH_IBAT] == bat_data) {
+		addr = BCL_IBAT_HIGH;
+		pr_debug("ibat high threshold:%d mA ADC:0x%02x\n",
+				ibat_ua, val);
+	} else if (&bcl_perph->param[BCL_VHIGH_IBAT] == bat_data) {
+		addr = BCL_IBAT_TOO_HIGH;
+		pr_debug("ibat too high threshold:%d mA ADC:0x%02x\n",
+				ibat_ua, val);
+	} else {
+		goto set_trip_exit;
+	}
+	ret = bcl_write_register(addr, val);
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		goto set_trip_exit;
+	}
+	bat_data->trip_temp = ibat_ua;
+
+	if (bat_data->irq_num && !bat_data->irq_enabled) {
+		enable_irq(bat_data->irq_num);
+		bat_data->irq_enabled = true;
+	}
+
+set_trip_exit:
+	mutex_unlock(&bat_data->state_trans_lock);
+
+	return ret;
+}
+
+static int bcl_set_vbat(void *data, int low, int high)
+{
+	int ret = 0, vbat_uv, vbat_idx, thresh_value;
+	int8_t val = 0;
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+	uint16_t addr;
+
+	thresh_value = low;
+	if (bat_data->trip_temp == thresh_value)
+		return 0;
+
+	mutex_lock(&bat_data->state_trans_lock);
+
+	if (bat_data->irq_num && bat_data->irq_enabled) {
+		disable_irq_nosync(bat_data->irq_num);
+		bat_data->irq_enabled = false;
+	}
+	if (thresh_value == INT_MIN) {
+		bat_data->trip_temp = thresh_value;
+		goto set_trip_exit;
+	}
+	vbat_uv = thresh_value;
+	convert_vbat_to_adc_val(&thresh_value);
+	val = (int8_t)thresh_value;
+	/*
+	 * very low and critical low trip can support only standard
+	 * trip thresholds
+	 */
+	if (&bcl_perph->param[BCL_LOW_VBAT] == bat_data) {
+		addr = BCL_VBAT_ADC_LOW;
+		pr_debug("vbat low threshold:%d mv ADC:0x%02x\n",
+				vbat_uv, val);
+	} else if (&bcl_perph->param[BCL_VLOW_VBAT] == bat_data) {
+		/*
+		 * Scan the standard voltage table, sorted in ascending order
+		 * and find the closest threshold that is lower or equal to
+		 * the requested value. Passive trip supports thresholds
+		 * indexed from 1...BCL_STD_VBAT_NR in the voltage table.
+		 */
+		for (vbat_idx = 2; vbat_idx < BCL_STD_VBAT_NR;
+			vbat_idx++) {
+			if (vbat_uv > vbat_low[vbat_idx])
+				continue;
+			break;
+		}
+		addr = BCL_VBAT_COMP_LOW;
+		val = vbat_idx - 2;
+		vbat_uv = vbat_low[vbat_idx - 1];
+		pr_debug("vbat too low threshold:%d mv ADC:0x%02x\n",
+				vbat_uv, val);
+	} else if (&bcl_perph->param[BCL_CLOW_VBAT] == bat_data) {
+		/* Hot trip supports thresholds indexed from
+		 * 0...BCL_STD_VBAT_NR-1 in the voltage table.
+		 */
+		for (vbat_idx = 1; vbat_idx < (BCL_STD_VBAT_NR - 1);
+			vbat_idx++) {
+			if (vbat_uv > vbat_low[vbat_idx])
+				continue;
+			break;
+		}
+		addr = BCL_VBAT_COMP_TLOW;
+		val = vbat_idx - 1;
+		vbat_uv = vbat_low[vbat_idx - 1];
+		pr_debug("vbat critic low threshold:%d mv ADC:0x%02x\n",
+				vbat_uv, val);
+	} else {
+		goto set_trip_exit;
+	}
+
+	ret = bcl_write_register(addr, val);
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		goto set_trip_exit;
+	}
+	bat_data->trip_temp = vbat_uv;
+	if (bat_data->irq_num && !bat_data->irq_enabled) {
+		enable_irq(bat_data->irq_num);
+		bat_data->irq_enabled = true;
+	}
+
+set_trip_exit:
+	mutex_unlock(&bat_data->state_trans_lock);
+	return ret;
+}
+
+static int bcl_clear_vbat_min(void)
+{
+	int ret  = 0;
+
+	ret = bcl_write_register(BCL_MAX_MIN_CLR,
+			BIT(BCL_VBAT_MIN_CLR));
+	if (ret)
+		pr_err("Error in clearing vbat min reg. err:%d", ret);
+
+	return ret;
+}
+
+static int bcl_clear_ibat_max(void)
+{
+	int ret  = 0;
+
+	ret = bcl_write_register(BCL_MAX_MIN_CLR,
+			BIT(BCL_IBAT_MAX_CLR));
+	if (ret)
+		pr_err("Error in clearing ibat max reg. err:%d", ret);
+
+	return ret;
+}
+
+static int bcl_read_ibat(void *data, int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	do {
+		ret = bcl_read_multi_register(BCL_IBAT_MAX, val,
+			VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		*adc_value = bat_data->last_val;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	if (*adc_value == 0) {
+		/*
+		 * The sensor can sometimes read a value of 0 on
+		 * consecutive reads.
+		 */
+		*adc_value = bat_data->last_val;
+	} else {
+		convert_adc_to_ibat_val(adc_value);
+		bat_data->last_val = *adc_value;
+	}
+	pr_debug("ibat:%d mA\n", bat_data->last_val);
+
+bcl_read_exit:
+	return ret;
+}
+
+static int bcl_read_ibat_and_clear(void *data, int *adc_value)
+{
+	int ret = 0;
+
+	ret = bcl_read_ibat(data, adc_value);
+	if (ret)
+		return ret;
+	return bcl_clear_ibat_max();
+}
+
+static int bcl_read_vbat(void *data, int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	do {
+		ret = bcl_read_multi_register(BCL_VBAT_MIN, val,
+			VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	if (*adc_value == BCL_VBAT_NO_READING) {
+		*adc_value = bat_data->last_val;
+	} else {
+		convert_adc_to_vbat_val(adc_value);
+		bat_data->last_val = *adc_value;
+	}
+	pr_debug("vbat:%d mv\n", bat_data->last_val);
+
+bcl_read_exit:
+	return ret;
+}
+
+static int bcl_read_vbat_and_clear(void *data, int *adc_value)
+{
+	int ret;
+
+	ret = bcl_read_vbat(data, adc_value);
+	if (ret)
+		return ret;
+	return bcl_clear_vbat_min();
+}
+
+static irqreturn_t bcl_handle_ibat(int irq, void *data)
+{
+	struct bcl_peripheral_data *perph_data =
+		(struct bcl_peripheral_data *)data;
+
+	mutex_lock(&perph_data->state_trans_lock);
+	if (!perph_data->irq_enabled) {
+		WARN_ON(1);
+		disable_irq_nosync(irq);
+		perph_data->irq_enabled = false;
+		goto exit_intr;
+	}
+	mutex_unlock(&perph_data->state_trans_lock);
+	of_thermal_handle_trip(perph_data->tz_dev);
+
+	return IRQ_HANDLED;
+
+exit_intr:
+	mutex_unlock(&perph_data->state_trans_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bcl_handle_vbat(int irq, void *data)
+{
+	struct bcl_peripheral_data *perph_data =
+		(struct bcl_peripheral_data *)data;
+
+	mutex_lock(&perph_data->state_trans_lock);
+	if (!perph_data->irq_enabled) {
+		WARN_ON(1);
+		disable_irq_nosync(irq);
+		perph_data->irq_enabled = false;
+		goto exit_intr;
+	}
+	mutex_unlock(&perph_data->state_trans_lock);
+	of_thermal_handle_trip(perph_data->tz_dev);
+
+	return IRQ_HANDLED;
+
+exit_intr:
+	mutex_unlock(&perph_data->state_trans_lock);
+	return IRQ_HANDLED;
+}
+
+static int bcl_get_devicetree_data(struct platform_device *pdev)
+{
+	int ret = 0;
+	const __be32 *prop = NULL;
+	struct device_node *dev_node = pdev->dev.of_node;
+
+	prop = of_get_address(dev_node, 0, NULL, NULL);
+	if (prop) {
+		bcl_perph->fg_bcl_addr = be32_to_cpu(*prop);
+		pr_debug("fg_user_adc@%04x\n", bcl_perph->fg_bcl_addr);
+	} else {
+		dev_err(&pdev->dev, "No fg_user_adc registers found\n");
+		return -ENODEV;
+	}
+
+	prop = of_get_address(dev_node, 1, NULL, NULL);
+	if (prop) {
+		bcl_perph->fg_lmh_addr = be32_to_cpu(*prop);
+		pr_debug("fg_lmh@%04x\n", bcl_perph->fg_lmh_addr);
+	} else {
+		dev_err(&pdev->dev, "No fg_lmh registers found\n");
+		return -ENODEV;
+	}
+
+	return ret;
+}
+
+static int bcl_set_soc(void *data, int low, int high)
+{
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	if (low == bat_data->trip_temp)
+		return 0;
+
+	mutex_lock(&bat_data->state_trans_lock);
+	pr_debug("low soc threshold:%d\n", low);
+	bat_data->trip_temp = low;
+	if (low == INT_MIN) {
+		bat_data->irq_enabled = false;
+		goto unlock_and_exit;
+	}
+	bat_data->irq_enabled = true;
+	schedule_work(&bcl_perph->soc_eval_work);
+
+unlock_and_exit:
+	mutex_unlock(&bat_data->state_trans_lock);
+	return 0;
+}
+
+static int bcl_read_soc(void *data, int *val)
+{
+	static struct power_supply *batt_psy;
+	union power_supply_propval ret = {0,};
+	int err = 0;
+
+	*val = 100;
+	if (!batt_psy)
+		batt_psy = power_supply_get_by_name("battery");
+	if (batt_psy) {
+		err = power_supply_get_property(batt_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		if (err) {
+			pr_err("battery percentage read error:%d\n",
+				err);
+			return err;
+		}
+		*val = ret.intval;
+	}
+	pr_debug("soc:%d\n", *val);
+
+	return err;
+}
+
+static void bcl_evaluate_soc(struct work_struct *work)
+{
+	int battery_percentage;
+	struct bcl_peripheral_data *perph_data =
+		&bcl_perph->param[BCL_SOC_MONITOR];
+
+	if (bcl_read_soc((void *)perph_data, &battery_percentage))
+		return;
+
+	mutex_lock(&perph_data->state_trans_lock);
+	if (!perph_data->irq_enabled)
+		goto eval_exit;
+	if (battery_percentage > perph_data->trip_temp)
+		goto eval_exit;
+
+	perph_data->trip_val = battery_percentage;
+	mutex_unlock(&perph_data->state_trans_lock);
+	of_thermal_handle_trip(perph_data->tz_dev);
+
+	return;
+eval_exit:
+	mutex_unlock(&perph_data->state_trans_lock);
+}
+
+static int battery_supply_callback(struct notifier_block *nb,
+			unsigned long event, void *data)
+{
+	struct power_supply *psy = data;
+
+	if (strcmp(psy->desc->name, "battery"))
+		return NOTIFY_OK;
+	schedule_work(&bcl_perph->soc_eval_work);
+
+	return NOTIFY_OK;
+}
+
+static void bcl_fetch_trip(struct platform_device *pdev, const char *int_name,
+		struct bcl_peripheral_data *data,
+		irqreturn_t (*handle)(int, void *))
+{
+	int ret = 0, irq_num = 0;
+
+	/*
+	 * Allow flexibility for the HLOS to set the trip temperature for
+	 * all the thresholds, but handle only one vbat interrupt and one
+	 * ibat interrupt here. The LMH-DCVSh will handle and mitigate the
+	 * rest of the ibat/vbat interrupts.
+	 */
+	if (!handle) {
+		mutex_lock(&data->state_trans_lock);
+		data->irq_num = 0;
+		data->irq_enabled = false;
+		mutex_unlock(&data->state_trans_lock);
+		return;
+	}
+
+	irq_num = platform_get_irq_byname(pdev, int_name);
+	if (irq_num) {
+		mutex_lock(&data->state_trans_lock);
+		ret = devm_request_threaded_irq(&pdev->dev,
+				irq_num, NULL, handle,
+				IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				int_name, data);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Error requesting trip irq. err:%d",
+				ret);
+			mutex_unlock(&data->state_trans_lock);
+			return;
+		}
+		disable_irq_nosync(irq_num);
+		data->irq_num = irq_num;
+		data->irq_enabled = false;
+		mutex_unlock(&data->state_trans_lock);
+	}
+}
+
+static void bcl_probe_soc(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct bcl_peripheral_data *soc_data;
+
+	soc_data = &bcl_perph->param[BCL_SOC_MONITOR];
+	mutex_init(&soc_data->state_trans_lock);
+	soc_data->ops.get_temp = bcl_read_soc;
+	soc_data->ops.set_trips = bcl_set_soc;
+	INIT_WORK(&bcl_perph->soc_eval_work, bcl_evaluate_soc);
+	bcl_perph->psy_nb.notifier_call = battery_supply_callback;
+	ret = power_supply_reg_notifier(&bcl_perph->psy_nb);
+	if (ret < 0) {
+		pr_err("Unable to register soc notifier. err:%d\n", ret);
+		return;
+	}
+	soc_data->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+				BCL_SOC_MONITOR, soc_data, &soc_data->ops);
+	if (IS_ERR(soc_data->tz_dev)) {
+		pr_err("soc register failed. err:%ld\n",
+				PTR_ERR(soc_data->tz_dev));
+		return;
+	}
+	thermal_zone_device_update(soc_data->tz_dev, THERMAL_DEVICE_UP);
+	schedule_work(&bcl_perph->soc_eval_work);
+}
+
+static void bcl_vbat_init(struct platform_device *pdev,
+		struct bcl_peripheral_data *vbat, enum bcl_dev_type type)
+{
+	mutex_init(&vbat->state_trans_lock);
+	switch (type) {
+	case BCL_LOW_VBAT:
+		bcl_fetch_trip(pdev, BCL_VBAT_INT, vbat, bcl_handle_vbat);
+		break;
+	case BCL_VLOW_VBAT:
+		bcl_fetch_trip(pdev, BCL_VLOW_VBAT_INT, vbat, NULL);
+		break;
+	case BCL_CLOW_VBAT:
+		bcl_fetch_trip(pdev, BCL_CLOW_VBAT_INT, vbat, NULL);
+		break;
+	default:
+		return;
+	}
+	vbat->ops.get_temp = bcl_read_vbat_and_clear;
+	vbat->ops.set_trips = bcl_set_vbat;
+	vbat->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+				type, vbat, &vbat->ops);
+	if (IS_ERR(vbat->tz_dev)) {
+		pr_err("vbat register failed. err:%ld\n",
+				PTR_ERR(vbat->tz_dev));
+		return;
+	}
+	thermal_zone_device_update(vbat->tz_dev, THERMAL_DEVICE_UP);
+}
+
+static void bcl_probe_vbat(struct platform_device *pdev)
+{
+	bcl_vbat_init(pdev, &bcl_perph->param[BCL_LOW_VBAT], BCL_LOW_VBAT);
+	bcl_vbat_init(pdev, &bcl_perph->param[BCL_VLOW_VBAT], BCL_VLOW_VBAT);
+	bcl_vbat_init(pdev, &bcl_perph->param[BCL_CLOW_VBAT], BCL_CLOW_VBAT);
+}
+
+static void bcl_ibat_init(struct platform_device *pdev,
+		struct bcl_peripheral_data *ibat, enum bcl_dev_type type)
+{
+	mutex_init(&ibat->state_trans_lock);
+	if (type == BCL_HIGH_IBAT)
+		bcl_fetch_trip(pdev, BCL_IBAT_INT, ibat, bcl_handle_ibat);
+	else
+		bcl_fetch_trip(pdev, BCL_VHIGH_IBAT_INT, ibat, NULL);
+	ibat->ops.get_temp = bcl_read_ibat_and_clear;
+	ibat->ops.set_trips = bcl_set_ibat;
+	ibat->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+				type, ibat, &ibat->ops);
+	if (IS_ERR(ibat->tz_dev)) {
+		pr_err("ibat register failed. err:%ld\n",
+				PTR_ERR(ibat->tz_dev));
+		return;
+	}
+	thermal_zone_device_update(ibat->tz_dev, THERMAL_DEVICE_UP);
+}
+
+static void bcl_probe_ibat(struct platform_device *pdev)
+{
+	bcl_ibat_init(pdev, &bcl_perph->param[BCL_HIGH_IBAT], BCL_HIGH_IBAT);
+	bcl_ibat_init(pdev, &bcl_perph->param[BCL_VHIGH_IBAT], BCL_VHIGH_IBAT);
+}
+
+static void bcl_configure_lmh_peripheral(void)
+{
+	bcl_write_register(BCL_LMH_CFG, BCL_LMH_CFG_VAL);
+	bcl_write_register(BCL_CFG, BCL_CFG_VAL);
+	bcl_write_general_register(LMH_INT_POL_HIGH,
+			bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+	bcl_write_general_register(LMH_INT_EN,
+			bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+}
+
+static int bcl_remove(struct platform_device *pdev)
+{
+	int i = 0;
+
+	for (; i < BCL_TYPE_MAX; i++) {
+		if (!bcl_perph->param[i].tz_dev)
+			continue;
+		if (i == BCL_SOC_MONITOR) {
+			power_supply_unreg_notifier(&bcl_perph->psy_nb);
+			flush_work(&bcl_perph->soc_eval_work);
+		}
+		thermal_zone_of_sensor_unregister(&pdev->dev,
+				bcl_perph->param[i].tz_dev);
+	}
+	bcl_perph = NULL;
+
+	return 0;
+}
+
+static int bcl_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	bcl_perph = devm_kzalloc(&pdev->dev, sizeof(*bcl_perph), GFP_KERNEL);
+	if (!bcl_perph)
+		return -ENOMEM;
+
+	bcl_perph->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!bcl_perph->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	bcl_get_devicetree_data(pdev);
+	bcl_probe_ibat(pdev);
+	bcl_probe_vbat(pdev);
+	bcl_probe_soc(pdev);
+	bcl_configure_lmh_peripheral();
+
+	dev_set_drvdata(&pdev->dev, bcl_perph);
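+	/* Enable BCL hardware monitoring only after all sensors are registered */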
+	ret = bcl_write_register(BCL_MONITOR_EN, BIT(7));
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		goto bcl_probe_exit;
+	}
+
+	return 0;
+
+bcl_probe_exit:
+	bcl_remove(pdev);
+	return ret;
+}
+
+static const struct of_device_id bcl_match[] = {
+	{
+		.compatible = "qcom,msm-bcl-lmh",
+	},
+	{},
+};
+
+static struct platform_driver bcl_driver = {
+	.probe  = bcl_probe,
+	.remove = bcl_remove,
+	.driver = {
+		.name           = BCL_DRIVER_NAME,
+		.owner          = THIS_MODULE,
+		.of_match_table = bcl_match,
+	},
+};
+
+builtin_platform_driver(bcl_driver);
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
new file mode 100644
index 0000000..c93d650
--- /dev/null
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -0,0 +1,518 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/pm_opp.h>
+#include <linux/cpu_cooling.h>
+#include <linux/atomic.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
+
+#include <soc/qcom/scm.h>
+
+#include "../thermal_core.h"
+
+#define LIMITS_DCVSH                0x10
+#define LIMITS_PROFILE_CHANGE       0x01
+#define LIMITS_NODE_DCVS            0x44435653
+
+#define LIMITS_SUB_FN_THERMAL       0x54484D4C
+#define LIMITS_SUB_FN_CRNT          0x43524E54
+#define LIMITS_SUB_FN_REL           0x52454C00
+#define LIMITS_SUB_FN_BCL           0x42434C00
+#define LIMITS_SUB_FN_GENERAL       0x47454E00
+
+#define LIMITS_ALGO_MODE_ENABLE     0x454E424C
+
+#define LIMITS_HI_THRESHOLD         0x48494748
+#define LIMITS_LOW_THRESHOLD        0x4C4F5700
+#define LIMITS_ARM_THRESHOLD        0x41524D00
+
+#define LIMITS_CLUSTER_0            0x6370302D
+#define LIMITS_CLUSTER_1            0x6370312D
+
+#define LIMITS_DOMAIN_MAX           0x444D4158
+#define LIMITS_DOMAIN_MIN           0x444D494E
+
+#define LIMITS_TEMP_DEFAULT         75000
+#define LIMITS_LOW_THRESHOLD_OFFSET 500
+#define LIMITS_POLLING_DELAY_MS     10
+#define LIMITS_CLUSTER_0_REQ        0x179C1B04
+#define LIMITS_CLUSTER_1_REQ        0x179C3B04
+#define LIMITS_CLUSTER_0_INT_CLR    0x179CE808
+#define LIMITS_CLUSTER_1_INT_CLR    0x179CC808
+#define LIMITS_CLUSTER_0_MIN_FREQ   0x17D78BC0
+#define LIMITS_CLUSTER_1_MIN_FREQ   0x17D70BC0
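+/*
+ * The value read from the OSM request register encodes the mitigation
+ * frequency in its lower 10 bits, in units of 19200 kHz; extract it and
+ * scale it to kHz.
+ */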
+#define dcvsh_get_frequency(_val, _max) do { \
+	_max = (_val) & 0x3FF; \
+	_max *= 19200; \
+} while (0)
+#define FREQ_KHZ_TO_HZ(_val) ((_val) * 1000)
+#define FREQ_HZ_TO_KHZ(_val) ((_val) / 1000)
+
+enum lmh_hw_trips {
+	LIMITS_TRIP_ARM,
+	LIMITS_TRIP_HI,
+	LIMITS_TRIP_MAX,
+};
+
+struct limits_dcvs_hw {
+	char sensor_name[THERMAL_NAME_LENGTH];
+	uint32_t affinity;
+	uint32_t temp_limits[LIMITS_TRIP_MAX];
+	int irq_num;
+	void *osm_hw_reg;
+	void *int_clr_reg;
+	void *min_freq_reg;
+	cpumask_t core_map;
+	struct timer_list poll_timer;
+	unsigned long max_freq;
+	unsigned long min_freq;
+	unsigned long hw_freq_limit;
+	struct list_head list;
+	atomic_t is_irq_enabled;
+};
+
+LIST_HEAD(lmh_dcvs_hw_list);
+
+static int limits_dcvs_get_freq_limits(uint32_t cpu, unsigned long *max_freq,
+					 unsigned long *min_freq)
+{
+	unsigned long freq_ceil = UINT_MAX, freq_floor = 0;
+	struct device *cpu_dev = NULL;
+	int ret = 0;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev) {
+		pr_err("Error getting CPU%d device\n", cpu);
+		return -ENODEV;
+	}
+
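+	/*
+	 * Finding the frequency floor of UINT_MAX returns the highest
+	 * available OPP and the ceiling of 0 returns the lowest, giving
+	 * the supported frequency range for this CPU.
+	 */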
+	rcu_read_lock();
+	dev_pm_opp_find_freq_floor(cpu_dev, &freq_ceil);
+	dev_pm_opp_find_freq_ceil(cpu_dev, &freq_floor);
+	rcu_read_unlock();
+
+	*max_freq = freq_ceil / 1000;
+	*min_freq = freq_floor / 1000;
+
+	return ret;
+}
+
+static unsigned long limits_mitigation_notify(struct limits_dcvs_hw *hw)
+{
+	uint32_t val = 0;
+	struct device *cpu_dev = NULL;
+	unsigned long freq_val, max_limit = 0;
+	struct dev_pm_opp *opp_entry;
+
+	val = readl_relaxed(hw->osm_hw_reg);
+	dcvsh_get_frequency(val, max_limit);
+	cpu_dev = get_cpu_device(cpumask_first(&hw->core_map));
+	if (!cpu_dev) {
+		pr_err("Error getting CPU%d device\n",
+			cpumask_first(&hw->core_map));
+		goto notify_exit;
+	}
+
+	freq_val = FREQ_KHZ_TO_HZ(max_limit);
+	rcu_read_lock();
+	opp_entry = dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
+	/*
+	 * The hardware mitigation frequency can be lower than the lowest
+	 * possible CPU frequency. In that case the freq floor call will
+	 * fail with -ERANGE and we need to match it to the lowest
+	 * frequency using freq_ceil.
+	 */
+	if (IS_ERR(opp_entry) && PTR_ERR(opp_entry) == -ERANGE) {
+		opp_entry = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_val);
+		if (IS_ERR(opp_entry))
+			dev_err(cpu_dev, "frequency:%lu. opp error:%ld\n",
+					freq_val, PTR_ERR(opp_entry));
+	}
+	rcu_read_unlock();
+	max_limit = FREQ_HZ_TO_KHZ(freq_val);
+
+	sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
+
+notify_exit:
+	hw->hw_freq_limit = max_limit;
+	return max_limit;
+}
+
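+/*
+ * Poll the mitigated frequency while throttling is in effect. Once the
+ * hardware limit is back at the maximum frequency, stop polling, clear
+ * the pending interrupt and re-enable the IRQ.
+ */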
+static void limits_dcvs_poll(unsigned long data)
+{
+	unsigned long max_limit = 0;
+	struct limits_dcvs_hw *hw = (struct limits_dcvs_hw *)data;
+
+	if (hw->max_freq == UINT_MAX)
+		limits_dcvs_get_freq_limits(cpumask_first(&hw->core_map),
+			&hw->max_freq, &hw->min_freq);
+	max_limit = limits_mitigation_notify(hw);
+	if (max_limit >= hw->max_freq) {
+		del_timer(&hw->poll_timer);
+		writel_relaxed(0xFF, hw->int_clr_reg);
+		atomic_set(&hw->is_irq_enabled, 1);
+		enable_irq(hw->irq_num);
+	} else {
+		mod_timer(&hw->poll_timer, jiffies + msecs_to_jiffies(
+			LIMITS_POLLING_DELAY_MS));
+	}
+}
+
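+/*
+ * On a mitigation interrupt, disable the IRQ and switch to timer based
+ * polling until the hardware releases the frequency limit.
+ */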
+static void lmh_dcvs_notify(struct limits_dcvs_hw *hw)
+{
+	if (atomic_dec_and_test(&hw->is_irq_enabled)) {
+		disable_irq_nosync(hw->irq_num);
+		limits_mitigation_notify(hw);
+		mod_timer(&hw->poll_timer, jiffies + msecs_to_jiffies(
+			LIMITS_POLLING_DELAY_MS));
+	}
+}
+
+static irqreturn_t lmh_dcvs_handle_isr(int irq, void *data)
+{
+	struct limits_dcvs_hw *hw = data;
+
+	lmh_dcvs_notify(hw);
+
+	return IRQ_HANDLED;
+}
+
+static int limits_dcvs_write(uint32_t node_id, uint32_t fn,
+			      uint32_t setting, uint32_t val)
+{
+	int ret;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL;
+
+	payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+	if (!payload)
+		return -ENOMEM;
+
+	payload[0] = fn; /* algorithm */
+	payload[1] = 0; /* unused sub-algorithm */
+	payload[2] = setting;
+	payload[3] = 1; /* number of values */
+	payload[4] = val;
+
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = sizeof(uint32_t) * 5;
+	desc_arg.args[2] = LIMITS_NODE_DCVS;
+	desc_arg.args[3] = node_id;
+	desc_arg.args[4] = 0; /* version */
+	desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
+					SCM_VAL, SCM_VAL);
+
+	dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LIMITS_DCVSH), &desc_arg);
+
+	kfree(payload);
+
+	return ret;
+}
+
+static int lmh_get_temp(void *data, int *val)
+{
+	/*
+	 * The LMH DCVSh hardware doesn't support temperature reads.
+	 * Return a default value so the thermal core can aggregate
+	 * the thresholds.
+	 */
+	*val = LIMITS_TEMP_DEFAULT;
+
+	return 0;
+}
+
+static int lmh_set_trips(void *data, int low, int high)
+{
+	struct limits_dcvs_hw *hw = (struct limits_dcvs_hw *)data;
+	int ret = 0;
+
+	if (high < LIMITS_LOW_THRESHOLD_OFFSET || low < 0) {
+		pr_err("Value out of range low:%d high:%d\n",
+				low, high);
+		return -EINVAL;
+	}
+
+	/* Sanity check limits before writing to the hardware */
+	if (low >= high)
+		return -EINVAL;
+
+	hw->temp_limits[LIMITS_TRIP_HI] = (uint32_t)high;
+	hw->temp_limits[LIMITS_TRIP_ARM] = (uint32_t)low;
+
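+	/*
+	 * Program the arm threshold at the low trip, the high threshold at
+	 * the high trip, and keep the release (LOW) threshold a fixed
+	 * offset below the high trip.
+	 */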
+	ret =  limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+				  LIMITS_ARM_THRESHOLD, low);
+	if (ret)
+		return ret;
+	ret =  limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+				  LIMITS_HI_THRESHOLD, high);
+	if (ret)
+		return ret;
+	ret =  limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+				  LIMITS_LOW_THRESHOLD,
+				  high - LIMITS_LOW_THRESHOLD_OFFSET);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static struct thermal_zone_of_device_ops limits_sensor_ops = {
+	.get_temp   = lmh_get_temp,
+	.set_trips  = lmh_set_trips,
+};
+
+static struct limits_dcvs_hw *get_dcvsh_hw_from_cpu(int cpu)
+{
+	struct limits_dcvs_hw *hw;
+
+	list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
+		if (cpumask_test_cpu(cpu, &hw->core_map))
+			return hw;
+	}
+
+	return NULL;
+}
+
+static int enable_lmh(void)
+{
+	int ret = 0;
+	struct scm_desc desc_arg;
+
+	desc_arg.args[0] = 1;
+	desc_arg.arginfo = SCM_ARGS(1, SCM_VAL);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LIMITS_PROFILE_CHANGE),
+			&desc_arg);
+	if (ret) {
+		pr_err("Error switching profile:[1]. err:%d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int lmh_set_max_limit(int cpu, u32 freq)
+{
+	struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+	if (!hw)
+		return -EINVAL;
+
+	return limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_GENERAL,
+				  LIMITS_DOMAIN_MAX, freq);
+}
+
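+/*
+ * Assert the hardware minimum frequency vote whenever a floor other than
+ * the lowest supported frequency is requested, and release it otherwise.
+ */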
+static int lmh_set_min_limit(int cpu, u32 freq)
+{
+	struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+	if (!hw)
+		return -EINVAL;
+
+	if (freq != hw->min_freq)
+		writel_relaxed(0x01, hw->min_freq_reg);
+	else
+		writel_relaxed(0x00, hw->min_freq_reg);
+
+	return 0;
+}
+
+static struct cpu_cooling_ops cd_ops = {
+	.ceil_limit = lmh_set_max_limit,
+	.floor_limit = lmh_set_min_limit,
+};
+
+static int limits_dcvs_probe(struct platform_device *pdev)
+{
+	int ret;
+	int affinity = -1;
+	struct limits_dcvs_hw *hw;
+	struct thermal_zone_device *tzdev;
+	struct thermal_cooling_device *cdev;
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *cpu_node, *lmh_node;
+	uint32_t request_reg, clear_reg, min_reg;
+	unsigned long max_freq, min_freq;
+	int cpu;
+	cpumask_t mask = { CPU_BITS_NONE };
+
+	for_each_possible_cpu(cpu) {
+		cpu_node = of_cpu_device_node_get(cpu);
+		if (!cpu_node)
+			continue;
+		lmh_node = of_parse_phandle(cpu_node, "qcom,lmh-dcvs", 0);
+		if (lmh_node == dn) {
+			affinity = MPIDR_AFFINITY_LEVEL(
+					cpu_logical_map(cpu), 1);
+			/* set the cpumask */
+			cpumask_set_cpu(cpu, &(mask));
+		}
+		of_node_put(cpu_node);
+		of_node_put(lmh_node);
+	}
+
+	/*
+	 * Return an error if none of the CPUs have a
+	 * reference to our LMH node.
+	 */
+	if (affinity == -1)
+		return -EINVAL;
+
+	ret = limits_dcvs_get_freq_limits(cpumask_first(&mask), &max_freq,
+				     &min_freq);
+	if (ret)
+		return ret;
+	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		return -ENOMEM;
+
+	cpumask_copy(&hw->core_map, &mask);
+	switch (affinity) {
+	case 0:
+		hw->affinity = LIMITS_CLUSTER_0;
+		break;
+	case 1:
+		hw->affinity = LIMITS_CLUSTER_1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Enable the thermal algorithm early */
+	ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+		 LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		return ret;
+	/* Enable the LMH outer loop algorithm */
+	ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_CRNT,
+		 LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		return ret;
+	/* Enable the Reliability algorithm */
+	ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_REL,
+		 LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		return ret;
+	/* Enable the BCL algorithm */
+	ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_BCL,
+		 LIMITS_ALGO_MODE_ENABLE, 1);
+	if (ret)
+		return ret;
+	ret = enable_lmh();
+	if (ret)
+		return ret;
+
+	/*
+	 * Set up a virtual thermal zone for each LMH-DCVS hardware instance.
+	 * The sensor does not do actual thermal temperature readings
+	 * but does support setting thresholds for trips.
+	 * Register with the thermal framework, so we have the ability
+	 * to set low/high thresholds.
+	 */
+	hw->temp_limits[LIMITS_TRIP_HI] = INT_MAX;
+	hw->temp_limits[LIMITS_TRIP_ARM] = 0;
+	hw->hw_freq_limit = hw->max_freq = max_freq;
+	hw->min_freq = min_freq;
+	snprintf(hw->sensor_name, sizeof(hw->sensor_name), "limits_sensor-%02d",
+			affinity);
+	tzdev = thermal_zone_of_sensor_register(&pdev->dev, 0, hw,
+			&limits_sensor_ops);
+	if (IS_ERR_OR_NULL(tzdev))
+		return PTR_ERR(tzdev);
+
+	/* Setup cooling devices to request mitigation states */
+	cdev = cpufreq_platform_cooling_register(&hw->core_map, &cd_ops);
+	if (IS_ERR_OR_NULL(cdev))
+		return PTR_ERR(cdev);
+
+	switch (affinity) {
+	case 0:
+		request_reg = LIMITS_CLUSTER_0_REQ;
+		clear_reg = LIMITS_CLUSTER_0_INT_CLR;
+		min_reg = LIMITS_CLUSTER_0_MIN_FREQ;
+		break;
+	case 1:
+		request_reg = LIMITS_CLUSTER_1_REQ;
+		clear_reg = LIMITS_CLUSTER_1_INT_CLR;
+		min_reg = LIMITS_CLUSTER_1_MIN_FREQ;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	hw->osm_hw_reg = devm_ioremap(&pdev->dev, request_reg, 0x4);
+	if (!hw->osm_hw_reg) {
+		pr_err("register remap failed\n");
+		return -ENOMEM;
+	}
+	hw->int_clr_reg = devm_ioremap(&pdev->dev, clear_reg, 0x4);
+	if (!hw->int_clr_reg) {
+		pr_err("interrupt clear reg remap failed\n");
+		return -ENOMEM;
+	}
+	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
+	if (!hw->min_freq_reg) {
+		pr_err("min frequency enable register remap failed\n");
+		return -ENOMEM;
+	}
+	init_timer_deferrable(&hw->poll_timer);
+	hw->poll_timer.data = (unsigned long)hw;
+	hw->poll_timer.function = limits_dcvs_poll;
+
+	hw->irq_num = of_irq_get(pdev->dev.of_node, 0);
+	if (hw->irq_num < 0) {
+		ret = hw->irq_num;
+		pr_err("Error getting IRQ number. err:%d\n", ret);
+		return ret;
+	}
+	atomic_set(&hw->is_irq_enabled, 1);
+	ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
+		lmh_dcvs_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT
+		| IRQF_NO_SUSPEND, hw->sensor_name, hw);
+	if (ret) {
+		pr_err("Error registering for irq. err:%d\n", ret);
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&hw->list);
+	list_add(&hw->list, &lmh_dcvs_hw_list);
+
+	return ret;
+}
+
+static const struct of_device_id limits_dcvs_match[] = {
+	{ .compatible = "qcom,msm-hw-limits", },
+	{},
+};
+
+static struct platform_driver limits_dcvs_driver = {
+	.probe		= limits_dcvs_probe,
+	.driver		= {
+		.name = KBUILD_MODNAME,
+		.of_match_table = limits_dcvs_match,
+	},
+};
+builtin_platform_driver(limits_dcvs_driver);
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index bcef2e7..4fa7f82 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -37,7 +37,7 @@
  *       for this trip point
  *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
  *       for this trip point
- * If the temperature is lower than a trip point,
+ * If the temperature is lower than a hysteresis temperature,
  *    a. if the trend is THERMAL_TREND_RAISING, do nothing
  *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
  *       state for this trip point, if the cooling state already
@@ -126,7 +126,7 @@
 
 static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 {
-	int trip_temp;
+	int trip_temp, hyst_temp;
 	enum thermal_trip_type trip_type;
 	enum thermal_trend trend;
 	struct thermal_instance *instance;
@@ -134,22 +134,24 @@
 	int old_target;
 
 	if (trip == THERMAL_TRIPS_NONE) {
-		trip_temp = tz->forced_passive;
+		hyst_temp = trip_temp = tz->forced_passive;
 		trip_type = THERMAL_TRIPS_NONE;
 	} else {
 		tz->ops->get_trip_temp(tz, trip, &trip_temp);
+		if (tz->ops->get_trip_hyst) {
+			tz->ops->get_trip_hyst(tz, trip, &hyst_temp);
+			hyst_temp = trip_temp - hyst_temp;
+		} else {
+			hyst_temp = trip_temp;
+		}
 		tz->ops->get_trip_type(tz, trip, &trip_type);
 	}
 
 	trend = get_tz_trend(tz, trip);
 
-	if (tz->temperature >= trip_temp) {
-		throttle = true;
-		trace_thermal_zone_trip(tz, trip, trip_type);
-	}
-
-	dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
-				trip, trip_type, trip_temp, trend, throttle);
+	dev_dbg(&tz->device,
+		"Trip%d[type=%d,temp=%d,hyst=%d]:trend=%d,throttle=%d\n",
+		trip, trip_type, trip_temp, hyst_temp, trend, throttle);
 
 	mutex_lock(&tz->lock);
 
@@ -158,6 +160,22 @@
 			continue;
 
 		old_target = instance->target;
+		/*
+		 * Step wise has to lower the mitigation only if the
+		 * temperature goes below the hysteresis temperature.
+		 * At the least, it has to hold on to the mitigation device's
+		 * lower limit if the temperature is above the hysteresis
+		 * temperature.
+		 */
+		if (tz->temperature >= trip_temp ||
+			(tz->temperature >= hyst_temp &&
+			 old_target != THERMAL_NO_TARGET)) {
+			throttle = true;
+			trace_thermal_zone_trip(tz, trip, trip_type);
+		} else {
+			throttle = false;
+		}
+
 		instance->target = get_target_state(instance, trend, throttle);
 		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
 					old_target, (int)instance->target);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 226b0b4ac..f905103 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -49,6 +49,8 @@
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
 MODULE_LICENSE("GPL v2");
 
+#define THERMAL_MAX_ACTIVE	16
+
 static DEFINE_IDR(thermal_tz_idr);
 static DEFINE_IDR(thermal_cdev_idr);
 static DEFINE_MUTEX(thermal_idr_lock);
@@ -64,6 +66,8 @@
 
 static struct thermal_governor *def_governor;
 
+static struct workqueue_struct *thermal_passive_wq;
+
 static struct thermal_governor *__find_governor(const char *name)
 {
 	struct thermal_governor *pos;
@@ -392,14 +396,15 @@
 	mutex_unlock(&thermal_list_lock);
 }
 
-static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
+static void thermal_zone_device_set_polling(struct workqueue_struct *queue,
+					    struct thermal_zone_device *tz,
 					    int delay)
 {
 	if (delay > 1000)
-		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+		mod_delayed_work(queue, &tz->poll_queue,
 				 round_jiffies(msecs_to_jiffies(delay)));
 	else if (delay)
-		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+		mod_delayed_work(queue, &tz->poll_queue,
 				 msecs_to_jiffies(delay));
 	else
 		cancel_delayed_work(&tz->poll_queue);
@@ -410,11 +415,13 @@
 	mutex_lock(&tz->lock);
 
 	if (tz->passive)
-		thermal_zone_device_set_polling(tz, tz->passive_delay);
+		thermal_zone_device_set_polling(thermal_passive_wq,
+						tz, tz->passive_delay);
 	else if (tz->polling_delay)
-		thermal_zone_device_set_polling(tz, tz->polling_delay);
+		thermal_zone_device_set_polling(system_freezable_wq,
+						tz, tz->polling_delay);
 	else
-		thermal_zone_device_set_polling(tz, 0);
+		thermal_zone_device_set_polling(NULL, tz, 0);
 
 	mutex_unlock(&tz->lock);
 }
@@ -450,7 +457,7 @@
 	}
 }
 
-static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
+void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
 {
 	enum thermal_trip_type type;
 
@@ -513,7 +520,6 @@
 		if (!ret && *temp < crit_temp)
 			*temp = tz->emul_temperature;
 	}
- 
 	mutex_unlock(&tz->lock);
 exit:
 	return ret;
@@ -1194,6 +1200,25 @@
 }
 
 static ssize_t
+thermal_cooling_device_min_state_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+	unsigned long state;
+	int ret;
+
+	if (cdev->ops->get_min_state)
+		ret = cdev->ops->get_min_state(cdev, &state);
+	else
+		ret = -EPERM;
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", state);
+}
+
+static ssize_t
 thermal_cooling_device_cur_state_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
 {
@@ -1213,8 +1238,7 @@
 				       const char *buf, size_t count)
 {
 	struct thermal_cooling_device *cdev = to_cooling_device(dev);
-	unsigned long state;
-	int result;
+	long state;
 
 	if (!sscanf(buf, "%ld\n", &state))
 		return -EINVAL;
@@ -1222,9 +1246,35 @@
 	if ((long)state < 0)
 		return -EINVAL;
 
-	result = cdev->ops->set_cur_state(cdev, state);
-	if (result)
-		return result;
+	cdev->sysfs_cur_state_req = state;
+
+	cdev->updated = false;
+	thermal_cdev_update(cdev);
+
+	return count;
+}
+
+static ssize_t
+thermal_cooling_device_min_state_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+	long state;
+	int ret = 0;
+
+	ret = sscanf(buf, "%ld\n", &state);
+	if (ret <= 0)
+		return (ret < 0) ? ret : -EINVAL;
+
+	if ((long)state < 0)
+		return -EINVAL;
+
+	cdev->sysfs_min_state_req = state;
+
+	cdev->updated = false;
+	thermal_cdev_update(cdev);
+
 	return count;
 }
 
@@ -1235,6 +1285,9 @@
 static DEVICE_ATTR(cur_state, 0644,
 		   thermal_cooling_device_cur_state_show,
 		   thermal_cooling_device_cur_state_store);
+static DEVICE_ATTR(min_state, 0644,
+		   thermal_cooling_device_min_state_show,
+		   thermal_cooling_device_min_state_store);
 
 static ssize_t
 thermal_cooling_device_trip_point_show(struct device *dev,
@@ -1255,6 +1308,7 @@
 	&dev_attr_cdev_type.attr,
 	&dev_attr_max_state.attr,
 	&dev_attr_cur_state.attr,
+	&dev_attr_min_state.attr,
 	NULL,
 };
 
@@ -1548,6 +1602,8 @@
 	cdev->device.class = &thermal_class;
 	cdev->device.groups = cooling_device_attr_groups;
 	cdev->devdata = devdata;
+	cdev->sysfs_cur_state_req = 0;
+	cdev->sysfs_min_state_req = ULONG_MAX;
 	dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
 	result = device_register(&cdev->device);
 	if (result) {
@@ -1682,7 +1738,7 @@
 void thermal_cdev_update(struct thermal_cooling_device *cdev)
 {
 	struct thermal_instance *instance;
-	unsigned long target = 0;
+	unsigned long current_target = 0, min_target = ULONG_MAX;
 
 	mutex_lock(&cdev->lock);
 	/* cooling device is updated*/
@@ -1692,19 +1748,29 @@
 	}
 
 	/* Make sure cdev enters the deepest cooling state */
+	current_target = cdev->sysfs_cur_state_req;
+	min_target = cdev->sysfs_min_state_req;
 	list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
 		dev_dbg(&cdev->device, "zone%d->target=%lu\n",
 				instance->tz->id, instance->target);
 		if (instance->target == THERMAL_NO_TARGET)
 			continue;
-		if (instance->target > target)
-			target = instance->target;
+		if (instance->tz->governor->min_state_throttle) {
+			if (instance->target < min_target)
+				min_target = instance->target;
+		} else {
+			if (instance->target > current_target)
+				current_target = instance->target;
+		}
 	}
-	cdev->ops->set_cur_state(cdev, target);
+	cdev->ops->set_cur_state(cdev, current_target);
+	if (cdev->ops->set_min_state)
+		cdev->ops->set_min_state(cdev, min_target);
 	cdev->updated = true;
 	mutex_unlock(&cdev->lock);
-	trace_cdev_update(cdev, target);
-	dev_dbg(&cdev->device, "set to state %lu\n", target);
+	trace_cdev_update(cdev, current_target);
+	dev_dbg(&cdev->device, "set to state %lu min state %lu\n",
+				current_target, min_target);
 }
 EXPORT_SYMBOL(thermal_cdev_update);
 
@@ -2069,7 +2135,7 @@
 
 	mutex_unlock(&thermal_list_lock);
 
-	thermal_zone_device_set_polling(tz, 0);
+	thermal_zone_device_set_polling(NULL, tz, 0);
 
 	if (tz->type[0])
 		device_remove_file(&tz->device, &dev_attr_type);
@@ -2172,6 +2238,8 @@
 	.n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps),
 };
 
+static int allow_netlink_events;
+
 int thermal_generate_netlink_event(struct thermal_zone_device *tz,
 					enum events event)
 {
@@ -2186,6 +2254,9 @@
 	if (!tz)
 		return -EINVAL;
 
+	if (!allow_netlink_events)
+		return -ENODEV;
+
 	/* allocate memory */
 	size = nla_total_size(sizeof(struct thermal_genl_event)) +
 	       nla_total_size(0);
@@ -2237,7 +2308,13 @@
 
 static int genetlink_init(void)
 {
-	return genl_register_family(&thermal_event_genl_family);
+	int ret;
+
+	ret = genl_register_family(&thermal_event_genl_family);
+	if (!ret)
+		allow_netlink_events = true;
+
+	return ret;
 }
 
 static void genetlink_exit(void)
@@ -2247,6 +2324,8 @@
 #else /* !CONFIG_NET */
 static inline int genetlink_init(void) { return 0; }
 static inline void genetlink_exit(void) {}
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+		enum events event) { return -ENODEV; }
 #endif /* !CONFIG_NET */
 
 static int __init thermal_register_governors(void)
@@ -2269,6 +2348,10 @@
 	if (result)
 		return result;
 
+	result = thermal_gov_low_limits_register();
+	if (result)
+		return result;
+
 	return thermal_gov_power_allocator_register();
 }
 
@@ -2278,6 +2361,7 @@
 	thermal_gov_fair_share_unregister();
 	thermal_gov_bang_bang_unregister();
 	thermal_gov_user_space_unregister();
+	thermal_gov_low_limits_unregister();
 	thermal_gov_power_allocator_unregister();
 }
 
@@ -2316,21 +2400,26 @@
 {
 	int result;
 
+	thermal_passive_wq = alloc_workqueue("thermal_passive_wq",
+						WQ_HIGHPRI | WQ_UNBOUND
+						| WQ_FREEZABLE,
+						THERMAL_MAX_ACTIVE);
+	if (!thermal_passive_wq) {
+		result = -ENOMEM;
+		goto init_exit;
+	}
+
 	result = thermal_register_governors();
 	if (result)
-		goto error;
+		goto destroy_wq;
 
 	result = class_register(&thermal_class);
 	if (result)
 		goto unregister_governors;
 
-	result = genetlink_init();
-	if (result)
-		goto unregister_class;
-
 	result = of_parse_thermal_zones();
 	if (result)
-		goto exit_netlink;
+		goto exit_zone_parse;
 
 	result = register_pm_notifier(&thermal_pm_nb);
 	if (result)
@@ -2339,13 +2428,13 @@
 
 	return 0;
 
-exit_netlink:
-	genetlink_exit();
-unregister_class:
+exit_zone_parse:
 	class_unregister(&thermal_class);
 unregister_governors:
 	thermal_unregister_governors();
-error:
+destroy_wq:
+	destroy_workqueue(thermal_passive_wq);
+init_exit:
 	idr_destroy(&thermal_tz_idr);
 	idr_destroy(&thermal_cdev_idr);
 	mutex_destroy(&thermal_idr_lock);
@@ -2354,10 +2443,11 @@
 	return result;
 }
 
-static void __exit thermal_exit(void)
+static void thermal_exit(void)
 {
 	unregister_pm_notifier(&thermal_pm_nb);
 	of_thermal_destroy_zones();
+	destroy_workqueue(thermal_passive_wq);
 	genetlink_exit();
 	class_unregister(&thermal_class);
 	thermal_unregister_governors();
@@ -2368,5 +2458,19 @@
 	mutex_destroy(&thermal_governor_lock);
 }
 
-fs_initcall(thermal_init);
+static int __init thermal_netlink_init(void)
+{
+	int ret = 0;
+
+	ret = genetlink_init();
+	if (!ret)
+		goto exit_netlink;
+
+	thermal_exit();
+exit_netlink:
+	return ret;
+}
+
+subsys_initcall(thermal_init);
+fs_initcall(thermal_netlink_init);
 module_exit(thermal_exit);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 749d41a..eca8c3c 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -56,6 +56,7 @@
 
 int thermal_register_governor(struct thermal_governor *);
 void thermal_unregister_governor(struct thermal_governor *);
+void handle_thermal_trip(struct thermal_zone_device *tz, int trip);
 
 #ifdef CONFIG_THERMAL_GOV_STEP_WISE
 int thermal_gov_step_wise_register(void);
@@ -97,6 +98,14 @@
 static inline void thermal_gov_power_allocator_unregister(void) {}
 #endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
 
+#ifdef CONFIG_THERMAL_GOV_LOW_LIMITS
+int thermal_gov_low_limits_register(void);
+void thermal_gov_low_limits_unregister(void);
+#else
+static inline int thermal_gov_low_limits_register(void) { return 0; }
+static inline void thermal_gov_low_limits_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_LOW_LIMITS */
+
 /* device tree support */
 #ifdef CONFIG_THERMAL_OF
 int of_parse_thermal_zones(void);
@@ -105,6 +114,10 @@
 bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
 const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *);
+int of_thermal_aggregate_trip(struct thermal_zone_device *tz,
+			      enum thermal_trip_type type,
+			      int *low, int *high);
+void of_thermal_handle_trip(struct thermal_zone_device *tz);
 #else
 static inline int of_parse_thermal_zones(void) { return 0; }
 static inline void of_thermal_destroy_zones(void) { }
@@ -122,6 +135,15 @@
 {
 	return NULL;
 }
+static inline int of_thermal_aggregate_trip(struct thermal_zone_device *tz,
+					    enum thermal_trip_type type,
+					    int *low, int *high)
+{
+	return -ENODEV;
+}
+static inline
+void of_thermal_handle_trip(struct thermal_zone_device *tz)
+{ }
 #endif
 
 #endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
new file mode 100644
index 0000000..7cd8c86
--- /dev/null
+++ b/drivers/thermal/tsens-dbg.c
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/arch_timer.h>
+#include "tsens.h"
+
+/* debug defines */
+#define	TSENS_DBG_BUS_ID_0			0
+#define	TSENS_DBG_BUS_ID_1			1
+#define	TSENS_DBG_BUS_ID_2			2
+#define	TSENS_DBG_BUS_ID_15			15
+#define	TSENS_DEBUG_LOOP_COUNT_ID_0		2
+#define	TSENS_DEBUG_LOOP_COUNT			5
+#define	TSENS_DEBUG_STATUS_REG_START		10
+#define	TSENS_DEBUG_OFFSET_RANGE		16
+#define	TSENS_DEBUG_OFFSET_WORD1		0x4
+#define	TSENS_DEBUG_OFFSET_WORD2		0x8
+#define	TSENS_DEBUG_OFFSET_WORD3		0xc
+#define	TSENS_DEBUG_OFFSET_ROW			0x10
+#define	TSENS_DEBUG_DECIDEGC			-950
+#define	TSENS_DEBUG_CYCLE_MS			64
+#define	TSENS_DEBUG_POLL_MS			200
+#define	TSENS_DEBUG_BUS_ID2_MIN_CYCLE		50
+#define	TSENS_DEBUG_BUS_ID2_MAX_CYCLE		51
+#define	TSENS_DEBUG_ID_MASK_1_4			0xffffffe1
+#define	DEBUG_SIZE				10
+
+#define TSENS_DEBUG_CONTROL(n)			((n) + 0x1130)
+#define TSENS_DEBUG_DATA(n)			((n) + 0x1134)
+
+struct tsens_dbg_func {
+	int (*dbg_func)(struct tsens_device *, u32, u32, int *);
+};
+
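+/* Log the last DEBUG_SIZE temperature readings and timestamps per sensor */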
+static int tsens_dbg_log_temp_reads(struct tsens_device *data, u32 id,
+					u32 dbg_type, int *temp)
+{
+	struct tsens_sensor *sensor;
+	struct tsens_device *tmdev = NULL;
+	u32 idx = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	pr_debug("%d %d\n", id, dbg_type);
+	tmdev = data;
+	sensor = &tmdev->sensor[id];
+	idx = tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].idx;
+	tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].temp[idx%10] = *temp;
+	tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].time_stmp[idx%10] =
+					sched_clock();
+	idx++;
+	tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].idx = idx;
+
+	return 0;
+}
+
+static int tsens_dbg_log_interrupt_timestamp(struct tsens_device *data,
+						u32 id, u32 dbg_type, int *val)
+{
+	struct tsens_device *tmdev = NULL;
+	u32 idx = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	pr_debug("%d %d\n", id, dbg_type);
+	tmdev = data;
+	/* debug */
+	idx = tmdev->tsens_dbg.tsens_thread_iq_dbg.idx;
+	tmdev->tsens_dbg.tsens_thread_iq_dbg.dbg_count[idx%10]++;
+	tmdev->tsens_dbg.tsens_thread_iq_dbg.time_stmp[idx%10] =
+							sched_clock();
+	tmdev->tsens_dbg.tsens_thread_iq_dbg.idx++;
+
+	return 0;
+}
+
+static struct tsens_dbg_func dbg_arr[] = {
+	[TSENS_DBG_LOG_TEMP_READS] = {tsens_dbg_log_temp_reads},
+	[TSENS_DBG_LOG_INTERRUPT_TIMESTAMP] = {
+			tsens_dbg_log_interrupt_timestamp},
+};
+
+int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *val)
+{
+	if (dbg_type >= TSENS_DBG_LOG_MAX)
+		return -EINVAL;
+
+	dbg_arr[dbg_type].dbg_func(data, id, dbg_type, val);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens2xxx_dbg);
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
new file mode 100644
index 0000000..b9ebb65
--- /dev/null
+++ b/drivers/thermal/tsens.h
@@ -0,0 +1,135 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __QCOM_TSENS_H__
+#define __QCOM_TSENS_H__
+
+#include <linux/kernel.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#define DEBUG_SIZE				10
+#define TSENS_MAX_SENSORS			16
+#define TSENS_CONTROLLER_ID(n)			((n) + 0x1000)
+#define TSENS_CTRL_ADDR(n)			(n)
+#define TSENS_TM_SN_STATUS(n)			((n) + 0x10a0)
+
+enum tsens_dbg_type {
+	TSENS_DBG_POLL,
+	TSENS_DBG_LOG_TEMP_READS,
+	TSENS_DBG_LOG_INTERRUPT_TIMESTAMP,
+	TSENS_DBG_LOG_MAX
+};
+
+#define tsens_sec_to_msec_value		1000
+
+struct tsens_device;
+
+#if defined(CONFIG_THERMAL_TSENS)
+int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *temp);
+#else
+static inline int tsens2xxx_dbg(struct tsens_device *data, u32 id,
+						u32 dbg_type, int *temp)
+{ return -ENXIO; }
+#endif
+
+struct tsens_dbg {
+	u32				dbg_count[DEBUG_SIZE];
+	u32				idx;
+	unsigned long long		time_stmp[DEBUG_SIZE];
+	unsigned long			temp[DEBUG_SIZE];
+};
+
+struct tsens_dbg_context {
+	struct tsens_device		*tmdev;
+	struct tsens_dbg		tsens_thread_iq_dbg;
+	struct tsens_dbg		sensor_dbg_info[TSENS_MAX_SENSORS];
+	int				tsens_critical_wd_cnt;
+	struct delayed_work		tsens_critical_poll_test;
+};
+
+struct tsens_context {
+	enum thermal_device_mode	high_th_state;
+	enum thermal_device_mode	low_th_state;
+	enum thermal_device_mode	crit_th_state;
+	int				high_temp;
+	int				low_temp;
+	int				crit_temp;
+};
+
+struct tsens_sensor {
+	struct tsens_device		*tmdev;
+	struct thermal_zone_device	*tzd;
+	u32				hw_id;
+	u32				id;
+	const char			*sensor_name;
+	struct tsens_context		thr_state;
+};
+
+/**
+ * struct tsens_ops - operations as supported by the tsens device
+ * @hw_init: Function to initialize the tsens device
+ * @get_temp: Function which returns the temp in millidegC
+ * @set_trip_temp: Function to set a trip temperature threshold for a sensor
+ * @interrupts_reg: Function to register the tsens interrupts
+ * @dbg: Function to log tsens debug data
+ */
+struct tsens_ops {
+	int (*hw_init)(struct tsens_device *);
+	int (*get_temp)(struct tsens_sensor *, int *);
+	int (*set_trip_temp)(struct tsens_sensor *, int, int);
+	int (*interrupts_reg)(struct tsens_device *);
+	int (*dbg)(struct tsens_device *, u32, u32, int *);
+};
+
+struct tsens_irqs {
+	const char			*name;
+	irqreturn_t (*handler)(int, void *);
+};
+
+/**
+ * struct tsens_data - tsens instance specific data
+ * @num_sensors: Max number of sensors supported by platform
+ * @ops: operations the tsens instance supports
+ * @hw_ids: Subset of sensor ids supported by the platform, if not the first n
+ */
+struct tsens_data {
+	const u32			num_sensors;
+	const struct tsens_ops		*ops;
+	unsigned int			*hw_ids;
+	u32				temp_factor;
+	bool				cycle_monitor;
+	u32				cycle_compltn_monitor_val;
+	bool				wd_bark;
+	u32				wd_bark_val;
+};
+
+struct tsens_device {
+	struct device			*dev;
+	struct platform_device		*pdev;
+	struct list_head		list;
+	u32				num_sensors;
+	struct regmap			*map;
+	struct regmap_field		*status_field;
+	void				*tsens_addr;
+	const struct tsens_ops		*ops;
+	struct tsens_dbg_context	tsens_dbg;
+	spinlock_t			tsens_crit_lock;
+	spinlock_t			tsens_upp_low_lock;
+	const struct tsens_data		*ctrl_data;
+	struct tsens_sensor		sensor[0];
+};
+
+extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+
+#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
new file mode 100644
index 0000000..1f0bee9
--- /dev/null
+++ b/drivers/thermal/tsens2xxx.c
@@ -0,0 +1,543 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/vmalloc.h>
+#include "tsens.h"
+
+#define TSENS_DRIVER_NAME			"msm-tsens"
+
+#define TSENS_TM_INT_EN(n)			((n) + 0x1004)
+#define TSENS_TM_CRITICAL_INT_STATUS(n)		((n) + 0x1014)
+#define TSENS_TM_CRITICAL_INT_CLEAR(n)		((n) + 0x1018)
+#define TSENS_TM_CRITICAL_INT_MASK(n)		((n) + 0x101c)
+#define TSENS_TM_CRITICAL_WD_BARK		BIT(31)
+#define TSENS_TM_CRITICAL_CYCLE_MONITOR		BIT(30)
+#define TSENS_TM_CRITICAL_INT_EN		BIT(2)
+#define TSENS_TM_UPPER_INT_EN			BIT(1)
+#define TSENS_TM_LOWER_INT_EN			BIT(0)
+#define TSENS_TM_SN_UPPER_LOWER_THRESHOLD(n)	((n) + 0x1020)
+#define TSENS_TM_SN_ADDR_OFFSET			0x4
+#define TSENS_TM_UPPER_THRESHOLD_SET(n)		((n) << 12)
+#define TSENS_TM_UPPER_THRESHOLD_VALUE_SHIFT(n)	((n) >> 12)
+#define TSENS_TM_LOWER_THRESHOLD_VALUE(n)	((n) & 0xfff)
+#define TSENS_TM_UPPER_THRESHOLD_VALUE(n)	(((n) & 0xfff000) >> 12)
+#define TSENS_TM_UPPER_THRESHOLD_MASK		0xfff000
+#define TSENS_TM_LOWER_THRESHOLD_MASK		0xfff
+#define TSENS_TM_UPPER_THRESHOLD_SHIFT		12
+#define TSENS_TM_SN_CRITICAL_THRESHOLD(n)	((n) + 0x1060)
+#define TSENS_STATUS_ADDR_OFFSET		2
+#define TSENS_TM_UPPER_INT_MASK(n)		(((n) & 0xffff0000) >> 16)
+#define TSENS_TM_LOWER_INT_MASK(n)		((n) & 0xffff)
+#define TSENS_TM_UPPER_LOWER_INT_STATUS(n)	((n) + 0x1008)
+#define TSENS_TM_UPPER_LOWER_INT_CLEAR(n)	((n) + 0x100c)
+#define TSENS_TM_UPPER_LOWER_INT_MASK(n)	((n) + 0x1010)
+#define TSENS_TM_UPPER_INT_SET(n)		(1 << (n + 16))
+#define TSENS_TM_SN_CRITICAL_THRESHOLD_MASK	0xfff
+#define TSENS_TM_SN_STATUS_VALID_BIT		BIT(21)
+#define TSENS_TM_SN_STATUS_CRITICAL_STATUS	BIT(19)
+#define TSENS_TM_SN_STATUS_UPPER_STATUS		BIT(18)
+#define TSENS_TM_SN_STATUS_LOWER_STATUS		BIT(17)
+#define TSENS_TM_SN_LAST_TEMP_MASK		0xfff
+#define TSENS_TM_CODE_BIT_MASK			0xfff
+#define TSENS_TM_CODE_SIGN_BIT			0x800
+
+#define TSENS_EN				BIT(0)
+
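+/* Sign-extend the 12-bit temperature code and scale it to millidegrees C */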
+static void msm_tsens_convert_temp(int last_temp, int *temp)
+{
+	int code_mask = ~TSENS_TM_CODE_BIT_MASK;
+
+	if (last_temp & TSENS_TM_CODE_SIGN_BIT) {
+		/* Sign extension for negative value */
+		last_temp |= code_mask;
+	}
+
+	*temp = last_temp * 100;
+}
+
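+/*
+ * Read the sensor status register up to three times and use the first
+ * reading with the valid bit set. If none is valid, fall back to the
+ * last pair of matching readings.
+ */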
+static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
+{
+	struct tsens_device *tmdev = NULL;
+	unsigned int code;
+	void __iomem *sensor_addr;
+	int last_temp = 0, last_temp2 = 0, last_temp3 = 0;
+
+	if (!sensor)
+		return -EINVAL;
+
+	tmdev = sensor->tmdev;
+	sensor_addr = TSENS_TM_SN_STATUS(tmdev->tsens_addr);
+
+	code = readl_relaxed_no_log(sensor_addr +
+			(sensor->hw_id << TSENS_STATUS_ADDR_OFFSET));
+	last_temp = code & TSENS_TM_SN_LAST_TEMP_MASK;
+
+	if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
+		msm_tsens_convert_temp(last_temp, temp);
+		return 0;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr +
+		(sensor->hw_id << TSENS_STATUS_ADDR_OFFSET));
+	last_temp2 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
+		last_temp = last_temp2;
+		msm_tsens_convert_temp(last_temp, temp);
+		return 0;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr +
+			(sensor->hw_id <<
+			TSENS_STATUS_ADDR_OFFSET));
+	last_temp3 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
+		last_temp = last_temp3;
+		msm_tsens_convert_temp(last_temp, temp);
+		return 0;
+	}
+
+	if (last_temp == last_temp2)
+		last_temp = last_temp2;
+	else if (last_temp2 == last_temp3)
+		last_temp = last_temp3;
+
+	msm_tsens_convert_temp(last_temp, temp);
+
+	if (tmdev->ops->dbg)
+		tmdev->ops->dbg(tmdev, (u32) sensor->hw_id,
+					TSENS_DBG_LOG_TEMP_READS, temp);
+
+	return 0;
+}
+
+static int tsens_tm_activate_trip_type(struct tsens_sensor *tm_sensor,
+			int trip, enum thermal_trip_activation_mode mode)
+{
+	struct tsens_device *tmdev = NULL;
+	unsigned int reg_cntl, mask;
+	unsigned long flags;
+	int rc = 0;
+
+	/* clear the interrupt and unmask */
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tmdev;
+	if (!tmdev)
+		return -EINVAL;
+
+	spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
+	mask = (tm_sensor->hw_id);
+	switch (trip) {
+	case THERMAL_TRIP_CRITICAL:
+		tmdev->sensor[tm_sensor->hw_id].
+			thr_state.crit_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_CRITICAL_INT_MASK
+							(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl | (1 << mask),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl & ~(1 << mask),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+		break;
+	case THERMAL_TRIP_ACTIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+			thr_state.high_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
+						(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl |
+				(TSENS_TM_UPPER_INT_SET(mask)),
+				(TSENS_TM_UPPER_LOWER_INT_MASK
+				(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl &
+				~(TSENS_TM_UPPER_INT_SET(mask)),
+				(TSENS_TM_UPPER_LOWER_INT_MASK
+				(tmdev->tsens_addr)));
+		break;
+	case THERMAL_TRIP_PASSIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+			thr_state.low_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
+						(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl | (1 << mask),
+			(TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl & ~(1 << mask),
+			(TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
+	/* Activate and enable the respective trip threshold setting */
+	mb();
+
+	return rc;
+}
+
+static int tsens2xxx_set_trip_temp(struct tsens_sensor *tm_sensor,
+							int trip, int temp)
+{
+	unsigned int reg_cntl;
+	unsigned long flags;
+	struct tsens_device *tmdev = NULL;
+	int rc = 0;
+
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tmdev;
+	if (!tmdev)
+		return -EINVAL;
+
+	spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
+	switch (trip) {
+	case THERMAL_TRIP_CRITICAL:
+		tmdev->sensor[tm_sensor->hw_id].
+				thr_state.crit_temp = temp;
+		temp &= TSENS_TM_SN_CRITICAL_THRESHOLD_MASK;
+		writel_relaxed(temp,
+			(TSENS_TM_SN_CRITICAL_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
+		break;
+	case THERMAL_TRIP_ACTIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+				thr_state.high_temp = temp;
+		reg_cntl = readl_relaxed((TSENS_TM_SN_UPPER_LOWER_THRESHOLD
+				(tmdev->tsens_addr)) +
+				(tm_sensor->hw_id *
+				TSENS_TM_SN_ADDR_OFFSET));
+		temp = TSENS_TM_UPPER_THRESHOLD_SET(temp);
+		temp &= TSENS_TM_UPPER_THRESHOLD_MASK;
+		reg_cntl &= ~TSENS_TM_UPPER_THRESHOLD_MASK;
+		writel_relaxed(reg_cntl | temp,
+			(TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
+		break;
+	case THERMAL_TRIP_PASSIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+				thr_state.low_temp = temp;
+		reg_cntl = readl_relaxed((TSENS_TM_SN_UPPER_LOWER_THRESHOLD
+				(tmdev->tsens_addr)) +
+				(tm_sensor->hw_id *
+				TSENS_TM_SN_ADDR_OFFSET));
+		temp &= TSENS_TM_LOWER_THRESHOLD_MASK;
+		reg_cntl &= ~TSENS_TM_LOWER_THRESHOLD_MASK;
+		writel_relaxed(reg_cntl | temp,
+			(TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
+		break;
+	default:
+		pr_err("Invalid trip to TSENS: %d\n", trip);
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
+	/* Set trip temperature thresholds */
+	mb();
+
+	rc = tsens_tm_activate_trip_type(tm_sensor, trip,
+				THERMAL_TRIP_ACTIVATION_ENABLED);
+	if (rc)
+		pr_err("Error during trip activation :%d\n", rc);
+
+	return rc;
+}
+
+static irqreturn_t tsens_tm_critical_irq_thread(int irq, void *data)
+{
+	struct tsens_device *tm = data;
+	unsigned int i, status;
+	unsigned long flags;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_critical_addr;
+	void __iomem *wd_critical_addr;
+	int wd_mask;
+
+	sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+	sensor_int_mask_addr =
+		TSENS_TM_CRITICAL_INT_MASK(tm->tsens_addr);
+	sensor_critical_addr =
+		TSENS_TM_SN_CRITICAL_THRESHOLD(tm->tsens_addr);
+	wd_critical_addr =
+		TSENS_TM_CRITICAL_INT_STATUS(tm->tsens_addr);
+
+	if (tm->ctrl_data->wd_bark) {
+		wd_mask = readl_relaxed(wd_critical_addr);
+		if (wd_mask & TSENS_TM_CRITICAL_WD_BARK) {
+			/*
+			 * Clear watchdog interrupt and
+			 * increment global wd count
+			 */
+			writel_relaxed(wd_mask | TSENS_TM_CRITICAL_WD_BARK,
+				(TSENS_TM_CRITICAL_INT_CLEAR
+				(tm->tsens_addr)));
+			writel_relaxed(wd_mask & ~(TSENS_TM_CRITICAL_WD_BARK),
+				(TSENS_TM_CRITICAL_INT_CLEAR
+				(tm->tsens_addr)));
+			tm->tsens_dbg.tsens_critical_wd_cnt++;
+			return IRQ_HANDLED;
+		}
+	}
+
+	for (i = 0; i < tm->num_sensors; i++) {
+		int int_mask, int_mask_val;
+		u32 addr_offset;
+
+		spin_lock_irqsave(&tm->tsens_crit_lock, flags);
+		addr_offset = tm->sensor[i].hw_id *
+						TSENS_TM_SN_ADDR_OFFSET;
+		status = readl_relaxed(sensor_status_addr + addr_offset);
+		int_mask = readl_relaxed(sensor_int_mask_addr);
+
+		if ((status & TSENS_TM_SN_STATUS_CRITICAL_STATUS) &&
+			!(int_mask & (1 << tm->sensor[i].hw_id))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = (1 << tm->sensor[i].hw_id);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_CRITICAL_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_CRITICAL_INT_CLEAR(tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_CRITICAL_INT_CLEAR(
+					tm->tsens_addr));
+			tm->sensor[i].thr_state.
+					crit_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		spin_unlock_irqrestore(&tm->tsens_crit_lock, flags);
+	}
+
+	/* Mask critical interrupt */
+	mb();
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tsens_tm_irq_thread(int irq, void *data)
+{
+	struct tsens_device *tm = data;
+	unsigned int i, status, threshold;
+	unsigned long flags;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_upper_lower_addr;
+	u32 addr_offset = 0;
+
+	sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+	sensor_int_mask_addr =
+		TSENS_TM_UPPER_LOWER_INT_MASK(tm->tsens_addr);
+	sensor_upper_lower_addr =
+		TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tm->tsens_addr);
+
+	for (i = 0; i < tm->num_sensors; i++) {
+		bool upper_thr = false, lower_thr = false;
+		int int_mask, int_mask_val = 0;
+
+		spin_lock_irqsave(&tm->tsens_upp_low_lock, flags);
+		addr_offset = tm->sensor[i].hw_id *
+						TSENS_TM_SN_ADDR_OFFSET;
+		status = readl_relaxed(sensor_status_addr + addr_offset);
+		threshold = readl_relaxed(sensor_upper_lower_addr +
+								addr_offset);
+		int_mask = readl_relaxed(sensor_int_mask_addr);
+
+		if ((status & TSENS_TM_SN_STATUS_UPPER_STATUS) &&
+			!(int_mask &
+				(1 << (tm->sensor[i].hw_id + 16)))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = TSENS_TM_UPPER_INT_SET(
+					tm->sensor[i].hw_id);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			upper_thr = true;
+			tm->sensor[i].thr_state.
+					high_th_state = THERMAL_DEVICE_DISABLED;
+		}
+
+		if ((status & TSENS_TM_SN_STATUS_LOWER_STATUS) &&
+			!(int_mask &
+				(1 << tm->sensor[i].hw_id))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = (1 << tm->sensor[i].hw_id);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			lower_thr = true;
+			tm->sensor[i].thr_state.
+					low_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		spin_unlock_irqrestore(&tm->tsens_upp_low_lock, flags);
+
+		if (upper_thr || lower_thr) {
+			int temp;
+			enum thermal_trip_type trip =
+					THERMAL_TRIP_CONFIGURABLE_LOW;
+
+			if (upper_thr)
+				trip = THERMAL_TRIP_CONFIGURABLE_HI;
+			tsens2xxx_get_temp(&tm->sensor[i], &temp);
+			/* Use id for multiple controllers */
+			pr_debug("sensor:%d trigger temp (%d degC)\n",
+				tm->sensor[i].hw_id,
+				(status & TSENS_TM_SN_LAST_TEMP_MASK));
+		}
+	}
+
+	/* Disable monitoring sensor trip threshold for triggered sensor */
+	mb();
+
+	if (tm->ops->dbg)
+		tm->ops->dbg(tm, 0, TSENS_DBG_LOG_INTERRUPT_TIMESTAMP, NULL);
+
+	return IRQ_HANDLED;
+}
+
+static int tsens2xxx_hw_init(struct tsens_device *tmdev)
+{
+	void __iomem *srot_addr;
+	void __iomem *sensor_int_mask_addr;
+	unsigned int srot_val;
+	int crit_mask;
+
+	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_addr + 0x4);
+	srot_val = readl_relaxed(srot_addr);
+	if (!(srot_val & TSENS_EN)) {
+		pr_err("TSENS device is not enabled\n");
+		return -ENODEV;
+	}
+
+	if (tmdev->ctrl_data->cycle_monitor) {
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_addr);
+		crit_mask = readl_relaxed(sensor_int_mask_addr);
+		writel_relaxed(
+			crit_mask | tmdev->ctrl_data->cycle_compltn_monitor_val,
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_addr)));
+		/* Update critical cycle monitoring */
+		mb();
+	}
+	writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
+		TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
+		TSENS_TM_INT_EN(tmdev->tsens_addr));
+
+	spin_lock_init(&tmdev->tsens_crit_lock);
+	spin_lock_init(&tmdev->tsens_upp_low_lock);
+
+	return 0;
+}
+
+static const struct tsens_irqs tsens2xxx_irqs[] = {
+	{ "tsens-upper-lower", tsens_tm_irq_thread},
+	{ "tsens-critical", tsens_tm_critical_irq_thread},
+};
+
+static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
+{
+	struct platform_device *pdev;
+	int i, rc;
+
+	if (!tmdev)
+		return -EINVAL;
+
+	pdev = tmdev->pdev;
+
+	for (i = 0; i < ARRAY_SIZE(tsens2xxx_irqs); i++) {
+		int irq;
+
+		irq = platform_get_irq_byname(pdev, tsens2xxx_irqs[i].name);
+		if (irq < 0) {
+			dev_err(&pdev->dev, "failed to get irq %s\n",
+					tsens2xxx_irqs[i].name);
+			return irq;
+		}
+
+		rc = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+				tsens2xxx_irqs[i].handler,
+				IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				tsens2xxx_irqs[i].name, tmdev);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to get irq %s\n",
+					tsens2xxx_irqs[i].name);
+			return rc;
+		}
+		enable_irq_wake(irq);
+	}
+
+	return 0;
+}
+
+static const struct tsens_ops ops_tsens2xxx = {
+	.hw_init	= tsens2xxx_hw_init,
+	.get_temp	= tsens2xxx_get_temp,
+	.set_trip_temp	= tsens2xxx_set_trip_temp,
+	.interrupts_reg	= tsens2xxx_register_interrupts,
+	.dbg		= tsens2xxx_dbg,
+};
+
+const struct tsens_data data_tsens2xxx = {
+	.cycle_monitor			= false,
+	.cycle_compltn_monitor_val	= 0,
+	.wd_bark			= false,
+	.wd_bark_val			= 0,
+	.ops				= &ops_tsens2xxx,
+};
+
+const struct tsens_data data_tsens23xx = {
+	.cycle_monitor			= true,
+	.cycle_compltn_monitor_val	= 0,
+	.wd_bark			= true,
+	.wd_bark_val			= 0,
+	.ops				= &ops_tsens2xxx,
+};
+
+const struct tsens_data data_tsens24xx = {
+	.cycle_monitor			= true,
+	.cycle_compltn_monitor_val	= 0,
+	.wd_bark			= true,
+	.wd_bark_val			= 1,
+	.ops				= &ops_tsens2xxx,
+};
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 574da15..7a2d45b 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -88,6 +88,27 @@
 	 driver. This console is used through a JTAG only on ARM. If you don't have
 	 a JTAG then you probably don't want this option.
 
+config HVC_DCC_SERIALIZE_SMP
+	bool "Use DCC only on core 0"
+	depends on SMP && HVC_DCC
+	help
+	  Some debuggers, such as Trace32 from Lauterbach GmbH, do not handle
+	  reads/writes from/to DCC on more than one core.  Each core has its
+	  own DCC device registers, so when a core reads or writes from/to DCC,
+	  it only accesses its own DCC device.  Since kernel code can run on
+	  any core, every time the kernel wants to write to the console, it
+	  might write to a different DCC.
+
+	  In SMP mode, Trace32 only uses the DCC on core 0.  In AMP mode, it
+	  creates multiple windows, and each window shows the DCC output
+	  only from that core's DCC.  The result is that console output is
+	  either lost or scattered across windows.
+
+	  Selecting this option will enable code that serializes all console
+	  input and output to core 0.  The DCC driver will create input and
+	  output FIFOs that all cores will use.  Reads and writes from/to DCC
+	  are handled by a workqueue that runs only on core 0.
+
 config HVC_BFIN_JTAG
 	bool "Blackfin JTAG console"
 	depends on BLACKFIN
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 82f240f..c987697 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, 2014, 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,10 @@
  */
 
 #include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/console.h>
 
 #include <asm/dcc.h>
 #include <asm/processor.h>
@@ -48,6 +52,12 @@
 	return i;
 }
 
+/*
+ * Check if the DCC is enabled.  If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
+ * then we assume that this function will be called first on core 0.  That
+ * way, dcc_core0_available will be true only if it's available on core 0.
+ */
+#ifndef CONFIG_HVC_DCC_SERIALIZE_SMP
 static bool hvc_dcc_check(void)
 {
 	unsigned long time = jiffies + (HZ / 10);
@@ -62,12 +72,173 @@
 
 	return false;
 }
+#endif
+
+#ifdef CONFIG_HVC_DCC_SERIALIZE_SMP
+static bool hvc_dcc_check(void)
+{
+	unsigned long time = jiffies + (HZ / 10);
+
+	static bool dcc_core0_available;
+
+	/*
+	 * If we're not on core 0, but we previously confirmed that DCC is
+	 * active, then just return true.
+	 */
+	if (smp_processor_id() && dcc_core0_available)
+		return true;
+
+	/* Write a test character to check if it is handled */
+	__dcc_putchar('\n');
+
+	while (time_is_after_jiffies(time)) {
+		if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
+			dcc_core0_available = true;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void dcc_put_work_fn(struct work_struct *work);
+static void dcc_get_work_fn(struct work_struct *work);
+static DECLARE_WORK(dcc_pwork, dcc_put_work_fn);
+static DECLARE_WORK(dcc_gwork, dcc_get_work_fn);
+static DEFINE_SPINLOCK(dcc_lock);
+static DEFINE_KFIFO(inbuf, unsigned char, 128);
+static DEFINE_KFIFO(outbuf, unsigned char, 1024);
+
+/*
+ * Workqueue function that writes the output FIFO to the DCC on core 0.
+ */
+static void dcc_put_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	/* While there's data in the output FIFO, write it to the DCC */
+	while (kfifo_get(&outbuf, &ch))
+		hvc_dcc_put_chars(0, &ch, 1);
+
+	/* While we're at it, check for any input characters */
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+/*
+ * Workqueue function that reads characters from DCC and puts them into the
+ * input FIFO.
+ */
+static void dcc_get_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+	unsigned long irqflags;
+
+	/*
+	 * Read characters from DCC and put them into the input FIFO, as
+	 * long as there is room and we have characters to read.
+	 */
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+/*
+ * Write characters directly to the DCC if we're on core 0 and the FIFO
+ * is empty, or write them to the FIFO if we're not.
+ */
+static int hvc_dcc0_put_chars(uint32_t vt, const char *buf,
+					     int count)
+{
+	int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+	if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
+		len = kfifo_in(&outbuf, buf, count);
+		spin_unlock_irqrestore(&dcc_lock, irqflags);
+		/*
+		 * We just push data to the output FIFO, so schedule the
+		 * workqueue that will actually write that data to DCC.
+		 */
+		schedule_work_on(0, &dcc_pwork);
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * write the data to DCC.
+	 */
+	len = hvc_dcc_put_chars(vt, buf, count);
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+	return len;
+}
+
+/*
+ * Read characters directly from the DCC if we're on core 0 and the FIFO
+ * is empty, or read them from the FIFO if we're not.
+ */
+static int hvc_dcc0_get_chars(uint32_t vt, char *buf, int count)
+{
+	int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
+		len = kfifo_out(&inbuf, buf, count);
+		spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+		/*
+		 * If the FIFO was empty, there may be characters in the DCC
+		 * that we haven't read yet.  Schedule a workqueue to fill
+		 * the input FIFO, so that the next time this function is
+		 * called, we'll have data.
+		 */
+		if (!len)
+			schedule_work_on(0, &dcc_gwork);
+
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * read the data from DCC.
+	 */
+	len = hvc_dcc_get_chars(vt, buf, count);
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+	return len;
+}
+
+static const struct hv_ops hvc_dcc_get_put_ops = {
+	.get_chars = hvc_dcc0_get_chars,
+	.put_chars = hvc_dcc0_put_chars,
+};
+
+#else
 
 static const struct hv_ops hvc_dcc_get_put_ops = {
 	.get_chars = hvc_dcc_get_chars,
 	.put_chars = hvc_dcc_put_chars,
 };
 
+#endif
+
 static int __init hvc_dcc_console_init(void)
 {
 	int ret;
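The HVC_DCC_SERIALIZE_SMP change above funnels every console read and write through per-direction kfifos that only a CPU-0 work item drains, so the core-local DCC registers are never touched from another CPU. Below is a minimal, self-contained sketch of that pattern; it is not part of the patch, and the demo_* names and the stub putchar are placeholders.

#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_KFIFO(demo_outbuf, unsigned char, 1024);
static DEFINE_SPINLOCK(demo_lock);

/* Placeholder for the core-local hardware write (e.g. __dcc_putchar()). */
static void demo_hw_putchar(unsigned char c)
{
	pr_cont("%c", c);
}

/* Drains the FIFO; always scheduled on CPU 0, so only CPU 0 touches HW. */
static void demo_put_work_fn(struct work_struct *work)
{
	unsigned char ch;
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	while (kfifo_get(&demo_outbuf, &ch))
		demo_hw_putchar(ch);
	spin_unlock_irqrestore(&demo_lock, flags);
}
static DECLARE_WORK(demo_pwork, demo_put_work_fn);

/* Producers on any CPU queue bytes and kick the CPU-0 work item. */
static int demo_put_chars(const unsigned char *buf, int count)
{
	unsigned long flags;
	int len;

	spin_lock_irqsave(&demo_lock, flags);
	len = kfifo_in(&demo_outbuf, buf, count);
	spin_unlock_irqrestore(&demo_lock, flags);

	schedule_work_on(0, &demo_pwork);
	return len;
}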
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4d09bd4..6e3e636 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -52,6 +52,7 @@
 	struct pci_dev		*dev;
 	unsigned int		nr;
 	struct pci_serial_quirk	*quirk;
+	const struct pciserial_board *board;
 	int			line[0];
 };
 
@@ -3871,6 +3872,7 @@
 		}
 	}
 	priv->nr = i;
+	priv->board = board;
 	return priv;
 
 err_deinit:
@@ -3881,7 +3883,7 @@
 }
 EXPORT_SYMBOL_GPL(pciserial_init_ports);
 
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
 {
 	struct pci_serial_quirk *quirk;
 	int i;
@@ -3895,7 +3897,11 @@
 	quirk = find_quirk(priv->dev);
 	if (quirk->exit)
 		quirk->exit(priv->dev);
+}
 
+void pciserial_remove_ports(struct serial_private *priv)
+{
+	pciserial_detach_ports(priv);
 	kfree(priv);
 }
 EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -5590,7 +5596,7 @@
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	if (priv)
-		pciserial_suspend_ports(priv);
+		pciserial_detach_ports(priv);
 
 	pci_disable_device(dev);
 
@@ -5615,9 +5621,18 @@
 static void serial8250_io_resume(struct pci_dev *dev)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);
+	const struct pciserial_board *board;
 
-	if (priv)
-		pciserial_resume_ports(priv);
+	if (!priv)
+		return;
+
+	board = priv->board;
+	kfree(priv);
+	priv = pciserial_init_ports(dev, board);
+
+	if (!IS_ERR(priv)) {
+		pci_set_drvdata(dev, priv);
+	}
 }
 
 static const struct pci_error_handlers serial8250_err_handler = {
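For context, the 8250_pci change splits pciserial_remove_ports() so that PCI error recovery can detach the ports in error_detected() while keeping the serial_private (and now the remembered board) around, then rebuild everything in the resume() hook. A hedged sketch of the generic AER callback shape this relies on follows; the demo_* names are illustrative, not the actual 8250 handlers.

#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Detach software state but keep drvdata so resume() can rebuild. */
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;

	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void demo_resume(struct pci_dev *pdev)
{
	/* Re-create the ports from the saved board description here. */
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.slot_reset	= demo_slot_reset,
	.resume		= demo_resume,
};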
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a9ded51..bac9975 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1079,6 +1079,12 @@
 	    hardware.
 	    The driver supports console and High speed UART functions.
 
+config SERIAL_MSM_GENI_CONSOLE
+	tristate "MSM on-chip GENI HW based console support"
+	depends on SERIAL_MSM_GENI=y
+	select SERIAL_CORE_CONSOLE
+	select SERIAL_EARLYCON
+
 config SERIAL_MSM_CONSOLE
 	bool "MSM serial console support"
 	depends on SERIAL_MSM=y
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index a115f58..7c4654c 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -313,6 +313,7 @@
 }
 #endif
 
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 static void msm_geni_serial_wr_char(struct uart_port *uport, int ch)
 {
 	geni_write_reg(ch, uport->membase, SE_GENI_TX_FIFOn);
@@ -384,6 +385,53 @@
 	spin_unlock(&uport->lock);
 }
 
+static int handle_rx_console(struct uart_port *uport,
+			unsigned int rx_fifo_wc,
+			unsigned int rx_last_byte_valid,
+			unsigned int rx_last)
+{
+	int i, c;
+	unsigned char *rx_char;
+	struct tty_port *tport;
+	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
+	tport = &uport->state->port;
+
+	for (i = 0; i < rx_fifo_wc; i++) {
+		int bytes = 4;
+
+		*(msm_port->rx_fifo) =
+			geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
+		rx_char = (unsigned char *)msm_port->rx_fifo;
+
+		if (i == (rx_fifo_wc - 1)) {
+			if (rx_last && rx_last_byte_valid)
+				bytes = rx_last_byte_valid;
+		}
+		for (c = 0; c < bytes; c++) {
+			char flag = TTY_NORMAL;
+			int sysrq;
+
+			uport->icount.rx++;
+			sysrq = uart_handle_sysrq_char(uport, rx_char[c]);
+			if (!sysrq)
+				tty_insert_flip_char(tport, rx_char[c], flag);
+		}
+	}
+	tty_flip_buffer_push(tport);
+	return 0;
+}
+#else
+static int handle_rx_console(struct uart_port *uport,
+			unsigned int rx_fifo_wc,
+			unsigned int rx_last_byte_valid,
+			unsigned int rx_last)
+{
+	return -EPERM;
+}
+
+#endif /* defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) */
+
 static void msm_geni_serial_start_tx(struct uart_port *uport)
 {
 	unsigned int geni_m_irq_en;
@@ -476,43 +524,6 @@
 		WARN_ON(1);
 }
 
-static int handle_rx_console(struct uart_port *uport,
-			unsigned int rx_fifo_wc,
-			unsigned int rx_last_byte_valid,
-			unsigned int rx_last)
-{
-	int i, c;
-	unsigned char *rx_char;
-	struct tty_port *tport;
-	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
-
-	tport = &uport->state->port;
-
-	for (i = 0; i < rx_fifo_wc; i++) {
-		int bytes = 4;
-
-		*(msm_port->rx_fifo) =
-			geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
-		rx_char = (unsigned char *)msm_port->rx_fifo;
-
-		if (i == (rx_fifo_wc - 1)) {
-			if (rx_last && rx_last_byte_valid)
-				bytes = rx_last_byte_valid;
-		}
-		for (c = 0; c < bytes; c++) {
-			char flag = TTY_NORMAL;
-			int sysrq;
-
-			uport->icount.rx++;
-			sysrq = uart_handle_sysrq_char(uport, rx_char[c]);
-			if (!sysrq)
-				tty_insert_flip_char(tport, rx_char[c], flag);
-		}
-	}
-	tty_flip_buffer_push(tport);
-	return 0;
-}
-
 static int handle_rx_hs(struct uart_port *uport,
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
@@ -848,7 +859,7 @@
 	unsigned int stop_bit_len;
 	unsigned int rxstale;
 	unsigned int clk_div;
-	unsigned long ser_clk_cfg;
+	unsigned long ser_clk_cfg = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 	unsigned long clk_rate;
 
@@ -943,6 +954,7 @@
 	return is_tx_empty;
 }
 
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 static int __init msm_geni_console_setup(struct console *co, char *options)
 {
 	struct uart_port *uport;
@@ -985,13 +997,100 @@
 	return uart_set_options(uport, co, baud, parity, bits, flow);
 }
 
-static void msm_geni_serial_debug_init(struct uart_port *uport)
+static void
+msm_geni_serial_early_console_write(struct console *con, const char *s,
+			unsigned int n)
 {
-	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	struct earlycon_device *dev = con->data;
 
-	msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL);
-	if (IS_ERR_OR_NULL(msm_port->dbg))
-		dev_err(uport->dev, "Failed to create dbg dir\n");
+	__msm_geni_serial_console_write(&dev->port, s, n);
+}
+
+static int __init
+msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
+		const char *opt)
+{
+	struct uart_port *uport = &dev->port;
+	int ret = 0;
+	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	u32 tx_trans_cfg = 0;
+	u32 tx_parity_cfg = 0;
+	u32 rx_trans_cfg = 0;
+	u32 rx_parity_cfg = 0;
+	u32 stop_bit = 0;
+	u32 rx_stale = 0;
+	u32 bits_per_char = 0;
+	u32 s_clk_cfg = 0;
+	u32 baud = 115200;
+	u32 clk_div;
+	unsigned long clk_rate;
+
+	if (!uport->membase) {
+		ret = -ENOMEM;
+		goto exit_geni_serial_earlyconsetup;
+	}
+
+	if (get_se_proto(uport->membase) != UART) {
+		ret = -ENXIO;
+		goto exit_geni_serial_earlyconsetup;
+	}
+
+	msm_port->xfer_mode = FIFO_MODE;
+	set_rfr_wm(msm_port);
+	msm_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+	msm_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
+	msm_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+	geni_se_init(uport->membase, msm_port->xfer_mode, msm_port->rx_wm,
+							msm_port->rx_rfr);
+	/*
+	 * Ignore Flow control.
+	 * Disable Tx Parity.
+	 * Don't check Parity during Rx.
+	 * Disable Rx Parity.
+	 * n = 8.
+	 * Stop bit = 0.
+	 * Stale timeout in bit-time (3 chars worth).
+	 */
+	tx_trans_cfg |= UART_CTS_MASK;
+	tx_parity_cfg = 0;
+	rx_trans_cfg = 0;
+	rx_parity_cfg = 0;
+	bits_per_char = 0x8;
+	stop_bit = 0;
+	rx_stale = 0x18;
+	clk_div = get_clk_div_rate(baud, &clk_rate);
+	if (clk_div <= 0) {
+		ret = -EINVAL;
+		goto exit_geni_serial_earlyconsetup;
+	}
+
+	s_clk_cfg |= SER_CLK_EN;
+	s_clk_cfg |= (clk_div << CLK_DIV_SHFT);
+
+	geni_serial_write_term_regs(uport, 0, tx_trans_cfg,
+		tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
+		stop_bit, rx_stale, s_clk_cfg);
+
+	dev->con->write = msm_geni_serial_early_console_write;
+	dev->con->setup = NULL;
+	/*
+	 * Ensure that the early console setup completes before
+	 * returning.
+	 */
+	mb();
+exit_geni_serial_earlyconsetup:
+	return ret;
+}
+OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-uart",
+		msm_geni_serial_earlycon_setup);
+
+static int console_register(struct uart_driver *drv)
+{
+	return uart_register_driver(drv);
+}
+static void console_unregister(struct uart_driver *drv)
+{
+	uart_unregister_driver(drv);
 }
 
 static struct console cons_ops = {
@@ -1004,6 +1103,33 @@
 	.data = &msm_geni_console_driver,
 };
 
+static struct uart_driver msm_geni_console_driver = {
+	.owner = THIS_MODULE,
+	.driver_name = "msm_geni_console",
+	.dev_name = "ttyMSM",
+	.nr =  GENI_UART_NR_PORTS,
+	.cons = &cons_ops,
+};
+#else
+static int console_register(struct uart_driver *drv)
+{
+	return 0;
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+}
+#endif /* defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) */
+
+static void msm_geni_serial_debug_init(struct uart_port *uport)
+{
+	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
+	msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL);
+	if (IS_ERR_OR_NULL(msm_port->dbg))
+		dev_err(uport->dev, "Failed to create dbg dir\n");
+}
+
 static const struct uart_ops msm_geni_serial_pops = {
 	.tx_empty = msm_geni_serial_tx_empty,
 	.stop_tx = msm_geni_serial_stop_tx,
@@ -1022,8 +1148,10 @@
 };
 
 static const struct of_device_id msm_geni_device_tbl[] = {
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 	{ .compatible = "qcom,msm-geni-console",
 			.data = (void *)&msm_geni_console_driver},
+#endif
 	{ .compatible = "qcom,msm-geni-serial-hs",
 			.data = (void *)&msm_geni_serial_hs_driver},
 	{},
@@ -1189,92 +1317,6 @@
 	return 0;
 }
 
-static void
-msm_geni_serial_early_console_write(struct console *con, const char *s,
-			unsigned int n)
-{
-	struct earlycon_device *dev = con->data;
-
-	__msm_geni_serial_console_write(&dev->port, s, n);
-}
-
-static int __init
-msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
-		const char *opt)
-{
-	struct uart_port *uport = &dev->port;
-	int ret = 0;
-	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
-	u32 tx_trans_cfg = 0;
-	u32 tx_parity_cfg = 0;
-	u32 rx_trans_cfg = 0;
-	u32 rx_parity_cfg = 0;
-	u32 stop_bit = 0;
-	u32 rx_stale = 0;
-	u32 bits_per_char = 0;
-	u32 s_clk_cfg = 0;
-	u32 baud = 115200;
-	u32 clk_div;
-	unsigned long clk_rate;
-
-	if (!uport->membase) {
-		ret = -ENOMEM;
-		goto exit_geni_serial_earlyconsetup;
-	}
-
-	if (get_se_proto(uport->membase) != UART) {
-		ret = -ENXIO;
-		goto exit_geni_serial_earlyconsetup;
-	}
-
-	msm_port->xfer_mode = FIFO_MODE;
-	set_rfr_wm(msm_port);
-	msm_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
-	msm_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
-	msm_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
-	geni_se_init(uport->membase, msm_port->xfer_mode, msm_port->rx_wm,
-							msm_port->rx_rfr);
-	/*
-	 * Ignore Flow control.
-	 * Disable Tx Parity.
-	 * Don't check Parity during Rx.
-	 * Disable Rx Parity.
-	 * n = 8.
-	 * Stop bit = 0.
-	 * Stale timeout in bit-time (3 chars worth).
-	 */
-	tx_trans_cfg |= UART_CTS_MASK;
-	tx_parity_cfg = 0;
-	rx_trans_cfg = 0;
-	rx_parity_cfg = 0;
-	bits_per_char = 0x8;
-	stop_bit = 0;
-	rx_stale = 0x18;
-	clk_div = get_clk_div_rate(baud, &clk_rate);
-	if (clk_div <= 0) {
-		ret = -EINVAL;
-		goto exit_geni_serial_earlyconsetup;
-	}
-
-	s_clk_cfg |= SER_CLK_EN;
-	s_clk_cfg |= (clk_div << CLK_DIV_SHFT);
-
-	geni_serial_write_term_regs(uport, 0, tx_trans_cfg,
-		tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
-		stop_bit, rx_stale, s_clk_cfg);
-
-	dev->con->write = msm_geni_serial_early_console_write;
-	dev->con->setup = NULL;
-	/*
-	 * Ensure that the early console setup completes before
-	 * returning.
-	 */
-	mb();
-exit_geni_serial_earlyconsetup:
-	return ret;
-}
-OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-uart",
-		msm_geni_serial_earlycon_setup);
 
 #ifdef CONFIG_PM
 static int msm_geni_serial_runtime_suspend(struct device *dev)
@@ -1366,13 +1408,6 @@
 	},
 };
 
-static struct uart_driver msm_geni_console_driver = {
-	.owner = THIS_MODULE,
-	.driver_name = "msm_geni_console",
-	.dev_name = "ttyMSM",
-	.nr =  GENI_UART_NR_PORTS,
-	.cons = &cons_ops,
-};
 
 static struct uart_driver msm_geni_serial_hs_driver = {
 	.owner = THIS_MODULE,
@@ -1393,7 +1428,7 @@
 		msm_geni_serial_ports[i].uport.line = i;
 	}
 
-	ret = uart_register_driver(&msm_geni_console_driver);
+	ret = console_register(&msm_geni_console_driver);
 	if (ret)
 		return ret;
 
@@ -1405,7 +1440,7 @@
 
 	ret = platform_driver_register(&msm_geni_serial_platform_driver);
 	if (ret) {
-		uart_unregister_driver(&msm_geni_console_driver);
+		console_unregister(&msm_geni_console_driver);
 		uart_unregister_driver(&msm_geni_serial_hs_driver);
 		return ret;
 	}
@@ -1419,7 +1454,7 @@
 {
 	platform_driver_unregister(&msm_geni_serial_platform_driver);
 	uart_unregister_driver(&msm_geni_serial_hs_driver);
-	uart_unregister_driver(&msm_geni_console_driver);
+	console_unregister(&msm_geni_console_driver);
 }
 module_exit(msm_geni_serial_exit);
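The relocated msm_geni_serial_earlycon_setup()/OF_EARLYCON_DECLARE() block above follows the standard earlycon contract: the setup hook receives an earlycon_device whose uart_port already has membase mapped (from the DT stdout-path or an earlycon= command-line option) and must install a polling write() routine before the real console registers. A hedged, generic sketch of that contract; the demo names and the "vendor,demo-uart" compatible are illustrative only.

#include <linux/console.h>
#include <linux/serial_core.h>

static void demo_early_write(struct console *con, const char *s,
			     unsigned int n)
{
	struct earlycon_device *dev = con->data;

	/* Poll the TX status in dev->port.membase and emit s[0..n) here. */
}

static int __init demo_earlycon_setup(struct earlycon_device *dev,
				      const char *opt)
{
	if (!dev->port.membase)
		return -ENODEV;

	dev->con->write = demo_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(demo_uart, "vendor,demo-uart", demo_earlycon_setup);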
 
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index a6c1fae..a391b50 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1380,7 +1380,7 @@
 
 	dev_dbg(&intf->dev, "%s called\n", __func__);
 
-	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -1443,6 +1443,13 @@
 			break;
 		}
 	}
+
+	if (!data->bulk_out || !data->bulk_in) {
+		dev_err(&intf->dev, "bulk endpoints not found\n");
+		retcode = -ENODEV;
+		goto err_put;
+	}
+
 	/* Find int endpoint */
 	for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
 		endpoint = &iface_desc->endpoint[n].desc;
@@ -1468,8 +1475,10 @@
 	if (data->iin_ep_present) {
 		/* allocate int urb */
 		data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!data->iin_urb)
+		if (!data->iin_urb) {
+			retcode = -ENOMEM;
 			goto error_register;
+		}
 
 		/* will reference data in int urb */
 		kref_get(&data->kref);
@@ -1477,8 +1486,10 @@
 		/* allocate buffer for interrupt in */
 		data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
 					GFP_KERNEL);
-		if (!data->iin_buffer)
+		if (!data->iin_buffer) {
+			retcode = -ENOMEM;
 			goto error_register;
+		}
 
 		/* fill interrupt urb */
 		usb_fill_int_urb(data->iin_urb, data->usb_dev,
@@ -1511,6 +1522,7 @@
 	sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
 	sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
 	usbtmc_free_int(data);
+err_put:
 	kref_put(&data->kref, usbtmc_delete);
 	return retcode;
 }
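The usbtmc fix above refuses to bind when the expected bulk-in/bulk-out endpoints are missing, which is what prevents NULL dereferences on malformed or malicious descriptors. Kernels after 4.9 added a helper that expresses the same check more compactly; the sketch below assumes that newer usb_find_common_endpoints() API and uses a hypothetical demo_ function name.

#include <linux/usb.h>

static int demo_check_bulk_endpoints(struct usb_interface *intf)
{
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int ret;

	/* Fails with -ENXIO unless both bulk endpoints are present. */
	ret = usb_find_common_endpoints(intf->cur_altsetting,
					&bulk_in, &bulk_out, NULL, NULL);
	if (ret) {
		dev_err(&intf->dev, "bulk endpoints not found\n");
		return -ENODEV;
	}
	return 0;
}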
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 98e39f9..a6cd44a 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -63,7 +63,7 @@
 	int		i, size;
 
 	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    (!hcd->self.controller->dma_mask &&
+	    (!is_device_dma_capable(hcd->self.sysdev) &&
 	     !(hcd->driver->flags & HCD_LOCAL_MEM)))
 		return 0;
 
@@ -72,7 +72,7 @@
 		if (!size)
 			continue;
 		snprintf(name, sizeof(name), "buffer-%d", size);
-		hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
+		hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev,
 				size, size, 0);
 		if (!hcd->pool[i]) {
 			hcd_buffer_destroy(hcd);
@@ -127,7 +127,7 @@
 
 	/* some USB hosts just use PIO */
 	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    (!bus->controller->dma_mask &&
+	    (!is_device_dma_capable(bus->sysdev) &&
 	     !(hcd->driver->flags & HCD_LOCAL_MEM))) {
 		*dma = ~(dma_addr_t) 0;
 		return kmalloc(size, mem_flags);
@@ -137,7 +137,7 @@
 		if (size <= pool_max[i])
 			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
 	}
-	return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
+	return dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags);
 }
 
 void hcd_buffer_free(
@@ -154,7 +154,7 @@
 		return;
 
 	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    (!bus->controller->dma_mask &&
+	    (!is_device_dma_capable(bus->sysdev) &&
 	     !(hcd->driver->flags & HCD_LOCAL_MEM))) {
 		kfree(addr);
 		return;
@@ -166,5 +166,5 @@
 			return;
 		}
 	}
-	dma_free_coherent(hcd->self.controller, size, addr, dma);
+	dma_free_coherent(hcd->self.sysdev, size, addr, dma);
 }
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 1f7036c..eef716b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -275,6 +275,16 @@
 
 			/*
 			 * Adjust bInterval for quirked devices.
+			 */
+			/*
+			 * This quirk fixes bIntervals reported in ms.
+			 */
+			if (to_usb_device(ddev)->quirks &
+				USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+				n = clamp(fls(d->bInterval) + 3, i, j);
+				i = j = n;
+			}
+			/*
 			 * This quirk fixes bIntervals reported in
 			 * linear microframes.
 			 */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index dadd1e8d..26a305f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1441,6 +1441,9 @@
 {
 	struct usb_device	*udev = to_usb_device(dev);
 
+	if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED)
+		return 0;
+
 	unbind_no_pm_drivers_interfaces(udev);
 
 	/* From now on we are sure all drivers support suspend/resume
@@ -1470,6 +1473,15 @@
 	struct usb_device	*udev = to_usb_device(dev);
 	int			status;
 
+	/*
+	 * Some buses want to keep their devices in the suspended
+	 * state after system resume.  Such a device is resumed only
+	 * when a remote wakeup is detected or when an interface
+	 * driver starts I/O.
+	 */
+	if (udev->bus->skip_resume)
+		return 0;
+
 	/* For all calls, take the device back to full power and
 	 * tell the PM core in case it was autosuspended previously.
 	 * Unbind the interfaces that will need rebinding later,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 479e223..cf25708 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1073,6 +1073,7 @@
 static int register_root_hub(struct usb_hcd *hcd)
 {
 	struct device *parent_dev = hcd->self.controller;
+	struct device *sysdev = hcd->self.sysdev;
 	struct usb_device *usb_dev = hcd->self.root_hub;
 	const int devnum = 1;
 	int retval;
@@ -1119,7 +1120,7 @@
 		/* Did the HC die before the root hub was registered? */
 		if (HCD_DEAD(hcd))
 			usb_hc_died (hcd);	/* This time clean up */
-		usb_dev->dev.of_node = parent_dev->of_node;
+		usb_dev->dev.of_node = sysdev->of_node;
 	}
 	mutex_unlock(&usb_bus_idr_lock);
 
@@ -1432,7 +1433,7 @@
 {
 	if (IS_ENABLED(CONFIG_HAS_DMA) &&
 	    (urb->transfer_flags & URB_SETUP_MAP_SINGLE))
-		dma_unmap_single(hcd->self.controller,
+		dma_unmap_single(hcd->self.sysdev,
 				urb->setup_dma,
 				sizeof(struct usb_ctrlrequest),
 				DMA_TO_DEVICE);
@@ -1465,19 +1466,19 @@
 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	if (IS_ENABLED(CONFIG_HAS_DMA) &&
 	    (urb->transfer_flags & URB_DMA_MAP_SG))
-		dma_unmap_sg(hcd->self.controller,
+		dma_unmap_sg(hcd->self.sysdev,
 				urb->sg,
 				urb->num_sgs,
 				dir);
 	else if (IS_ENABLED(CONFIG_HAS_DMA) &&
 		 (urb->transfer_flags & URB_DMA_MAP_PAGE))
-		dma_unmap_page(hcd->self.controller,
+		dma_unmap_page(hcd->self.sysdev,
 				urb->transfer_dma,
 				urb->transfer_buffer_length,
 				dir);
 	else if (IS_ENABLED(CONFIG_HAS_DMA) &&
 		 (urb->transfer_flags & URB_DMA_MAP_SINGLE))
-		dma_unmap_single(hcd->self.controller,
+		dma_unmap_single(hcd->self.sysdev,
 				urb->transfer_dma,
 				urb->transfer_buffer_length,
 				dir);
@@ -1520,11 +1521,11 @@
 			return ret;
 		if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
 			urb->setup_dma = dma_map_single(
-					hcd->self.controller,
+					hcd->self.sysdev,
 					urb->setup_packet,
 					sizeof(struct usb_ctrlrequest),
 					DMA_TO_DEVICE);
-			if (dma_mapping_error(hcd->self.controller,
+			if (dma_mapping_error(hcd->self.sysdev,
 						urb->setup_dma))
 				return -EAGAIN;
 			urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
@@ -1555,7 +1556,7 @@
 				}
 
 				n = dma_map_sg(
-						hcd->self.controller,
+						hcd->self.sysdev,
 						urb->sg,
 						urb->num_sgs,
 						dir);
@@ -1570,12 +1571,12 @@
 			} else if (urb->sg) {
 				struct scatterlist *sg = urb->sg;
 				urb->transfer_dma = dma_map_page(
-						hcd->self.controller,
+						hcd->self.sysdev,
 						sg_page(sg),
 						sg->offset,
 						urb->transfer_buffer_length,
 						dir);
-				if (dma_mapping_error(hcd->self.controller,
+				if (dma_mapping_error(hcd->self.sysdev,
 						urb->transfer_dma))
 					ret = -EAGAIN;
 				else
@@ -1585,11 +1586,11 @@
 				ret = -EAGAIN;
 			} else {
 				urb->transfer_dma = dma_map_single(
-						hcd->self.controller,
+						hcd->self.sysdev,
 						urb->transfer_buffer,
 						urb->transfer_buffer_length,
 						dir);
-				if (dma_mapping_error(hcd->self.controller,
+				if (dma_mapping_error(hcd->self.sysdev,
 						urb->transfer_dma))
 					ret = -EAGAIN;
 				else
@@ -2228,8 +2229,65 @@
 	return hcd->driver->get_frame_number (hcd);
 }
 
+int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_setup(hcd, intr_num);
+}
+
+int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_cleanup(hcd, intr_num);
+}
+
 /*-------------------------------------------------------------------------*/
 
+dma_addr_t
+usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
+}
+
+dma_addr_t
+usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_dcba_dma_addr(hcd, udev);
+}
+
+dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+}
+
 #ifdef	CONFIG_PM
 
 int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
@@ -2482,6 +2540,7 @@
 	}
 	spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
 	/* Make sure that the other roothub is also deallocated. */
+	usb_atomic_notify_dead_bus(&hcd->self);
 }
 EXPORT_SYMBOL_GPL (usb_hc_died);
 
@@ -2495,24 +2554,8 @@
 	tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh);
 }
 
-/**
- * usb_create_shared_hcd - create and initialize an HCD structure
- * @driver: HC driver that will use this hcd
- * @dev: device for this HC, stored in hcd->self.controller
- * @bus_name: value to store in hcd->self.bus_name
- * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
- *              PCI device.  Only allocate certain resources for the primary HCD
- * Context: !in_interrupt()
- *
- * Allocate a struct usb_hcd, with extra space at the end for the
- * HC driver's private data.  Initialize the generic members of the
- * hcd structure.
- *
- * Return: On success, a pointer to the created and initialized HCD structure.
- * On failure (e.g. if memory is unavailable), %NULL.
- */
-struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
-		struct device *dev, const char *bus_name,
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+		struct device *sysdev, struct device *dev, const char *bus_name,
 		struct usb_hcd *primary_hcd)
 {
 	struct usb_hcd *hcd;
@@ -2553,8 +2596,9 @@
 
 	usb_bus_init(&hcd->self);
 	hcd->self.controller = dev;
+	hcd->self.sysdev = sysdev;
 	hcd->self.bus_name = bus_name;
-	hcd->self.uses_dma = (dev->dma_mask != NULL);
+	hcd->self.uses_dma = (sysdev->dma_mask != NULL);
 
 	init_timer(&hcd->rh_timer);
 	hcd->rh_timer.function = rh_timer_func;
@@ -2569,6 +2613,30 @@
 			"USB Host Controller";
 	return hcd;
 }
+EXPORT_SYMBOL_GPL(__usb_create_hcd);
+
+/**
+ * usb_create_shared_hcd - create and initialize an HCD structure
+ * @driver: HC driver that will use this hcd
+ * @dev: device for this HC, stored in hcd->self.controller
+ * @bus_name: value to store in hcd->self.bus_name
+ * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
+ *              PCI device.  Only allocate certain resources for the primary HCD
+ * Context: !in_interrupt()
+ *
+ * Allocate a struct usb_hcd, with extra space at the end for the
+ * HC driver's private data.  Initialize the generic members of the
+ * hcd structure.
+ *
+ * Return: On success, a pointer to the created and initialized HCD structure.
+ * On failure (e.g. if memory is unavailable), %NULL.
+ */
+struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+		struct device *dev, const char *bus_name,
+		struct usb_hcd *primary_hcd)
+{
+	return __usb_create_hcd(driver, dev, dev, bus_name, primary_hcd);
+}
 EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
 
 /**
@@ -2588,7 +2656,7 @@
 struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
 		struct device *dev, const char *bus_name)
 {
-	return usb_create_shared_hcd(driver, dev, bus_name, NULL);
+	return __usb_create_hcd(driver, dev, dev, bus_name, NULL);
 }
 EXPORT_SYMBOL_GPL(usb_create_hcd);
 
@@ -2715,7 +2783,7 @@
 	struct usb_device *rhdev;
 
 	if (IS_ENABLED(CONFIG_USB_PHY) && !hcd->usb_phy) {
-		struct usb_phy *phy = usb_get_phy_dev(hcd->self.controller, 0);
+		struct usb_phy *phy = usb_get_phy_dev(hcd->self.sysdev, 0);
 
 		if (IS_ERR(phy)) {
 			retval = PTR_ERR(phy);
@@ -2733,7 +2801,7 @@
 	}
 
 	if (IS_ENABLED(CONFIG_GENERIC_PHY) && !hcd->phy) {
-		struct phy *phy = phy_get(hcd->self.controller, "usb");
+		struct phy *phy = phy_get(hcd->self.sysdev, "usb");
 
 		if (IS_ERR(phy)) {
 			retval = PTR_ERR(phy);
@@ -2781,7 +2849,7 @@
 	 */
 	retval = hcd_buffer_create(hcd);
 	if (retval != 0) {
-		dev_dbg(hcd->self.controller, "pool alloc failed\n");
+		dev_dbg(hcd->self.sysdev, "pool alloc failed\n");
 		goto err_create_buf;
 	}
 
@@ -2791,7 +2859,7 @@
 
 	rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
 	if (rhdev == NULL) {
-		dev_err(hcd->self.controller, "unable to allocate root hub\n");
+		dev_err(hcd->self.sysdev, "unable to allocate root hub\n");
 		retval = -ENOMEM;
 		goto err_allocate_root_hub;
 	}
@@ -2966,6 +3034,9 @@
 	cancel_work_sync(&hcd->wakeup_work);
 #endif
 
+	/* handle any pending hub events before XHCI stops */
+	usb_flush_hub_wq();
+
 	mutex_lock(&usb_bus_idr_lock);
 	usb_disconnect(&rhdev);		/* Sets rhdev to NULL */
 	mutex_unlock(&usb_bus_idr_lock);
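The hcd.c changes split the device registered with the driver core (hcd->self.controller) from the device that actually masters DMA and carries DT/IOMMU configuration (hcd->self.sysdev), and export __usb_create_hcd() so glue drivers can pass both. A hedged sketch of how a host glue driver would use it; the demo_ names are illustrative and it assumes the parent device is the DMA master.

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/usb/hcd.h>

static int demo_host_start(struct platform_device *pdev,
			   const struct hc_driver *driver)
{
	struct device *sysdev = pdev->dev.parent;	/* DMA-capable device */
	struct usb_hcd *hcd;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
			       dev_name(&pdev->dev), NULL);
	if (!hcd)
		return -ENOMEM;

	/* Buffer pools and URB DMA mappings now use hcd->self.sysdev. */
	return usb_add_hcd(hcd, irq, IRQF_SHARED);
}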
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index aef81a1..c3d249f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -48,6 +48,11 @@
 /* synchronize hub-port add/remove and peering operations */
 DEFINE_MUTEX(usb_port_peer_mutex);
 
+static bool skip_extended_resume_delay = true;
+module_param(skip_extended_resume_delay, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_extended_resume_delay,
+		"removes extra delay added to finish bus resume");
+
 /* cycle leds on hubs that aren't blinking for attention */
 static bool blinkenlights;
 module_param(blinkenlights, bool, S_IRUGO);
@@ -627,6 +632,12 @@
 		kick_hub_wq(hub);
 }
 
+void usb_flush_hub_wq(void)
+{
+	flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
 /*
  * Let the USB core know that a USB 3.0 device has sent a Function Wake Device
  * Notification, which indicates it had initiated remote wakeup.
@@ -3398,7 +3409,9 @@
 		/* drive resume for USB_RESUME_TIMEOUT msec */
 		dev_dbg(&udev->dev, "usb %sresume\n",
 				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
-		msleep(USB_RESUME_TIMEOUT);
+		if (!skip_extended_resume_delay)
+			usleep_range(USB_RESUME_TIMEOUT * 1000,
+					(USB_RESUME_TIMEOUT + 1) * 1000);
 
 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
 		 * stop resume signaling.  Then finish the resume
@@ -3407,7 +3420,7 @@
 		status = hub_port_status(hub, port1, &portstatus, &portchange);
 
 		/* TRSMRCY = 10 msec */
-		msleep(10);
+		usleep_range(10000, 10500);
 	}
 
  SuspendCleared:
@@ -4266,7 +4279,7 @@
 	struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
 	int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
 
-	if (!udev->usb2_hw_lpm_capable)
+	if (!udev->usb2_hw_lpm_capable || !udev->bos)
 		return;
 
 	if (hub)
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91..af91b1e 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -17,6 +17,7 @@
 #include "usb.h"
 
 static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(usb_atomic_notifier_list);
 
 /**
  * usb_register_notify - register a notifier callback whenever a usb change happens
@@ -67,3 +68,33 @@
 {
 	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
 }
+
+/**
+ * usb_register_atomic_notify - register an atomic notifier callback invoked
+ * whenever an HC dies
+ * @nb: pointer to the atomic notifier block for the callback events.
+ *
+ */
+void usb_register_atomic_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_register_atomic_notify);
+
+/**
+ * usb_unregister_atomic_notify - unregister an atomic notifier callback
+ * @nb: pointer to the notifier block for the callback events.
+ *
+ */
+void usb_unregister_atomic_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_unregister_atomic_notify);
+
+
+void usb_atomic_notify_dead_bus(struct usb_bus *ubus)
+{
+	atomic_notifier_call_chain(&usb_atomic_notifier_list, USB_BUS_DIED,
+					 ubus);
+}
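usb_hc_died() now also runs an atomic notifier chain, so interested glue drivers can trigger controller recovery; the dwc3-msm hunk later in this patch registers exactly such a callback. A minimal hedged sketch of a consumer follows; the demo_ names are placeholders, and the callback runs in atomic context so it must defer any heavy lifting.

#include <linux/notifier.h>
#include <linux/usb.h>

static int demo_dead_bus_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct usb_bus *bus = data;

	if (action != USB_BUS_DIED)
		return NOTIFY_DONE;

	/* Atomic context: only record the dead bus and schedule recovery. */
	pr_err("usb bus %d died, scheduling recovery\n", bus->busnum);
	return NOTIFY_OK;
}

static struct notifier_block demo_dead_bus_nb = {
	.notifier_call = demo_dead_bus_cb,
};

/* At probe time: usb_register_atomic_notify(&demo_dead_bus_nb); */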
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 24f9f98..96b21b0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,6 +170,14 @@
 	/* M-Systems Flash Disk Pioneers */
 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Baum Vario Ultra */
+	{ USB_DEVICE(0x0904, 0x6101), .driver_info =
+			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+	{ USB_DEVICE(0x0904, 0x6102), .driver_info =
+			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+	{ USB_DEVICE(0x0904, 0x6103), .driver_info =
+			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
 	/* Keytouch QWERTY Panel keyboard */
 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 5921514..7272f9a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -450,9 +450,9 @@
 	 * Note: calling dma_set_mask() on a USB device would set the
 	 * mask for the entire HCD, so don't do that.
 	 */
-	dev->dev.dma_mask = bus->controller->dma_mask;
-	dev->dev.dma_pfn_offset = bus->controller->dma_pfn_offset;
-	set_dev_node(&dev->dev, dev_to_node(bus->controller));
+	dev->dev.dma_mask = bus->sysdev->dma_mask;
+	dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset;
+	set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
 	dev->state = USB_STATE_ATTACHED;
 	dev->lpm_disable_count = 1;
 	atomic_set(&dev->urbnum, 0);
@@ -685,6 +685,54 @@
 }
 EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
 
+int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_sec_event_ring_setup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_setup);
+
+int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
+
+dma_addr_t
+usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
+
+dma_addr_t
+usb_get_dcba_dma_addr(struct usb_device *dev)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_dcba_dma_addr(dev);
+}
+EXPORT_SYMBOL(usb_get_dcba_dma_addr);
+
+dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+}
+EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+
 /*-------------------------------------------------------------------*/
 /*
  * __usb_get_extra_descriptor() finds a descriptor of specific type in the
@@ -800,7 +848,7 @@
 	if (!urb
 			|| !urb->dev
 			|| !(bus = urb->dev->bus)
-			|| !(controller = bus->controller))
+			|| !(controller = bus->sysdev))
 		return NULL;
 
 	if (controller->dma_mask) {
@@ -838,7 +886,7 @@
 			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 			|| !urb->dev
 			|| !(bus = urb->dev->bus)
-			|| !(controller = bus->controller))
+			|| !(controller = bus->sysdev))
 		return;
 
 	if (controller->dma_mask) {
@@ -872,7 +920,7 @@
 			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 			|| !urb->dev
 			|| !(bus = urb->dev->bus)
-			|| !(controller = bus->controller))
+			|| !(controller = bus->sysdev))
 		return;
 
 	if (controller->dma_mask) {
@@ -922,7 +970,7 @@
 
 	if (!dev
 			|| !(bus = dev->bus)
-			|| !(controller = bus->controller)
+			|| !(controller = bus->sysdev)
 			|| !controller->dma_mask)
 		return -EINVAL;
 
@@ -958,7 +1006,7 @@
 
 	if (!dev
 			|| !(bus = dev->bus)
-			|| !(controller = bus->controller)
+			|| !(controller = bus->sysdev)
 			|| !controller->dma_mask)
 		return;
 
@@ -986,7 +1034,7 @@
 
 	if (!dev
 			|| !(bus = dev->bus)
-			|| !(controller = bus->controller)
+			|| !(controller = bus->sysdev)
 			|| !controller->dma_mask)
 		return;
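The new usb_sec_event_ring_* and usb_get_*_dma_addr() exports above let an offload client (for example the IPA Ethernet endpoints this merge adds) discover xHCI ring addresses for hardware-accelerated transfers. A hedged sketch of how such a client would call them; demo_ is a hypothetical name, error handling is trimmed, and the endpoint and interrupter number come from whatever interface the client already negotiated.

#include <linux/usb.h>

static int demo_setup_offload_path(struct usb_device *udev,
				   struct usb_host_endpoint *ep,
				   unsigned int intr_num)
{
	dma_addr_t evt_ring, xfer_ring;
	int ret;

	ret = usb_sec_event_ring_setup(udev, intr_num);
	if (ret)
		return ret;

	evt_ring = usb_get_sec_event_ring_dma_addr(udev, intr_num);
	xfer_ring = usb_get_xfer_ring_dma_addr(udev, ep);
	if (!evt_ring || !xfer_ring) {
		usb_sec_event_ring_cleanup(udev, intr_num);
		return -ENODEV;
	}

	/* Hand evt_ring/xfer_ring to the offload hardware here. */
	return 0;
}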
 
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 5331812..fbff25f 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -175,6 +175,7 @@
 extern void usb_notify_remove_device(struct usb_device *udev);
 extern void usb_notify_add_bus(struct usb_bus *ubus);
 extern void usb_notify_remove_bus(struct usb_bus *ubus);
+extern void usb_atomic_notify_dead_bus(struct usb_bus *ubus);
 extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
 		struct usb_hub_descriptor *desc);
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ee06d07..33e3d9f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -323,7 +323,7 @@
 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
 		struct dwc3_event_buffer *evt)
 {
-	dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
 }
 
 /**
@@ -345,7 +345,7 @@
 
 	evt->dwc	= dwc;
 	evt->length	= length;
-	evt->buf	= dma_alloc_coherent(dwc->dev, length,
+	evt->buf	= dma_alloc_coherent(dwc->sysdev, length,
 			&evt->dma, GFP_KERNEL);
 	if (!evt->buf)
 		return ERR_PTR(-ENOMEM);
@@ -474,11 +474,11 @@
 	if (!WARN_ON(dwc->scratchbuf))
 		return 0;
 
-	scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+	scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
 			dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
 			DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dwc->dev, scratch_addr)) {
-		dev_err(dwc->dev, "failed to map scratch buffer\n");
+	if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
+		dev_err(dwc->sysdev, "failed to map scratch buffer\n");
 		ret = -EFAULT;
 		goto err0;
 	}
@@ -502,7 +502,7 @@
 	return 0;
 
 err1:
-	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
 
 err0:
@@ -521,7 +521,7 @@
 	if (!WARN_ON(dwc->scratchbuf))
 		return;
 
-	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
 	kfree(dwc->scratchbuf);
 }
@@ -740,6 +740,16 @@
 		}
 	}
 
+	/*
+	 * Workaround for STAR 9000961433 which affects only version
+	 * 3.00a of the DWC_usb3 core. This prevents the controller
+	 * interrupt from being masked while handling events. IMOD
+	 * allows us to work around this issue. Enable it for the
+	 * affected version.
+	 */
+	if (!dwc->imod_interval && (dwc->revision == DWC3_REVISION_300A))
+		dwc->imod_interval = 1;
+
 	/* issue device SoftReset too */
 	ret = dwc3_core_reset(dwc);
 	if (ret)
@@ -1109,6 +1119,15 @@
 
 #define DWC3_ALIGN_MASK		(16 - 1)
 
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+	return ((dwc3_is_usb3(dwc) &&
+		dwc->revision >= DWC3_REVISION_300A) ||
+		(dwc3_is_usb31(dwc) &&
+		dwc->revision >= DWC3_USB31_REVISION_120A));
+}
+
 static int dwc3_probe(struct platform_device *pdev)
 {
 	struct device		*dev = &pdev->dev;
@@ -1154,8 +1173,8 @@
 
 	/* will be enabled in dwc3_msm_resume() */
 	irq_set_status_flags(irq, IRQ_NOAUTOEN);
-	ret = devm_request_threaded_irq(dev, irq, NULL, dwc3_interrupt,
-			IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
+	ret = devm_request_irq(dev, irq, dwc3_interrupt, IRQF_SHARED, "dwc3",
+			dwc);
 	if (ret) {
 		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
 				irq, ret);
@@ -1214,6 +1233,13 @@
 
 	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
 
+	dwc->sysdev_is_parent = device_property_read_bool(dev,
+				"linux,sysdev_is_parent");
+	if (dwc->sysdev_is_parent)
+		dwc->sysdev = dwc->dev->parent;
+	else
+		dwc->sysdev = dwc->dev;
+
 	dwc->has_lpm_erratum = device_property_read_bool(dev,
 				"snps,has-lpm-erratum");
 	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -1288,12 +1314,14 @@
 
 	spin_lock_init(&dwc->lock);
 
-	if (!dev->dma_mask) {
-		dev->dma_mask = dev->parent->dma_mask;
-		dev->dma_parms = dev->parent->dma_parms;
-		dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+	dwc->dwc_wq = alloc_ordered_workqueue("dwc_wq", WQ_HIGHPRI);
+	if (!dwc->dwc_wq) {
+		pr_err("%s: Unable to create workqueue dwc_wq\n", __func__);
+		return -ENOMEM;
 	}
 
+	INIT_WORK(&dwc->bh_work, dwc3_bh_work);
+
 	pm_runtime_no_callbacks(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
@@ -1365,6 +1393,7 @@
 	 * memory region the next time probe is called.
 	 */
 	res->start -= DWC3_GLOBALS_REGS_START;
+	destroy_workqueue(dwc->dwc_wq);
 
 	return ret;
 }
@@ -1388,6 +1417,8 @@
 	dwc3_core_exit(dwc);
 	dwc3_ulpi_exit(dwc);
 
+	destroy_workqueue(dwc->dwc_wq);
+
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
@@ -1547,6 +1578,10 @@
 	struct dwc3	*dwc = dev_get_drvdata(dev);
 	int		ret;
 
+	/* If the platform glue driver doesn't handle PM, handle it here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT))
+		return 0;
+
 	ret = dwc3_suspend_common(dwc);
 	if (ret)
 		return ret;
@@ -1561,6 +1596,10 @@
 	struct dwc3	*dwc = dev_get_drvdata(dev);
 	int		ret;
 
+	/* If the platform glue driver doesn't handle PM, handle it here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT))
+		return 0;
+
 	pinctrl_pm_select_default_state(dev);
 
 	ret = dwc3_resume_common(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 7ded2b2..009193c 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -68,6 +68,7 @@
 #define DWC3_DEVICE_EVENT_OVERFLOW		11
 
 #define DWC3_GEVNTCOUNT_MASK	0xfffc
+#define DWC3_GEVNTCOUNT_EHB	(1 << 31)
 #define DWC3_GSNPSID_MASK	0xffff0000
 #define DWC3_GSNPSREV_MASK	0xffff
 
@@ -150,6 +151,8 @@
 #define DWC3_DEPCMDPAR0		0x08
 #define DWC3_DEPCMD		0x0c
 
+#define DWC3_DEV_IMOD(n)	(0xca00 + (n * 0x4))
+
 /* OTG Registers */
 #define DWC3_OCFG		0xcc00
 #define DWC3_OCTL		0xcc04
@@ -485,6 +488,11 @@
 #define DWC3_DEPCMD_TYPE_BULK		2
 #define DWC3_DEPCMD_TYPE_INTR		3
 
+#define DWC3_DEV_IMOD_COUNT_SHIFT	16
+#define DWC3_DEV_IMOD_COUNT_MASK	(0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT	0
+#define DWC3_DEV_IMOD_INTERVAL_MASK	(0xffff << 0)
+
 #define DWC_CTRL_COUNT	10
 #define NUM_LOG_PAGES	12
 
@@ -887,6 +895,7 @@
  * @ep0_bounced: true when we used bounce buffer
  * @ep0_expect_in: true when we expect a DATA IN transfer
  * @has_hibernation: true when dwc3 was configured with Hibernation
+ * @sysdev_is_parent: true when sysdev (the DMA-capable device) is the parent
  * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
  *			there's now way for software to detect this in runtime.
  * @is_utmi_l1_suspend: the core asserts output signal
@@ -938,6 +947,8 @@
  * @vbus_draw: current to be drawn from USB
  * @index: dwc3 instance's number
  * @dwc_ipc_log_ctxt: dwc3 ipc log context
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ *			increments or 0 to disable.
  */
 struct dwc3 {
 	struct usb_ctrlrequest	*ctrl_req;
@@ -956,6 +967,7 @@
 	spinlock_t		lock;
 
 	struct device		*dev;
+	struct device		*sysdev;
 
 	struct platform_device	*xhci;
 	struct resource		xhci_resources[DWC3_XHCI_RESOURCES_NUM];
@@ -1025,6 +1037,7 @@
  */
 #define DWC3_REVISION_IS_DWC31		0x80000000
 #define DWC3_USB31_REVISION_110A	(0x3131302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_120A	(0x3132302a | DWC3_REVISION_IS_DWC31)
 
 	enum dwc3_ep0_next	ep0_next_event;
 	enum dwc3_ep0_state	ep0state;
@@ -1062,6 +1075,7 @@
 	unsigned		ep0_bounced:1;
 	unsigned		ep0_expect_in:1;
 	unsigned		has_hibernation:1;
+	unsigned		sysdev_is_parent:1;
 	unsigned		has_lpm_erratum:1;
 	unsigned		is_utmi_l1_suspend:1;
 	unsigned		is_fpga:1;
@@ -1105,6 +1119,11 @@
 	bool			b_suspend;
 	unsigned int		vbus_draw;
 
+	u16			imod_interval;
+
+	struct workqueue_struct	*dwc_wq;
+	struct work_struct	bh_work;
+
 	/* IRQ timing statistics */
 	int			irq;
 	unsigned long		ep_cmd_timeout_cnt;
@@ -1280,12 +1299,20 @@
 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
 
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+	return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
 /* check whether we are on the DWC_usb31 core */
 static inline bool dwc3_is_usb31(struct dwc3 *dwc)
 {
 	return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
 }
 
+bool dwc3_has_imod(struct dwc3 *dwc);
+
 #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
 int dwc3_host_init(struct dwc3 *dwc);
 void dwc3_host_exit(struct dwc3 *dwc);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index f456f12..96684f4 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -21,6 +21,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/ratelimit.h>
 #include <linux/interrupt.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
 #include <linux/ioport.h>
 #include <linux/clk.h>
 #include <linux/io.h>
@@ -155,6 +157,7 @@
 	void __iomem *base;
 	void __iomem *ahb2phy_base;
 	struct platform_device	*dwc3;
+	struct dma_iommu_mapping *iommu_map;
 	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
 	struct list_head req_complete_list;
 	struct clk		*xo_clk;
@@ -212,6 +215,8 @@
 
 	unsigned int		irq_to_affin;
 	struct notifier_block	dwc3_cpu_notifier;
+	struct notifier_block	usbdev_nb;
+	bool			hc_died;
 
 	struct extcon_dev	*extcon_vbus;
 	struct extcon_dev	*extcon_id;
@@ -1004,7 +1009,7 @@
 	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
 					: (req->num_bufs + 1);
 
-	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
+	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
 					num_trbs * sizeof(struct dwc3_trb),
 					num_trbs * sizeof(struct dwc3_trb), 0);
 	if (!dep->trb_dma_pool) {
@@ -1503,6 +1508,33 @@
 	flush_delayed_work(&mdwc->sm_work);
 }
 
+static int msm_dwc3_usbdev_notify(struct notifier_block *self,
+			unsigned long action, void *priv)
+{
+	struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct usb_bus *bus = priv;
+
+	/* Interested only in recovery when HC dies */
+	if (action != USB_BUS_DIED)
+		return 0;
+
+	dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
+	/* Recovery is already in progress */
+	if (mdwc->hc_died)
+		return 0;
+
+	if (bus->controller != &dwc->xhci->dev) {
+		dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
+		return 0;
+	}
+
+	mdwc->hc_died = true;
+	schedule_delayed_work(&mdwc->sm_work, 0);
+	return 0;
+}
+
 /*
  * Check whether the DWC3 requires resetting the ep
  * after going to Low Power Mode (lpm)
@@ -2042,6 +2074,9 @@
 	if (dwc->irq)
 		disable_irq(dwc->irq);
 
+	if (work_busy(&dwc->bh_work))
+		dbg_event(0xFF, "pend evt", 0);
+
 	/* disable power event irq, hs and ss phy irq is used as wake up src */
 	disable_irq(mdwc->pwr_event_irq);
 
@@ -2091,6 +2126,11 @@
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
 		clk_disable_unprepare(mdwc->sleep_clk);
+
+		if (mdwc->iommu_map) {
+			arm_iommu_detach_device(mdwc->dev);
+			dev_dbg(mdwc->dev, "IOMMU detached\n");
+		}
 	}
 
 	/* Remove bus voting */
@@ -2224,6 +2264,16 @@
 	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
 		u32 tmp;
 
+		if (mdwc->iommu_map) {
+			ret = arm_iommu_attach_device(mdwc->dev,
+					mdwc->iommu_map);
+			if (ret)
+				dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
+						ret);
+			else
+				dev_dbg(mdwc->dev, "attached to IOMMU\n");
+		}
+
 		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
 
 		dwc3_msm_power_collapse_por(mdwc);
@@ -2766,6 +2816,60 @@
 	return ret;
 }
 
+#define SMMU_BASE	0x10000000 /* Device address range base */
+#define SMMU_SIZE	0x40000000 /* Device address range size */
+
+static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
+{
+	struct device_node *node = mdwc->dev->of_node;
+	int atomic_ctx = 1, s1_bypass;
+	int ret;
+
+	if (!of_property_read_bool(node, "iommus"))
+		return 0;
+
+	mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+			SMMU_BASE, SMMU_SIZE);
+	if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
+		ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
+		dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
+				ret);
+		return ret;
+	}
+	dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
+
+	ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
+			&atomic_ctx);
+	if (ret) {
+		dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
+			ret);
+		goto release_mapping;
+	}
+
+	s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
+	ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
+			DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+	if (ret) {
+		dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
+			s1_bypass, ret);
+		goto release_mapping;
+	}
+
+	ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
+	if (ret) {
+		dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
+		goto release_mapping;
+	}
+	dev_dbg(mdwc->dev, "attached to IOMMU\n");
+
+	return 0;
+
+release_mapping:
+	arm_iommu_release_mapping(mdwc->iommu_map);
+	mdwc->iommu_map = NULL;
+	return ret;
+}
+
 static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -3070,12 +3174,16 @@
 
 	dwc3_set_notifier(&dwc3_msm_notify_event);
 
+	ret = dwc3_msm_init_iommu(mdwc);
+	if (ret)
+		goto err;
+
 	/* Assumes dwc3 is the first DT child of dwc3-msm */
 	dwc3_node = of_get_next_available_child(node, NULL);
 	if (!dwc3_node) {
 		dev_err(&pdev->dev, "failed to find dwc3 child\n");
 		ret = -ENODEV;
-		goto err;
+		goto uninit_iommu;
 	}
 
 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
@@ -3083,7 +3191,7 @@
 		dev_err(&pdev->dev,
 				"failed to add create dwc3 core\n");
 		of_node_put(dwc3_node);
-		goto err;
+		goto uninit_iommu;
 	}
 
 	mdwc->dwc3 = of_find_device_by_node(dwc3_node);
@@ -3129,6 +3237,10 @@
 	ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
 				&mdwc->num_gsi_event_buffers);
 
+	/* IOMMU will be reattached upon each resume/connect */
+	if (mdwc->iommu_map)
+		arm_iommu_detach_device(mdwc->dev);
+
 	/*
 	 * Clocks and regulators will not be turned on until the first time
 	 * runtime PM resume is called. This is to allow for booting up with
@@ -3194,6 +3306,11 @@
 	platform_device_put(mdwc->dwc3);
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+uninit_iommu:
+	if (mdwc->iommu_map) {
+		arm_iommu_detach_device(mdwc->dev);
+		arm_iommu_release_mapping(mdwc->iommu_map);
+	}
 err:
 	return ret;
 }
@@ -3271,6 +3388,12 @@
 
 	dwc3_msm_config_gdsc(mdwc, 0);
 
+	if (mdwc->iommu_map) {
+		if (!atomic_read(&dwc->in_lpm))
+			arm_iommu_detach_device(mdwc->dev);
+		arm_iommu_release_mapping(mdwc->iommu_map);
+	}
+
 	return 0;
 }
 
@@ -3445,6 +3568,8 @@
 		mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
 		usb_register_notify(&mdwc->host_nb);
 
+		mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
+		usb_register_atomic_notify(&mdwc->usbdev_nb);
 		/*
 		 * FIXME If micro A cable is disconnected during system suspend,
 		 * xhci platform device will be removed before runtime pm is
@@ -3498,6 +3623,7 @@
 	} else {
 		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
 
+		usb_unregister_atomic_notify(&mdwc->usbdev_nb);
 		if (!IS_ERR(mdwc->vbus_reg))
 			ret = regulator_disable(mdwc->vbus_reg);
 		if (ret) {
@@ -3820,11 +3946,12 @@
 		break;
 
 	case OTG_STATE_A_HOST:
-		if (test_bit(ID, &mdwc->inputs)) {
-			dev_dbg(mdwc->dev, "id\n");
+		if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
+			dev_dbg(mdwc->dev, "id || hc_died\n");
 			dwc3_otg_start_host(mdwc, 0);
 			mdwc->otg_state = OTG_STATE_B_IDLE;
 			mdwc->vbus_retry_count = 0;
+			mdwc->hc_died = false;
 			work = 1;
 		} else {
 			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c2b2938..bb32978 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1011,8 +1011,8 @@
 		u32	transfer_size = 0;
 		u32	maxpacket;
 
-		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-				dep->number);
+		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+				&req->request, dep->number);
 		if (ret) {
 			dwc3_trace(trace_dwc3_ep0, "failed to map request");
 			return;
@@ -1040,8 +1040,8 @@
 				DWC3_TRBCTL_CONTROL_DATA, false);
 		ret = dwc3_ep0_start_trans(dwc, dep->number);
 	} else {
-		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-				dep->number);
+		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+				&req->request, dep->number);
 		if (ret) {
 			dwc3_trace(trace_dwc3_ep0, "failed to map request");
 			return;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a6014fa..a5d3209f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -295,8 +295,8 @@
 	if (dwc->ep0_bounced && dep->number <= 1)
 		dwc->ep0_bounced = false;
 
-	usb_gadget_unmap_request(&dwc->gadget, &req->request,
-			req->direction);
+	usb_gadget_unmap_request_by_dev(dwc->sysdev,
+			&req->request, req->direction);
 
 	trace_dwc3_gadget_giveback(req);
 
@@ -476,7 +476,7 @@
 	if (dep->trb_pool)
 		return 0;
 
-	dep->trb_pool = dma_zalloc_coherent(dwc->dev,
+	dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
 			sizeof(struct dwc3_trb) * num_trbs,
 			&dep->trb_pool_dma, GFP_KERNEL);
 	if (!dep->trb_pool) {
@@ -508,7 +508,7 @@
 		dbg_event(dep->number, "Clr_TRB", 0);
 		dev_dbg(dwc->dev, "Clr_TRB ring of %s\n", dep->name);
 
-		dma_free_coherent(dwc->dev,
+		dma_free_coherent(dwc->sysdev,
 				sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
 				dep->trb_pool, dep->trb_pool_dma);
 		dep->trb_pool = NULL;
@@ -1150,8 +1150,8 @@
 		 * here and stop, unmap, free and del each of the linked
 		 * requests instead of what we do now.
 		 */
-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
-				req->direction);
+		usb_gadget_unmap_request_by_dev(dwc->sysdev,
+				&req->request, req->direction);
 		list_del(&req->list);
 		return ret;
 	}
@@ -1233,8 +1233,8 @@
 
 	trace_dwc3_ep_queue(req);
 
-	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-			dep->direction);
+	ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+					    dep->direction);
 	if (ret)
 		return ret;
 
@@ -2099,6 +2099,18 @@
 	u32			reg;
 
 	dbg_event(0xFF, "__Gadgetstart", 0);
+
+	/*
+	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+	 * the core supports IMOD, disable it.
+	 */
+	if (dwc->imod_interval) {
+		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+	} else if (dwc3_has_imod(dwc)) {
+		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+	}
+
 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
 	reg &= ~(DWC3_DCFG_SPEED_MASK);
 
@@ -3473,8 +3485,6 @@
 		 */
 		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
 		left -= 4;
-
-		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
 	}
 
 	dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] += (evt->count / 4);
@@ -3487,9 +3497,22 @@
 	reg &= ~DWC3_GEVNTSIZ_INTMASK;
 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
 
+	if (dwc->imod_interval)
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0),
+				DWC3_GEVNTCOUNT_EHB);
+
 	return ret;
 }
 
+void dwc3_bh_work(struct work_struct *w)
+{
+	struct dwc3 *dwc = container_of(w, struct dwc3, bh_work);
+
+	pm_runtime_get_sync(dwc->dev);
+	dwc3_thread_interrupt(dwc->irq, dwc);
+	pm_runtime_put(dwc->dev);
+}
+
 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
 {
 	struct dwc3 *dwc = _dwc;
@@ -3543,6 +3566,8 @@
 	reg |= DWC3_GEVNTSIZ_INTMASK;
 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
 
+	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+
 	return IRQ_WAKE_THREAD;
 }
 
@@ -3572,7 +3597,7 @@
 	dwc->irq_dbg_index = (dwc->irq_dbg_index + 1) % MAX_INTR_STATS;
 
 	if (ret == IRQ_WAKE_THREAD)
-		dwc3_thread_interrupt(dwc->irq, dwc);
+		queue_work(dwc->dwc_wq, &dwc->bh_work);
 
 	return IRQ_HANDLED;
 }
@@ -3615,7 +3640,7 @@
 
 	INIT_WORK(&dwc->wakeup_work, dwc3_gadget_wakeup_work);
 
-	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			&dwc->ctrl_req_addr, GFP_KERNEL);
 	if (!dwc->ctrl_req) {
 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
@@ -3623,8 +3648,9 @@
 		goto err0;
 	}
 
-	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
-			&dwc->ep0_trb_addr, GFP_KERNEL);
+	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
+					  sizeof(*dwc->ep0_trb) * 2,
+					  &dwc->ep0_trb_addr, GFP_KERNEL);
 	if (!dwc->ep0_trb) {
 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
 		ret = -ENOMEM;
@@ -3637,7 +3663,7 @@
 		goto err2;
 	}
 
-	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
+	dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev,
 			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
 			GFP_KERNEL);
 	if (!dwc->ep0_bounce) {
@@ -3716,18 +3742,18 @@
 
 err4:
 	dwc3_gadget_free_endpoints(dwc);
-	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+	dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
 err3:
 	kfree(dwc->setup_buf);
 
 err2:
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
 			dwc->ep0_trb, dwc->ep0_trb_addr);
 
 err1:
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 
 err0:
@@ -3747,16 +3773,16 @@
 
 	dwc3_gadget_free_endpoints(dwc);
 
-	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+	dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
 	kfree(dwc->setup_buf);
 	kfree(dwc->zlp_buf);
 
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
 			dwc->ep0_trb, dwc->ep0_trb_addr);
 
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 }
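The gadget.c hunks above move dwc3 event processing out of the threaded ISR and into a dedicated workqueue item (dwc3_bh_work): the hard IRQ masks the event buffer, acks GEVNTCOUNT, and queues work on dwc->dwc_wq, while the bottom half runs under a runtime-PM reference. A minimal sketch of that deferral pattern under placeholder names (struct my_ctrl and the my_* helpers are not dwc3 symbols):

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

struct my_ctrl {
	struct device		*dev;
	struct workqueue_struct	*wq;
	struct work_struct	bh_work;
	/* register base, locks, etc. */
};

/* placeholder helpers standing in for the controller-specific accessors */
static u32  my_read_event_count(struct my_ctrl *ctrl);
static void my_mask_event_irq(struct my_ctrl *ctrl);
static void my_ack_event_count(struct my_ctrl *ctrl, u32 count);
static void my_process_events(struct my_ctrl *ctrl);

static irqreturn_t my_hard_irq(int irq, void *data)
{
	struct my_ctrl *ctrl = data;
	u32 count = my_read_event_count(ctrl);	/* e.g. GEVNTCOUNT */

	if (!count)
		return IRQ_NONE;

	my_mask_event_irq(ctrl);		/* mask until the BH has run */
	my_ack_event_count(ctrl, count);	/* ack what was observed */
	queue_work(ctrl->wq, &ctrl->bh_work);	/* defer to process context */

	return IRQ_HANDLED;
}

static void my_bh_work(struct work_struct *w)
{
	struct my_ctrl *ctrl = container_of(w, struct my_ctrl, bh_work);

	pm_runtime_get_sync(ctrl->dev);		/* keep the core powered */
	my_process_events(ctrl);		/* drain events, then unmask */
	pm_runtime_put(ctrl->dev);
}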
 
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 990f423..e973ad3 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -97,6 +97,7 @@
 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
 void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
 irqreturn_t dwc3_interrupt(int irq, void *_dwc);
+void dwc3_bh_work(struct work_struct *w);
 
 static inline dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
 		struct dwc3_trb *trb)
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 800bcae..e52bf45 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -72,12 +72,7 @@
 		return -ENOMEM;
 	}
 
-	arch_setup_dma_ops(&xhci->dev, 0, 0, NULL, 0);
-	dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
-
 	xhci->dev.parent	= dwc->dev;
-	xhci->dev.dma_mask	= dwc->dev->dma_mask;
-	xhci->dev.dma_parms	= dwc->dev->dma_parms;
 
 	dwc->xhci = xhci;
 
@@ -100,9 +95,9 @@
 	}
 
 	phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 	phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 
 	/* Platform device gets added as part of state machine */
 
@@ -115,9 +110,9 @@
 void dwc3_host_exit(struct dwc3 *dwc)
 {
 	phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&dwc->xhci->dev));
+			  dev_name(dwc->dev));
 	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&dwc->xhci->dev));
+			  dev_name(dwc->dev));
 	if (!dwc->is_drd)
 		platform_device_unregister(dwc->xhci);
 }
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index daca68b..46df732 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -611,8 +611,7 @@
 {
 	struct acc_dev *dev = fp->private_data;
 	struct usb_request *req;
-	ssize_t r = count;
-	unsigned xfer;
+	ssize_t r = count, xfer, len;
 	int ret = 0;
 
 	pr_debug("acc_read(%zu)\n", count);
@@ -633,6 +632,8 @@
 		goto done;
 	}
 
+	len = ALIGN(count, dev->ep_out->maxpacket);
+
 	if (dev->rx_done) {
 		// last req cancelled. try to get it.
 		req = dev->rx_req[0];
@@ -642,7 +643,7 @@
 requeue_req:
 	/* queue a request */
 	req = dev->rx_req[0];
-	req->length = count;
+	req->length = len;
 	dev->rx_done = 0;
 	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
 	if (ret < 0) {
@@ -941,6 +942,8 @@
 			memset(dev->serial, 0, sizeof(dev->serial));
 			dev->start_requested = 0;
 			dev->audio_mode = 0;
+			strlcpy(dev->manufacturer, "Android", ACC_STRING_SIZE);
+			strlcpy(dev->model, "Android", ACC_STRING_SIZE);
 		}
 	}
 
@@ -1251,13 +1254,13 @@
 	INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
 	INIT_WORK(&dev->hid_work, acc_hid_work);
 
-	/* _acc_dev must be set before calling usb_gadget_register_driver */
-	_acc_dev = dev;
-
 	ret = misc_register(&acc_device);
 	if (ret)
 		goto err;
 
+	/* _acc_dev must be set before calling usb_gadget_register_driver */
+	_acc_dev = dev;
+
 	return 0;
 
 err:
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index a30766c..5e3828d 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@
 {
 	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
 	int			status;
+	__le16			serial_state;
 
 	spin_lock(&acm->lock);
 	if (acm->notify_req) {
 		dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
 			acm->port_num, acm->serial_state);
+		serial_state = cpu_to_le16(acm->serial_state);
 		status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
-				0, &acm->serial_state, sizeof(acm->serial_state));
+				0, &serial_state, sizeof(acm->serial_state));
 	} else {
 		acm->pending = true;
 		status = 0;
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index db7903d..a2a9185 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -989,6 +989,7 @@
 
 struct device *create_function_device(char *name);
 
+#define AUDIO_SOURCE_DEV_NAME_LENGTH 20
 static struct usb_function_instance *audio_source_alloc_inst(void)
 {
 	struct audio_source_instance *fi_audio;
@@ -997,6 +998,8 @@
 	struct device *dev;
 	void *err_ptr;
 	int err = 0;
+	char device_name[AUDIO_SOURCE_DEV_NAME_LENGTH];
+	static u8 count;
 
 	fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
 	if (!fi_audio)
@@ -1014,7 +1017,11 @@
 
 	config_group_init_type_name(&fi_audio->func_inst.group, "",
 						&audio_source_func_type);
-	dev = create_function_device("f_audio_source");
+
+	snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+					"f_audio_source%d", count++);
+
+	dev = create_function_device(device_name);
 
 	if (IS_ERR(dev)) {
 		err_ptr = dev;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index c807b12..12e94d5 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1562,13 +1562,6 @@
 			event->bNotificationType, req->status);
 		/* FALLTHROUGH */
 	case 0:
-		/*
-		 * handle multiple pending resp available
-		 * notifications by queuing same until we're done,
-		 * rest of the notification require queuing new
-		 * request.
-		 */
-		gsi_ctrl_send_notification(gsi);
 		break;
 	}
 }
@@ -1663,6 +1656,14 @@
 	gsi_ctrl_send_cpkt_tomodem(gsi, req->buf, 0);
 }
 
+static void gsi_ctrl_send_response_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_gsi *gsi = req->context;
+
+	gsi_ctrl_send_notification(gsi);
+}
+
 static int
 gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
 {
@@ -1749,6 +1750,8 @@
 		memcpy(req->buf, cpkt->buf, value);
 		gsi_ctrl_pkt_free(cpkt);
 
+		req->complete = gsi_ctrl_send_response_complete;
+		req->context = gsi;
 		log_event_dbg("copied encap_resp %d bytes",
 			value);
 		break;
@@ -3047,6 +3050,9 @@
 {
 	struct gsi_opts *opts = container_of(f, struct gsi_opts, func_inst);
 
+	if (!opts->gsi)
+		return;
+
 	if (opts->gsi->c_port.ctrl_device.fops)
 		misc_deregister(&opts->gsi->c_port.ctrl_device);
 
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a832d27..fbe6910 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1284,7 +1284,7 @@
 	card = midi->card;
 	midi->card = NULL;
 	if (card)
-		snd_card_free(card);
+		snd_card_free_when_closed(card);
 
 	usb_free_all_descriptors(f);
 }
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 4d8694a..c6aa884 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1425,6 +1425,7 @@
 	struct usb_request *req;
 	int i;
 
+	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
 	while ((req = mtp_req_get(dev, &dev->tx_idle)))
 		mtp_request_free(req, dev->ep_in);
 	for (i = 0; i < RX_REQ_MAX; i++)
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index e837536..d2fbed7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1428,17 +1428,39 @@
 	 */
 	if (!ncm_opts->bound) {
 		mutex_lock(&ncm_opts->lock);
+		ncm_opts->net = gether_setup_default();
+		if (IS_ERR(ncm_opts->net)) {
+			status = PTR_ERR(ncm_opts->net);
+			mutex_unlock(&ncm_opts->lock);
+			goto error;
+		}
 		gether_set_gadget(ncm_opts->net, cdev->gadget);
 		status = gether_register_netdev(ncm_opts->net);
 		mutex_unlock(&ncm_opts->lock);
-		if (status)
-			return status;
+		if (status) {
+			free_netdev(ncm_opts->net);
+			goto error;
+		}
 		ncm_opts->bound = true;
 	}
+
+	/* export host's Ethernet address in CDC format */
+	status = gether_get_host_addr_cdc(ncm_opts->net, ncm->ethaddr,
+				      sizeof(ncm->ethaddr));
+	if (status < 12) { /* strlen("01234567890a") */
+		ERROR(cdev, "%s: failed to get host eth addr, err %d\n",
+		__func__, status);
+		status = -EINVAL;
+		goto netdev_cleanup;
+	}
+	ncm->port.ioport = netdev_priv(ncm_opts->net);
+
 	us = usb_gstrings_attach(cdev, ncm_strings,
 				 ARRAY_SIZE(ncm_string_defs));
-	if (IS_ERR(us))
-		return PTR_ERR(us);
+	if (IS_ERR(us)) {
+		status = PTR_ERR(us);
+		goto netdev_cleanup;
+	}
 	ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
 	ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
 	ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1539,7 +1561,10 @@
 		kfree(ncm->notify_req->buf);
 		usb_ep_free_request(ncm->notify, ncm->notify_req);
 	}
+netdev_cleanup:
+	gether_cleanup(netdev_priv(ncm_opts->net));
 
+error:
 	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
 
 	return status;
@@ -1587,8 +1612,6 @@
 	opts = container_of(f, struct f_ncm_opts, func_inst);
 	if (opts->bound)
 		gether_cleanup(netdev_priv(opts->net));
-	else
-		free_netdev(opts->net);
 	kfree(opts);
 }
 
@@ -1601,12 +1624,6 @@
 		return ERR_PTR(-ENOMEM);
 	mutex_init(&opts->lock);
 	opts->func_inst.free_func_inst = ncm_free_inst;
-	opts->net = gether_setup_default();
-	if (IS_ERR(opts->net)) {
-		struct net_device *net = opts->net;
-		kfree(opts);
-		return ERR_CAST(net);
-	}
 
 	config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
 
@@ -1629,6 +1646,8 @@
 static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct f_ncm *ncm = func_to_ncm(f);
+	struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
+					func_inst);
 
 	DBG(c->cdev, "ncm unbind\n");
 
@@ -1640,13 +1659,15 @@
 
 	kfree(ncm->notify_req->buf);
 	usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+	gether_cleanup(netdev_priv(opts->net));
+	opts->bound = false;
 }
 
 static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
 {
 	struct f_ncm		*ncm;
 	struct f_ncm_opts	*opts;
-	int status;
 
 	/* allocate and initialize one new instance */
 	ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
@@ -1656,20 +1677,9 @@
 	opts = container_of(fi, struct f_ncm_opts, func_inst);
 	mutex_lock(&opts->lock);
 	opts->refcnt++;
-
-	/* export host's Ethernet address in CDC format */
-	status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
-				      sizeof(ncm->ethaddr));
-	if (status < 12) { /* strlen("01234567890a") */
-		kfree(ncm);
-		mutex_unlock(&opts->lock);
-		return ERR_PTR(-EINVAL);
-	}
 	ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
-
 	spin_lock_init(&ncm->lock);
 	ncm_reset_values(ncm);
-	ncm->port.ioport = netdev_priv(opts->net);
 	mutex_unlock(&opts->lock);
 	ncm->port.is_fixed = true;
 	ncm->port.supports_multi_frame = true;
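The f_ncm changes above allocate the u_ether netdev at bind time and release it through the new netdev_cleanup/error labels, so the device only exists while the function is bound. A condensed sketch of that unwind pattern, with my_ctx, my_attach_strings() and my_alloc_endpoints() as hypothetical stand-ins for the real bind steps:

struct my_ctx {
	struct net_device	*net;
	/* ... */
};

static int my_bind(struct my_ctx *ctx)
{
	int status;

	ctx->net = gether_setup_default();	/* allocate at bind time */
	if (IS_ERR(ctx->net))
		return PTR_ERR(ctx->net);

	status = my_attach_strings(ctx);	/* placeholder bind step */
	if (status)
		goto netdev_cleanup;

	status = my_alloc_endpoints(ctx);	/* placeholder bind step */
	if (status)
		goto netdev_cleanup;

	return 0;

netdev_cleanup:
	gether_cleanup(netdev_priv(ctx->net));	/* undo in reverse order */
	return status;
}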
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5..c7689d0 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -625,7 +625,7 @@
 	uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
 	uvc_ss_streaming_comp.wBytesPerInterval =
 		cpu_to_le16(max_packet_size * max_packet_mult *
-			    opts->streaming_maxburst);
+			    (opts->streaming_maxburst + 1));
 
 	/* Allocate endpoints. */
 	ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 45bc997..a95b3e7 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@
 			dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
 			goto err;
 		}
-		ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+		sprintf(ep->name, "ep%d", ep->index);
+		ep->ep.name = ep->name;
 
 		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
 		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d5..b03b2eb 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@
 	void __iomem				*ep_regs;
 	void __iomem				*dma_regs;
 	void __iomem				*fifo;
+	char					name[8];
 	struct usb_ep				ep;
 	struct usba_udc				*udc;
 
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a84fe94..681b77a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -407,11 +407,20 @@
 				return -ENOMEM;
 
 			}
-			xhci_queue_stop_endpoint(xhci, command, slot_id, i,
-						 suspend);
+
+			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+					i, suspend);
+			if (ret) {
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				goto err_cmd_queue;
+			}
 		}
 	}
-	xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto err_cmd_queue;
+	}
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -422,6 +431,8 @@
 		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
 		ret = -ETIME;
 	}
+
+err_cmd_queue:
 	xhci_free_command(xhci, cmd);
 	return ret;
 }
@@ -1338,7 +1349,7 @@
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_RESUME);
 				spin_unlock_irqrestore(&xhci->lock, flags);
-				msleep(USB_RESUME_TIMEOUT);
+				usleep_range(21000, 21500);
 				spin_lock_irqsave(&xhci->lock, flags);
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_U0);
@@ -1619,7 +1630,7 @@
 
 	if (need_usb2_u3_exit) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		msleep(USB_RESUME_TIMEOUT);
+		usleep_range(21000, 21500);
 		spin_lock_irqsave(&xhci->lock, flags);
 	}
 
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 7064892..374750f 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -586,7 +586,7 @@
 		unsigned int num_stream_ctxs,
 		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
 
 	if (size > MEDIUM_STREAM_ARRAY_SIZE)
@@ -614,7 +614,7 @@
 		unsigned int num_stream_ctxs, dma_addr_t *dma,
 		gfp_t mem_flags)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
 
 	if (size > MEDIUM_STREAM_ARRAY_SIZE)
@@ -1678,7 +1678,7 @@
 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 {
 	int i;
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
 
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -1750,7 +1750,7 @@
 {
 	int num_sp;
 	int i;
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 
 	if (!xhci->scratchpad)
 		return;
@@ -1824,25 +1824,151 @@
 	kfree(command);
 }
 
-void xhci_mem_cleanup(struct xhci_hcd *xhci)
+void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
 {
-	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	union xhci_trb *erdp_trb, *current_trb;
+	struct xhci_segment	*seg;
+	u64 erdp_reg;
+	u32 iman_reg;
+	dma_addr_t deq;
+	unsigned long segment_offset;
+
+	/* disable irq, ack pending interrupt and ack all pending events */
+
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg &= ~IMAN_IE;
+	writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	if (iman_reg & IMAN_IP)
+		writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+
+	/* last acked event trb is in erdp reg  */
+	erdp_reg =
+		xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
+	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
+	if (!deq) {
+		pr_debug("%s: event ring handling not required\n", __func__);
+		return;
+	}
+
+	seg = xhci->sec_event_ring[intr_num]->first_seg;
+	segment_offset = deq - seg->dma;
+
+	/* find out virtual address of the last acked event trb */
+	erdp_trb = current_trb = &seg->trbs[0] +
+				(segment_offset/sizeof(*current_trb));
+
+	/* read cycle state of the last acked trb to find out CCS */
+	xhci->sec_event_ring[intr_num]->cycle_state =
+		(le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE);
+
+	while (1) {
+		/* last trb of the event ring: toggle cycle state */
+		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
+			xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
+			current_trb = &seg->trbs[0];
+		} else {
+			current_trb++;
+		}
+
+		/* cycle state transition */
+		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
+		    xhci->sec_event_ring[intr_num]->cycle_state)
+			break;
+	}
+
+	if (erdp_trb != current_trb) {
+		deq =
+		xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
+					current_trb);
+		if (deq == 0)
+			xhci_warn(xhci,
+				"WARN ivalid SW event ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		erdp_reg &= ERST_PTR_MASK;
+		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	erdp_reg |= ERST_EHB;
+	xhci_write_64(xhci, erdp_reg,
+			&xhci->sec_ir_set[intr_num]->erst_dequeue);
+}
+
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num)
+{
 	int size;
-	int i, j, num_ports;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
 
-	cancel_delayed_work_sync(&xhci->cmd_timer);
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "invalid secondary interrupter num %d\n",
+			intr_num);
+		return -EINVAL;
+	}
 
-	/* Free the Event Ring Segment Table and the actual Event Ring */
+	size =
+	sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
+	if (xhci->sec_erst[intr_num].entries) {
+		xhci_handle_sec_intr_events(xhci, intr_num);
+		dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
+				xhci->sec_erst[intr_num].erst_dma_addr);
+		xhci->sec_erst[intr_num].entries = NULL;
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
+		intr_num);
+	if (xhci->sec_event_ring[intr_num])
+		xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
+
+	xhci->sec_event_ring[intr_num] = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Freed sec event ring");
+
+	return 0;
+}
+
+void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
+{
+	int size;
+	unsigned int i;
+	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	/* sec event ring clean up */
+	for (i = 1; i < xhci->max_interrupters; i++)
+		xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);
+
+	kfree(xhci->sec_ir_set);
+	xhci->sec_ir_set = NULL;
+	kfree(xhci->sec_erst);
+	xhci->sec_erst = NULL;
+	kfree(xhci->sec_event_ring);
+	xhci->sec_event_ring = NULL;
+
+	/* primary event ring clean up */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		dma_free_coherent(dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed priamry event ring");
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
+	int i, j, num_ports;
+
+	cancel_delayed_work_sync(&xhci->cmd_timer);
+
+	xhci_event_ring_cleanup(xhci);
 
 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -2083,30 +2209,6 @@
 	return 0;
 }
 
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
-	u64 temp;
-	dma_addr_t deq;
-
-	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
-			xhci->event_ring->dequeue);
-	if (deq == 0 && !in_interrupt())
-		xhci_warn(xhci, "WARN something wrong with SW event ring "
-				"dequeue ptr.\n");
-	/* Update HC event ring dequeue pointer */
-	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp &= ERST_PTR_MASK;
-	/* Don't clear the EHB bit (which is RW1C) because
-	 * there might be more events to service.
-	 */
-	temp &= ~ERST_EHB;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write event ring dequeue pointer, "
-			"preserving EHB bit");
-	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
-			&xhci->ir_set->erst_dequeue);
-}
-
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		__le32 __iomem *addr, int max_caps)
 {
@@ -2365,13 +2467,183 @@
 	return 0;
 }
 
+int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
+	struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
+	unsigned int intr_num, gfp_t flags)
+{
+	dma_addr_t dma, deq;
+	u64 val_64;
+	unsigned int val;
+	struct xhci_segment *seg;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
+	if (!*er)
+		return -ENOMEM;
+
+	erst->entries = dma_alloc_coherent(dev,
+			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+			flags);
+	if (!erst->entries) {
+		xhci_ring_free(xhci, *er);
+		return -ENOMEM;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: Allocated event ring segment table at 0x%llx",
+		intr_num, (unsigned long long)dma);
+
+	memset(erst->entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	erst->num_entries = ERST_NUM_SEGS;
+	erst->erst_dma_addr = dma;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: num segs = %i, virt addr = %p, dma addr = 0x%llx",
+			intr_num,
+			erst->num_entries,
+			erst->entries,
+			(unsigned long long)erst->erst_dma_addr);
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = (*er)->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &erst->entries[val];
+
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = readl_relaxed(&ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Write ERST size = %i to ir_set %d (some bits preserved)", val,
+		intr_num);
+	writel_relaxed(val, &ir_set->erst_size);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"intr# %d: Set ERST entries to point to event ring.",
+			intr_num);
+	/* set the segment table base address */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Set ERST base address for ir_set %d = 0x%llx",
+			intr_num,
+			(unsigned long long)erst->erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci,
+		"intr# %d:WARN something wrong with SW event ring deq ptr.\n",
+		intr_num);
+	/* Update HC event ring dequeue pointer */
+	val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
+	val_64 &= ERST_PTR_MASK;
+	/* Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
+	 */
+	val_64 &= ~ERST_EHB;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d:Write event ring dequeue pointer, preserving EHB bit",
+		intr_num);
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
+			&ir_set->erst_dequeue);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set %d.", intr_num);
+	xhci_print_ir_set(xhci, intr_num);
+
+	return 0;
+}
+
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num)
+{
+	int ret;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
+		|| !xhci->sec_event_ring || !xhci->sec_erst ||
+		intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci,
+		"%s:state %x ir_set %p evt_ring %p erst %p intr# %d\n",
+		__func__, xhci->xhc_state, xhci->sec_ir_set,
+		xhci->sec_event_ring, xhci->sec_erst, intr_num);
+		return -EINVAL;
+	}
+
+	if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		goto done;
+
+	xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
+	ret = xhci_event_ring_setup(xhci,
+				&xhci->sec_event_ring[intr_num],
+				xhci->sec_ir_set[intr_num],
+				&xhci->sec_erst[intr_num],
+				intr_num, GFP_KERNEL);
+	if (ret) {
+		xhci_err(xhci, "sec event ring setup failed inter#%d\n",
+			intr_num);
+		return ret;
+	}
+done:
+	return 0;
+}
+
+int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+	int ret = 0;
+
+	/* primary + secondary */
+	xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating primary event ring");
+
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
+	ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
+		&xhci->erst, 0, flags);
+	if (ret) {
+		xhci_err(xhci, "failed to setup primary event ring\n");
+		goto fail;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating sec event ring related pointers");
+
+	xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_ir_set), flags);
+	if (!xhci->sec_ir_set) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_event_ring), flags);
+	if (!xhci->sec_event_ring) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_erst = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_erst), flags);
+	if (!xhci->sec_erst)
+		ret = -ENOMEM;
+fail:
+	return ret;
+}
+
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t	dma;
-	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
 	unsigned int	val, val2;
 	u64		val_64;
-	struct xhci_segment	*seg;
 	u32 page_size, temp;
 	int i;
 
@@ -2497,74 +2769,17 @@
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
-	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-					0, flags);
-	if (!xhci->event_ring)
+	if (xhci_event_ring_init(xhci, GFP_KERNEL))
 		goto fail;
+
 	if (xhci_check_trb_in_td_math(xhci) < 0)
 		goto fail;
 
-	xhci->erst.entries = dma_alloc_coherent(dev,
-			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
-			flags);
-	if (!xhci->erst.entries)
-		goto fail;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Allocated event ring segment table at 0x%llx",
-			(unsigned long long)dma);
-
-	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst.num_entries = ERST_NUM_SEGS;
-	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
-			xhci->erst.num_entries,
-			xhci->erst.entries,
-			(unsigned long long)xhci->erst.erst_dma_addr);
-
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
-
-	/* set ERST count with the number of entries in the segment table */
-	val = readl(&xhci->ir_set->erst_size);
-	val &= ERST_SIZE_MASK;
-	val |= ERST_NUM_SEGS;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
-			val);
-	writel(val, &xhci->ir_set->erst_size);
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST entries to point to event ring.");
-	/* set the segment table base address */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST base address for ir_set 0 = 0x%llx",
-			(unsigned long long)xhci->erst.erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
-	/* Set the event ring dequeue address */
-	xhci_set_hc_event_deq(xhci);
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Wrote ERST address to ir_set 0.");
-	xhci_print_ir_set(xhci, 0);
-
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
 	 * something other than the default (~1ms minimum between interrupts).
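Both xhci_handle_sec_intr_events() and xhci_event_ring_setup() above update an interrupter's ERST dequeue pointer the same way: keep the low flag bits, write the new dequeue address, and only write the RW1C EHB bit once the ring has actually been drained. A condensed sketch of that update (my_write_erdp is a hypothetical helper; the accessors and masks are the ones used above):

static void my_write_erdp(struct xhci_hcd *xhci,
			  struct xhci_intr_reg __iomem *ir_set,
			  dma_addr_t deq, bool ring_empty)
{
	u64 erdp = xhci_read_64(xhci, &ir_set->erst_dequeue);

	erdp &= ERST_PTR_MASK;			/* keep the low flag bits */
	erdp |= ((u64)deq & (u64)~ERST_PTR_MASK);	/* new dequeue address */
	if (ring_empty)
		erdp |= ERST_EHB;		/* EHB is RW1C: clear busy */
	else
		erdp &= ~ERST_EHB;		/* don't ack it by mistake */
	xhci_write_64(xhci, erdp, &ir_set->erst_dequeue);
}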
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 129bb3f..fa1323b 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -14,6 +14,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/usb/phy.h>
@@ -135,16 +136,72 @@
 MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
 #endif
 
+static ssize_t config_imod_store(struct device *pdev,
+		struct device_attribute *attr, const char *buff, size_t size)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(pdev);
+	struct xhci_hcd *xhci;
+	u32 temp;
+	u32 imod;
+	unsigned long flags;
+
+	if (kstrtouint(buff, 10, &imod))
+		return -EINVAL;
+
+	imod &= ER_IRQ_INTERVAL_MASK;
+	xhci = hcd_to_xhci(hcd);
+
+	if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+		&& hcd->state == HC_STATE_SUSPENDED)
+		return -EACCES;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = readl_relaxed(&xhci->ir_set->irq_control);
+	temp &= ~ER_IRQ_INTERVAL_MASK;
+	temp |= imod;
+	writel_relaxed(temp, &xhci->ir_set->irq_control);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return size;
+}
+
+static ssize_t config_imod_show(struct device *pdev,
+		struct device_attribute *attr, char *buff)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(pdev);
+	struct xhci_hcd *xhci;
+	u32 temp;
+	unsigned long flags;
+
+	xhci = hcd_to_xhci(hcd);
+
+	if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+		&& hcd->state == HC_STATE_SUSPENDED)
+		return -EACCES;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = readl_relaxed(&xhci->ir_set->irq_control) &
+			ER_IRQ_INTERVAL_MASK;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return snprintf(buff, PAGE_SIZE, "%08u\n", temp);
+}
+
+static DEVICE_ATTR(config_imod, 0644, config_imod_show, config_imod_store);
+
 static int xhci_plat_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *match;
 	const struct hc_driver	*driver;
+	struct device		*sysdev;
 	struct xhci_hcd		*xhci;
 	struct resource         *res;
 	struct usb_hcd		*hcd;
 	struct clk              *clk;
 	int			ret;
 	int			irq;
+	u32			temp, imod;
+	unsigned long		flags;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -155,25 +212,44 @@
 	if (irq < 0)
 		return -ENODEV;
 
+	/*
+	 * sysdev must point to a device that is known to the system firmware
+	 * or PCI hardware. We handle these three cases here:
+	 * 1. xhci_plat comes from firmware
+	 * 2. xhci_plat is child of a device from firmware (dwc3-plat)
+	 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
+	 */
+	sysdev = &pdev->dev;
+	if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+		sysdev = sysdev->parent;
+#ifdef CONFIG_PCI
+	else if (sysdev->parent && sysdev->parent->parent &&
+		 sysdev->parent->parent->bus == &pci_bus_type)
+		sysdev = sysdev->parent->parent;
+#endif
+
 	/* Try to set 64-bit DMA first */
-	if (WARN_ON(!pdev->dev.dma_mask))
+	if (WARN_ON(!sysdev->dma_mask))
 		/* Platform did not initialize dma_mask */
-		ret = dma_coerce_mask_and_coherent(&pdev->dev,
+		ret = dma_coerce_mask_and_coherent(sysdev,
 						   DMA_BIT_MASK(64));
 	else
-		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+		ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
 
 	/* If seting 64-bit DMA mask fails, fall back to 32-bit DMA mask */
 	if (ret) {
-		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(32));
 		if (ret)
 			return ret;
 	}
 
-	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+			       dev_name(&pdev->dev), NULL);
 	if (!hcd)
 		return -ENOMEM;
 
+	hcd_to_bus(hcd)->skip_resume = true;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(hcd->regs)) {
@@ -222,17 +298,22 @@
 
 	xhci->clk = clk;
 	xhci->main_hcd = hcd;
-	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
+	xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
 			dev_name(&pdev->dev), hcd);
 	if (!xhci->shared_hcd) {
 		ret = -ENOMEM;
 		goto disable_clk;
 	}
 
-	if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
+	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+
+	if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
 		xhci->quirks |= XHCI_LPM_SUPPORT;
 
-	hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+	if (device_property_read_u32(sysdev, "snps,xhci-imod-value", &imod))
+		imod = 0;
+
+	hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
 	if (IS_ERR(hcd->usb_phy)) {
 		ret = PTR_ERR(hcd->usb_phy);
 		if (ret == -EPROBE_DEFER)
@@ -244,17 +325,34 @@
 			goto put_usb3_hcd;
 	}
 
-	ret = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_ONESHOT);
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (ret)
 		goto disable_usb_phy;
 
 	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
 		xhci->shared_hcd->can_do_streams = 1;
 
-	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED | IRQF_ONESHOT);
+	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
 	if (ret)
 		goto dealloc_usb2_hcd;
 
+	/* override imod interval if specified */
+	if (imod) {
+		imod &= ER_IRQ_INTERVAL_MASK;
+		spin_lock_irqsave(&xhci->lock, flags);
+		temp = readl_relaxed(&xhci->ir_set->irq_control);
+		temp &= ~ER_IRQ_INTERVAL_MASK;
+		temp |= imod;
+		writel_relaxed(temp, &xhci->ir_set->irq_control);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		dev_dbg(&pdev->dev, "%s: imod set to %u\n", __func__, imod);
+	}
+
+	ret = device_create_file(&pdev->dev, &dev_attr_config_imod);
+	if (ret)
+		dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n",
+					__func__);
+
 	pm_runtime_mark_last_busy(&pdev->dev);
 	pm_runtime_put_autosuspend(&pdev->dev);
 
@@ -289,6 +387,7 @@
 	pm_runtime_disable(&dev->dev);
 	xhci->xhc_state |= XHCI_STATE_REMOVING;
 
+	device_remove_file(&dev->dev, &dev_attr_config_imod);
 	usb_remove_hcd(xhci->shared_hcd);
 	usb_phy_shutdown(hcd->usb_phy);
 
@@ -329,7 +428,7 @@
 
 	dev_dbg(dev, "xhci-plat runtime suspend\n");
 
-	return xhci_suspend(xhci, true);
+	return 0;
 }
 
 static int xhci_plat_runtime_resume(struct device *dev)
@@ -343,7 +442,7 @@
 
 	dev_dbg(dev, "xhci-plat runtime resume\n");
 
-	ret = xhci_resume(xhci, false);
+	ret = 0;
 	pm_runtime_mark_last_busy(dev);
 
 	return ret;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e7e9c07..5d434e0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -675,7 +675,7 @@
 void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
 				 struct xhci_td *td)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	struct xhci_segment *seg = td->bounce_seg;
 	struct urb *urb = td->urb;
 
@@ -3153,7 +3153,7 @@
 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 			 u32 *trb_buff_len, struct xhci_segment *seg)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	unsigned int unalign;
 	unsigned int max_pkt;
 	u32 new_buff_len;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34e23c7..e6e985d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -231,6 +231,9 @@
 static int xhci_setup_msi(struct xhci_hcd *xhci)
 {
 	int ret;
+	/*
+	 * TODO: Check with MSI SoC for sysdev
+	 */
 	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
 	ret = pci_enable_msi(pdev);
@@ -257,7 +260,7 @@
  */
 static void xhci_free_irq(struct xhci_hcd *xhci)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.sysdev);
 	int ret;
 
 	/* return if using legacy interrupt */
@@ -743,7 +746,7 @@
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
 	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
-		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
@@ -760,7 +763,7 @@
 
 	/* Yet another workaround for spurious wakeups at shutdown with HSW */
 	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
-		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
+		pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
 }
 
 #ifdef CONFIG_PM
@@ -4821,7 +4824,11 @@
 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 {
 	struct xhci_hcd		*xhci;
-	struct device		*dev = hcd->self.controller;
+	/*
+	 * TODO: Check with DWC3 clients for sysdev according to
+	 * quirks
+	 */
+	struct device		*dev = hcd->self.sysdev;
 	int			retval;
 
 	/* Accept arbitrarily long scatter-gather lists */
@@ -4942,6 +4949,61 @@
 }
 EXPORT_SYMBOL_GPL(xhci_gen_setup);
 
+dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
+	unsigned int intr_num)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
+			xhci->max_interrupters);
+		return 0;
+	}
+
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
+		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		return xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+	return 0;
+}
+
+dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
+	struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
+		return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
+
+	return 0;
+}
+
+dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+	int ret;
+	unsigned int ep_index;
+	struct xhci_virt_device *virt_dev;
+
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+	if (ret <= 0) {
+		xhci_err(xhci, "%s: invalid args\n", __func__);
+		return 0;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+
+	if (virt_dev->eps[ep_index].ring &&
+		virt_dev->eps[ep_index].ring->first_seg)
+		return virt_dev->eps[ep_index].ring->first_seg->dma;
+
+	return 0;
+}
+
 static const struct hc_driver xhci_hc_driver = {
 	.description =		"xhci-hcd",
 	.product_desc =		"xHCI Host Controller",
@@ -5001,6 +5063,11 @@
 	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
 	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
 	.find_raw_port_number =	xhci_find_raw_port_number,
+	.sec_event_ring_setup =		xhci_sec_event_ring_setup,
+	.sec_event_ring_cleanup =	xhci_sec_event_ring_cleanup,
+	.get_sec_event_ring_dma_addr =	xhci_get_sec_event_ring_dma_addr,
+	.get_xfer_ring_dma_addr =	xhci_get_xfer_ring_dma_addr,
+	.get_dcba_dma_addr =		xhci_get_dcba_dma_addr,
 };
 
 void xhci_init_driver(struct hc_driver *drv,
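The exports above give a client driver (for example an audio or IPA offload path) a way to bring up a secondary interrupter and learn the DMA addresses it must program into the offload hardware. A hypothetical caller, assuming the struct hc_driver callbacks assigned in xhci_hc_driver above are declared elsewhere in this series and reachable through the usual struct usb_hcd:

static int my_offload_start(struct usb_device *udev,
			    struct usb_host_endpoint *ep)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	dma_addr_t er_dma, xfer_dma;
	int ret;

	ret = hcd->driver->sec_event_ring_setup(hcd, 1);	/* interrupter #1 */
	if (ret)
		return ret;

	er_dma = hcd->driver->get_sec_event_ring_dma_addr(hcd, 1);
	xfer_dma = hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
	if (!er_dma || !xfer_dma) {
		hcd->driver->sec_event_ring_cleanup(hcd, 1);
		return -ENODEV;
	}

	/* hand er_dma/xfer_dma to the offload hardware here */
	return 0;
}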
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 5250c72..0fe91df 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1535,6 +1535,9 @@
 	/* Our HCD's current interrupter register set */
 	struct	xhci_intr_reg __iomem *ir_set;
 
+	/* secondary interrupter */
+	struct	xhci_intr_reg __iomem **sec_ir_set;
+
 	/* Cached register copies of read-only HC data */
 	__u32		hcs_params1;
 	__u32		hcs_params2;
@@ -1576,6 +1579,11 @@
 	struct xhci_command	*current_cmd;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+
+	/* secondary event ring and erst */
+	struct xhci_ring	**sec_event_ring;
+	struct xhci_erst	*sec_erst;
+
 	/* Scratchpad */
 	struct xhci_scratchpad  *scratchpad;
 	/* Store LPM test failed devices' information */
@@ -1842,6 +1850,8 @@
 void xhci_urb_free_priv(struct urb_priv *urb_priv);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num);
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num);
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 2975e80..9a67ae3 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -346,6 +346,9 @@
 	if (iface_desc->desc.bInterfaceClass != 0x0A)
 		return -ENODEV;
 
+	if (iface_desc->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL)
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 7717651..d3d1247 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -366,6 +366,10 @@
 
 	hdev = interface_to_usbdev(intf);
 	desc = intf->cur_altsetting;
+
+	if (desc->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	endpoint = &desc->endpoint[0].desc;
 
 	/* valid only for SS root hub */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 356d312..9ff6652 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -708,6 +708,11 @@
 
 	interface = intf->cur_altsetting;
 
+	if (interface->desc.bNumEndpoints < 3) {
+		usb_put_dev(usbdev);
+		return -ENODEV;
+	}
+
 	/*
 	 * Allocate parport interface 
 	 */
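The idmouse, lvstest and uss720 hunks above all add the same defensive check: validate the descriptor-advertised endpoint count before indexing the endpoint array from probe(). The generic shape of that check, with my_setup() as a placeholder for the driver-specific setup:

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	/* never index alt->endpoint[] beyond what the device advertised */
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;

	return my_setup(intf, &alt->endpoint[0].desc);
}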
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 338575f..358feca 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2467,8 +2467,8 @@
 	pm_runtime_get_sync(musb->controller);
 	musb_host_cleanup(musb);
 	musb_gadget_cleanup(musb);
-	spin_lock_irqsave(&musb->lock, flags);
 	musb_platform_disable(musb);
+	spin_lock_irqsave(&musb->lock, flags);
 	musb_generic_disable(musb);
 	spin_unlock_irqrestore(&musb->lock, flags);
 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d4d7c56..cb443df 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -232,8 +232,27 @@
 			transferred < cppi41_channel->packet_sz)
 		cppi41_channel->prog_len = 0;
 
-	if (cppi41_channel->is_tx)
-		empty = musb_is_tx_fifo_empty(hw_ep);
+	if (cppi41_channel->is_tx) {
+		u8 type;
+
+		if (is_host_active(musb))
+			type = hw_ep->out_qh->type;
+		else
+			type = hw_ep->ep_in.type;
+
+		if (type == USB_ENDPOINT_XFER_ISOC)
+			/*
+			 * Don't use the early-TX-interrupt workaround below
+			 * for Isoch transfers. Since Isoch transfers are
+			 * periodic, by the time the next transfer is
+			 * scheduled, the current one should be done already.
+			 *
+			 * This avoids audio playback underrun issue.
+			 */
+			empty = true;
+		else
+			empty = musb_is_tx_fifo_empty(hw_ep);
+	}
 
 	if (!cppi41_channel->is_tx || empty) {
 		cppi41_trans_done(cppi41_channel);
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 58eb287..c59e33f 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,12 +33,8 @@
 #define QUSB2PHY_PLL_COMMON_STATUS_ONE	0x1A0
 #define CORE_READY_STATUS		BIT(0)
 
-/* In case Efuse register shows zero, use this value */
-#define TUNE2_DEFAULT_HIGH_NIBBLE	0xB
-#define TUNE2_DEFAULT_LOW_NIBBLE	0x3
-
-/* Get TUNE2's high nibble value read from efuse */
-#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	((val >> pos) & mask)
+/* Get TUNE value from efuse bit-mask */
+#define TUNE_VAL_MASK(val, pos, mask)	((val >> pos) & mask)
 
 #define QUSB2PHY_INTR_CTRL		0x22C
 #define DMSE_INTR_HIGH_SEL              BIT(4)
@@ -51,7 +47,8 @@
 #define DMSE_INTERRUPT			BIT(1)
 #define DPSE_INTERRUPT			BIT(0)
 
-#define QUSB2PHY_PORT_TUNE2		0x240
+#define QUSB2PHY_PORT_TUNE1		0x23c
+#define QUSB2PHY_TEST1			0x24C
 
 #define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
 #define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
@@ -64,14 +61,14 @@
 #define LINESTATE_DP			BIT(0)
 #define LINESTATE_DM			BIT(1)
 
-unsigned int phy_tune2;
-module_param(phy_tune2, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(phy_tune2, "QUSB PHY v2 TUNE2");
+unsigned int phy_tune1;
+module_param(phy_tune1, uint, 0644);
+MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
 
 struct qusb_phy {
 	struct usb_phy		phy;
 	void __iomem		*base;
-	void __iomem		*tune2_efuse_reg;
+	void __iomem		*efuse_reg;
 
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
@@ -87,9 +84,9 @@
 	int			host_init_seq_len;
 	int			*qusb_phy_host_init_seq;
 
-	u32			tune2_val;
-	int			tune2_efuse_bit_pos;
-	int			tune2_efuse_num_of_bits;
+	u32			tune_val;
+	int			efuse_bit_pos;
+	int			efuse_num_of_bits;
 
 	bool			power_enabled;
 	bool			clocks_enabled;
@@ -118,15 +115,23 @@
 
 	if (!qphy->clocks_enabled && on) {
 		clk_prepare_enable(qphy->ref_clk_src);
-		clk_prepare_enable(qphy->ref_clk);
-		clk_prepare_enable(qphy->cfg_ahb_clk);
+		if (qphy->ref_clk)
+			clk_prepare_enable(qphy->ref_clk);
+
+		if (qphy->cfg_ahb_clk)
+			clk_prepare_enable(qphy->cfg_ahb_clk);
+
 		qphy->clocks_enabled = true;
 	}
 
 	if (qphy->clocks_enabled && !on) {
-		clk_disable_unprepare(qphy->ref_clk);
+		if (qphy->cfg_ahb_clk)
+			clk_disable_unprepare(qphy->cfg_ahb_clk);
+
+		if (qphy->ref_clk)
+			clk_disable_unprepare(qphy->ref_clk);
+
 		clk_disable_unprepare(qphy->ref_clk_src);
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
 		qphy->clocks_enabled = false;
 	}
 
@@ -280,40 +285,35 @@
 	return ret;
 }
 
-static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+static void qusb_phy_get_tune1_param(struct qusb_phy *qphy)
 {
-	u8 num_of_bits;
+	u8 reg;
 	u32 bit_mask = 1;
 
 	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
-				qphy->tune2_efuse_num_of_bits,
-				qphy->tune2_efuse_bit_pos);
+				qphy->efuse_num_of_bits,
+				qphy->efuse_bit_pos);
 
 	/* get bit mask based on number of bits to use with efuse reg */
-	if (qphy->tune2_efuse_num_of_bits) {
-		num_of_bits = qphy->tune2_efuse_num_of_bits;
-		bit_mask = (bit_mask << num_of_bits) - 1;
-	}
+	bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1;
 
 	/*
-	 * Read EFUSE register having TUNE2 parameter's high nibble.
-	 * If efuse register shows value as 0x0, then use default value
-	 * as 0xB as high nibble. Otherwise use efuse register based
-	 * value for this purpose.
+	 * If the efuse register is programmed (i.e. non-zero), use it to
+	 * set the tune parameters.
 	 */
-	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
-	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
-				__func__, bit_mask, qphy->tune2_val);
+	qphy->tune_val = readl_relaxed(qphy->efuse_reg);
+	pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n",
+				__func__, bit_mask, qphy->tune_val);
 
-	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
-				qphy->tune2_efuse_bit_pos, bit_mask);
+	qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val,
+				qphy->efuse_bit_pos, bit_mask);
+	reg = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE1);
+	if (qphy->tune_val) {
+		reg = reg & 0x0f;
+		reg |= (qphy->tune_val << 4);
+	}
 
-	if (!qphy->tune2_val)
-		qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
-
-	/* Get TUNE2 byte value using high and low nibble value */
-	qphy->tune2_val = ((qphy->tune2_val << 0x4) |
-					TUNE2_DEFAULT_LOW_NIBBLE);
+	qphy->tune_val = reg;
 }
 
 static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
@@ -419,27 +419,22 @@
 	if (qphy->qusb_phy_init_seq)
 		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
 				qphy->init_seq_len, 0);
-	/*
-	 * Check for EFUSE value only if tune2_efuse_reg is available
-	 * and try to read EFUSE value only once i.e. not every USB
-	 * cable connect case.
-	 */
-	if (qphy->tune2_efuse_reg) {
-		if (!qphy->tune2_val)
-			qusb_phy_get_tune2_param(qphy);
+	if (qphy->efuse_reg) {
+		if (!qphy->tune_val)
+			qusb_phy_get_tune1_param(qphy);
 
-		pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
-				qphy->tune2_val);
-		writel_relaxed(qphy->tune2_val,
-				qphy->base + QUSB2PHY_PORT_TUNE2);
+		pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__,
+				qphy->tune_val);
+		writel_relaxed(qphy->tune_val,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
 	}
 
-	/* If phy_tune2 modparam set, override tune2 value */
-	if (phy_tune2) {
-		pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
-						__func__, phy_tune2);
-		writel_relaxed(phy_tune2,
-				qphy->base + QUSB2PHY_PORT_TUNE2);
+	/* If phy_tune1 modparam set, override tune1 value */
+	if (phy_tune1) {
+		pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
+						__func__, phy_tune1);
+		writel_relaxed(phy_tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
 	}
 
 	/* ensure above writes are completed before re-enabling PHY */
@@ -542,6 +537,14 @@
 			writel_relaxed(intr_mask,
 				qphy->base + QUSB2PHY_INTR_CTRL);
 
+			/* enable phy auto-resume */
+			writel_relaxed(0x91,
+					qphy->base + QUSB2PHY_TEST1);
+			/* flush the previous write before next write */
+			wmb();
+			writel_relaxed(0x90,
+				qphy->base + QUSB2PHY_TEST1);
+
 			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
 			__func__, intr_mask);
 
@@ -722,37 +725,50 @@
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-							"tune2_efuse_addr");
+							"efuse_addr");
 	if (res) {
-		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+		qphy->efuse_reg = devm_ioremap_nocache(dev, res->start,
 							resource_size(res));
-		if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+		if (!IS_ERR_OR_NULL(qphy->efuse_reg)) {
 			ret = of_property_read_u32(dev->of_node,
-					"qcom,tune2-efuse-bit-pos",
-					&qphy->tune2_efuse_bit_pos);
+					"qcom,efuse-bit-pos",
+					&qphy->efuse_bit_pos);
 			if (!ret) {
 				ret = of_property_read_u32(dev->of_node,
-						"qcom,tune2-efuse-num-bits",
-						&qphy->tune2_efuse_num_of_bits);
+						"qcom,efuse-num-bits",
+						&qphy->efuse_num_of_bits);
 			}
 
 			if (ret) {
 				dev_err(dev,
-				"DT Value for tune2 efuse is invalid.\n");
+				"DT Value for efuse is invalid.\n");
 				return -EINVAL;
 			}
 		}
 	}
 
+	/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
-	if (IS_ERR(qphy->ref_clk_src))
+	if (IS_ERR(qphy->ref_clk_src)) {
 		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+		ret = PTR_ERR(qphy->ref_clk_src);
+		return ret;
+	}
 
-	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
-	if (IS_ERR(qphy->ref_clk))
-		dev_dbg(dev, "clk get failed for ref_clk\n");
-	else
+	/* ref_clk is needed only for DIFF_CLK case, hence make it optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "ref_clk") >= 0) {
+		qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+		if (IS_ERR(qphy->ref_clk)) {
+			ret = PTR_ERR(qphy->ref_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_dbg(dev,
+					"clk get failed for ref_clk\n");
+			return ret;
+		}
+
 		clk_set_rate(qphy->ref_clk, 19200000);
+	}
 
 	if (of_property_match_string(pdev->dev.of_node,
 				"clock-names", "cfg_ahb_clk") >= 0) {
@@ -933,14 +949,7 @@
 	struct qusb_phy *qphy = platform_get_drvdata(pdev);
 
 	usb_remove_phy(&qphy->phy);
-
-	if (qphy->clocks_enabled) {
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
-		clk_disable_unprepare(qphy->ref_clk);
-		clk_disable_unprepare(qphy->ref_clk_src);
-		qphy->clocks_enabled = false;
-	}
-
+	qusb_phy_enable_clocks(qphy, false);
 	qusb_phy_enable_power(qphy, false, true);
 
 	return 0;
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index d2afcc1..ee521a0 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -84,6 +84,7 @@
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*aux_clk;
+	struct clk		*com_aux_clk;
 	struct clk		*cfg_ahb_clk;
 	struct clk		*pipe_clk;
 	struct reset_control	*phy_reset;
@@ -114,6 +115,8 @@
 };
 MODULE_DEVICE_TABLE(of, msm_usb_id_table);
 
+static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on);
+
 static inline char *get_cable_status_str(struct msm_ssphy_qmp *phy)
 {
 	return phy->cable_connected ? "connected" : "disconnected";
@@ -292,17 +295,7 @@
 		return ret;
 	}
 
-	if (!phy->clk_enabled) {
-		if (phy->ref_clk_src)
-			clk_prepare_enable(phy->ref_clk_src);
-		if (phy->ref_clk)
-			clk_prepare_enable(phy->ref_clk);
-		clk_prepare_enable(phy->aux_clk);
-		clk_prepare_enable(phy->cfg_ahb_clk);
-		clk_set_rate(phy->pipe_clk, 125000000);
-		clk_prepare_enable(phy->pipe_clk);
-		phy->clk_enabled = true;
-	}
+	msm_ssphy_qmp_enable_clks(phy, true);
 
 	writel_relaxed(0x01,
 		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -469,29 +462,13 @@
 		/* Make sure above write completed with PHY */
 		wmb();
 
-		clk_disable_unprepare(phy->cfg_ahb_clk);
-		clk_disable_unprepare(phy->aux_clk);
-		clk_disable_unprepare(phy->pipe_clk);
-		if (phy->ref_clk)
-			clk_disable_unprepare(phy->ref_clk);
-		if (phy->ref_clk_src)
-			clk_disable_unprepare(phy->ref_clk_src);
-		phy->clk_enabled = false;
+		msm_ssphy_qmp_enable_clks(phy, false);
 		phy->in_suspend = true;
 		msm_ssphy_power_enable(phy, 0);
 		dev_dbg(uphy->dev, "QMP PHY is suspend\n");
 	} else {
 		msm_ssphy_power_enable(phy, 1);
-		clk_prepare_enable(phy->pipe_clk);
-		if (!phy->clk_enabled) {
-			if (phy->ref_clk_src)
-				clk_prepare_enable(phy->ref_clk_src);
-			if (phy->ref_clk)
-				clk_prepare_enable(phy->ref_clk);
-			clk_prepare_enable(phy->aux_clk);
-			clk_prepare_enable(phy->cfg_ahb_clk);
-			phy->clk_enabled = true;
-		}
+		msm_ssphy_qmp_enable_clks(phy, true);
 		if (!phy->cable_connected) {
 			writel_relaxed(0x01,
 			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -533,16 +510,9 @@
 	return 0;
 }
 
-static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+static int msm_ssphy_qmp_get_clks(struct msm_ssphy_qmp *phy, struct device *dev)
 {
-	struct msm_ssphy_qmp *phy;
-	struct device *dev = &pdev->dev;
-	struct resource *res;
-	int ret = 0, size = 0, len;
-
-	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
-	if (!phy)
-		return -ENOMEM;
+	int ret = 0;
 
 	phy->aux_clk = devm_clk_get(dev, "aux_clk");
 	if (IS_ERR(phy->aux_clk)) {
@@ -552,11 +522,10 @@
 			dev_err(dev, "failed to get aux_clk\n");
 		goto err;
 	}
-
 	clk_set_rate(phy->aux_clk, clk_round_rate(phy->aux_clk, ULONG_MAX));
 
-	if (of_property_match_string(pdev->dev.of_node,
-				"clock-names", "cfg_ahb_clk") >= 0) {
+	if (of_property_match_string(dev->of_node,
+			"clock-names", "cfg_ahb_clk") >= 0) {
 		phy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
 		if (IS_ERR(phy->cfg_ahb_clk)) {
 			ret = PTR_ERR(phy->cfg_ahb_clk);
@@ -576,6 +545,88 @@
 		goto err;
 	}
 
+	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(phy->ref_clk_src))
+		phy->ref_clk_src = NULL;
+
+	phy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(phy->ref_clk))
+		phy->ref_clk = NULL;
+
+	if (of_property_match_string(dev->of_node,
+			"clock-names", "com_aux_clk") >= 0) {
+		phy->com_aux_clk = devm_clk_get(dev, "com_aux_clk");
+		if (IS_ERR(phy->com_aux_clk)) {
+			ret = PTR_ERR(phy->com_aux_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"failed to get com_aux_clk ret %d\n", ret);
+			goto err;
+		}
+	}
+
+err:
+	return ret;
+}
+
+static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on)
+{
+	dev_dbg(phy->phy.dev, "%s(): clk_enabled:%d on:%d\n", __func__,
+					phy->clk_enabled, on);
+
+	if (!phy->clk_enabled && on) {
+		if (phy->ref_clk_src)
+			clk_prepare_enable(phy->ref_clk_src);
+
+		if (phy->ref_clk)
+			clk_prepare_enable(phy->ref_clk);
+
+		if (phy->com_aux_clk)
+			clk_prepare_enable(phy->com_aux_clk);
+
+		clk_prepare_enable(phy->aux_clk);
+		if (phy->cfg_ahb_clk)
+			clk_prepare_enable(phy->cfg_ahb_clk);
+
+		clk_prepare_enable(phy->pipe_clk);
+		phy->clk_enabled = true;
+	}
+
+	if (phy->clk_enabled && !on) {
+		clk_disable_unprepare(phy->pipe_clk);
+
+		if (phy->cfg_ahb_clk)
+			clk_disable_unprepare(phy->cfg_ahb_clk);
+
+		clk_disable_unprepare(phy->aux_clk);
+		if (phy->com_aux_clk)
+			clk_disable_unprepare(phy->com_aux_clk);
+
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+
+		if (phy->ref_clk_src)
+			clk_disable_unprepare(phy->ref_clk_src);
+
+		phy->clk_enabled = false;
+	}
+}
+
+static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0, len;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	ret = msm_ssphy_qmp_get_clks(phy, dev);
+	if (ret)
+		goto err;
+
 	phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
 	if (IS_ERR(phy->phy_reset)) {
 		ret = PTR_ERR(phy->phy_reset);
@@ -726,13 +777,6 @@
 		goto err;
 	}
 
-	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
-	if (IS_ERR(phy->ref_clk_src))
-		phy->ref_clk_src = NULL;
-	phy->ref_clk = devm_clk_get(dev, "ref_clk");
-	if (IS_ERR(phy->ref_clk))
-		phy->ref_clk = NULL;
-
 	platform_set_drvdata(pdev, phy);
 
 	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
@@ -760,14 +804,8 @@
 		return 0;
 
 	usb_remove_phy(&phy->phy);
-	if (phy->ref_clk)
-		clk_disable_unprepare(phy->ref_clk);
-	if (phy->ref_clk_src)
-		clk_disable_unprepare(phy->ref_clk_src);
+	msm_ssphy_qmp_enable_clks(phy, false);
 	msm_ssusb_qmp_ldo_enable(phy, 0);
-	clk_disable_unprepare(phy->aux_clk);
-	clk_disable_unprepare(phy->cfg_ahb_clk);
-	clk_disable_unprepare(phy->pipe_clk);
 	kfree(phy);
 	return 0;
 }
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e..af67a0d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@
 #define BANDRICH_PRODUCT_1012			0x1012
 
 #define QUALCOMM_VENDOR_ID			0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20			0x9003
+#define QUECTEL_PRODUCT_UC15			0x9090
+
+#define QUECTEL_VENDOR_ID			0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21			0x0121
+#define QUECTEL_PRODUCT_EC25			0x0125
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -1161,7 +1169,14 @@
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
-	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+	/* Quectel products using Qualcomm vendor ID */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	/* Quectel products using Quectel vendor ID */
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458d..38b3f0d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -169,6 +169,8 @@
 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+	{DEVICE_SWI(0x413c, 0x81b5)},	/* Dell Wireless 5811e QDL */
+	{DEVICE_SWI(0x413c, 0x81b6)},	/* Dell Wireless 5811e QDL */
 
 	/* Huawei devices */
 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd..d01496f 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@
 	int result;
 	struct device *dev = &iface->dev;
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+		return -ENODEV;
+
 	result = wa_rpipes_create(wa);
 	if (result < 0)
 		goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0aa6c3c..35a1e77 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -823,6 +823,9 @@
 	struct hwarc *hwarc;
 	struct device *dev = &iface->dev;
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	result = -ENOMEM;
 	uwb_rc = uwb_rc_alloc();
 	if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846..6345e85 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@
 				 result);
 	}
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	result = -ENOMEM;
 	i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
 	if (i1480_usb == NULL) {
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 80378dd..c882357 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
 static void tce_iommu_detach_group(void *iommu_data,
 		struct iommu_group *iommu_group);
 
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 {
 	long ret = 0, locked, lock_limit;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
+	if (WARN_ON_ONCE(!mm))
+		return -EPERM;
 
 	if (!npages)
 		return 0;
 
-	down_write(&current->mm->mmap_sem);
-	locked = current->mm->locked_vm + npages;
+	down_write(&mm->mmap_sem);
+	locked = mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 		ret = -ENOMEM;
 	else
-		current->mm->locked_vm += npages;
+		mm->locked_vm += npages;
 
 	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK),
 			ret ? " - exceeded" : "");
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 
 	return ret;
 }
 
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
 {
-	if (!current || !current->mm || !npages)
-		return; /* process exited */
+	if (!mm || !npages)
+		return;
 
-	down_write(&current->mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
-		npages = current->mm->locked_vm;
-	current->mm->locked_vm -= npages;
+	down_write(&mm->mmap_sem);
+	if (WARN_ON_ONCE(npages > mm->locked_vm))
+		npages = mm->locked_vm;
+	mm->locked_vm -= npages;
 	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK));
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 }
 
 /*
@@ -89,6 +89,15 @@
 };
 
 /*
+ * A container needs to remember which preregistered region it has
+ * referenced to do proper cleanup at the userspace process exit.
+ */
+struct tce_iommu_prereg {
+	struct list_head next;
+	struct mm_iommu_table_group_mem_t *mem;
+};
+
+/*
  * The container descriptor supports only a single group per container.
  * Required by the API as the container is not supplied with the IOMMU group
  * at the moment of initialization.
@@ -97,24 +106,68 @@
 	struct mutex lock;
 	bool enabled;
 	bool v2;
+	bool def_window_pending;
 	unsigned long locked_pages;
+	struct mm_struct *mm;
 	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
 	struct list_head group_list;
+	struct list_head prereg_list;
 };
 
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+	if (container->mm) {
+		if (container->mm == current->mm)
+			return 0;
+		return -EPERM;
+	}
+	BUG_ON(!current->mm);
+	container->mm = current->mm;
+	atomic_inc(&container->mm->mm_count);
+
+	return 0;
+}
+
+static long tce_iommu_prereg_free(struct tce_container *container,
+		struct tce_iommu_prereg *tcemem)
+{
+	long ret;
+
+	ret = mm_iommu_put(container->mm, tcemem->mem);
+	if (ret)
+		return ret;
+
+	list_del(&tcemem->next);
+	kfree(tcemem);
+
+	return 0;
+}
+
 static long tce_iommu_unregister_pages(struct tce_container *container,
 		__u64 vaddr, __u64 size)
 {
 	struct mm_iommu_table_group_mem_t *mem;
+	struct tce_iommu_prereg *tcemem;
+	bool found = false;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
 
-	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
 	if (!mem)
 		return -ENOENT;
 
-	return mm_iommu_put(mem);
+	list_for_each_entry(tcemem, &container->prereg_list, next) {
+		if (tcemem->mem == mem) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	return tce_iommu_prereg_free(container, tcemem);
 }
 
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -122,22 +175,36 @@
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem = NULL;
+	struct tce_iommu_prereg *tcemem;
 	unsigned long entries = size >> PAGE_SHIFT;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
 			((vaddr + size) < vaddr))
 		return -EINVAL;
 
-	ret = mm_iommu_get(vaddr, entries, &mem);
+	mem = mm_iommu_find(container->mm, vaddr, entries);
+	if (mem) {
+		list_for_each_entry(tcemem, &container->prereg_list, next) {
+			if (tcemem->mem == mem)
+				return -EBUSY;
+		}
+	}
+
+	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
 	if (ret)
 		return ret;
 
+	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+	tcemem->mem = mem;
+	list_add(&tcemem->next, &container->prereg_list);
+
 	container->enabled = true;
 
 	return 0;
 }
 
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -146,13 +213,13 @@
 
 	BUG_ON(tbl->it_userspace);
 
-	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
 	uas = vzalloc(cb);
 	if (!uas) {
-		decrement_locked_vm(cb >> PAGE_SHIFT);
+		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
 	tbl->it_userspace = uas;
@@ -160,7 +227,8 @@
 	return 0;
 }
 
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -170,7 +238,7 @@
 
 	vfree(tbl->it_userspace);
 	tbl->it_userspace = NULL;
-	decrement_locked_vm(cb >> PAGE_SHIFT);
+	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 }
 
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -230,9 +298,6 @@
 	struct iommu_table_group *table_group;
 	struct tce_iommu_group *tcegrp;
 
-	if (!current->mm)
-		return -ESRCH; /* process exited */
-
 	if (container->enabled)
 		return -EBUSY;
 
@@ -277,8 +342,12 @@
 	if (!table_group->tce32_size)
 		return -EPERM;
 
+	ret = tce_iommu_mm_set(container);
+	if (ret)
+		return ret;
+
 	locked = table_group->tce32_size >> PAGE_SHIFT;
-	ret = try_increment_locked_vm(locked);
+	ret = try_increment_locked_vm(container->mm, locked);
 	if (ret)
 		return ret;
 
@@ -296,10 +365,8 @@
 
 	container->enabled = false;
 
-	if (!current->mm)
-		return;
-
-	decrement_locked_vm(container->locked_pages);
+	BUG_ON(!container->mm);
+	decrement_locked_vm(container->mm, container->locked_pages);
 }
 
 static void *tce_iommu_open(unsigned long arg)
@@ -317,6 +384,7 @@
 
 	mutex_init(&container->lock);
 	INIT_LIST_HEAD_RCU(&container->group_list);
+	INIT_LIST_HEAD_RCU(&container->prereg_list);
 
 	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
 
@@ -326,7 +394,8 @@
 static int tce_iommu_clear(struct tce_container *container,
 		struct iommu_table *tbl,
 		unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl);
 
 static void tce_iommu_release(void *iommu_data)
 {
@@ -351,10 +420,20 @@
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_free_table(tbl);
+		tce_iommu_free_table(container, tbl);
+	}
+
+	while (!list_empty(&container->prereg_list)) {
+		struct tce_iommu_prereg *tcemem;
+
+		tcemem = list_first_entry(&container->prereg_list,
+				struct tce_iommu_prereg, next);
+		WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
 	}
 
 	tce_iommu_disable(container);
+	if (container->mm)
+		mmdrop(container->mm);
 	mutex_destroy(&container->lock);
 
 	kfree(container);
@@ -369,13 +448,14 @@
 	put_page(page);
 }
 
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+		unsigned long tce, unsigned long size,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, size);
 	if (!mem)
 		return -EINVAL;
 
@@ -388,18 +468,18 @@
 	return 0;
 }
 
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
-		unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+		struct iommu_table *tbl, unsigned long entry)
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
 	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 
-	if (!pua || !current || !current->mm)
+	if (!pua)
 		return;
 
-	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
 			&hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -429,7 +509,7 @@
 			continue;
 
 		if (container->v2) {
-			tce_iommu_unuse_page_v2(tbl, entry);
+			tce_iommu_unuse_page_v2(container, tbl, entry);
 			continue;
 		}
 
@@ -509,13 +589,19 @@
 	unsigned long hpa;
 	enum dma_data_direction dirtmp;
 
+	if (!tbl->it_userspace) {
+		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < pages; ++i) {
 		struct mm_iommu_table_group_mem_t *mem = NULL;
 		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
 				entry + i);
 
-		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
-				&hpa, &mem);
+		ret = tce_iommu_prereg_ua_to_hpa(container,
+				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
 		if (ret)
 			break;
 
@@ -536,7 +622,7 @@
 		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
 					__func__, entry << tbl->it_page_shift,
 					tce, ret);
@@ -544,7 +630,7 @@
 		}
 
 		if (dirtmp != DMA_NONE)
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 
 		*pua = tce;
 
@@ -572,7 +658,7 @@
 	if (!table_size)
 		return -EINVAL;
 
-	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
@@ -582,25 +668,17 @@
 	WARN_ON(!ret && !(*ptbl)->it_ops->free);
 	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
 
-	if (!ret && container->v2) {
-		ret = tce_iommu_userspace_view_alloc(*ptbl);
-		if (ret)
-			(*ptbl)->it_ops->free(*ptbl);
-	}
-
-	if (ret)
-		decrement_locked_vm(table_size >> PAGE_SHIFT);
-
 	return ret;
 }
 
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl)
 {
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
-	tce_iommu_userspace_view_free(tbl);
+	tce_iommu_userspace_view_free(tbl, container->mm);
 	tbl->it_ops->free(tbl);
-	decrement_locked_vm(pages);
+	decrement_locked_vm(container->mm, pages);
 }
 
 static long tce_iommu_create_window(struct tce_container *container,
@@ -663,7 +741,7 @@
 		table_group = iommu_group_get_iommudata(tcegrp->grp);
 		table_group->ops->unset_window(table_group, num);
 	}
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 
 	return ret;
 }
@@ -701,12 +779,41 @@
 
 	/* Free table */
 	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 	container->tables[num] = NULL;
 
 	return 0;
 }
 
+static long tce_iommu_create_default_window(struct tce_container *container)
+{
+	long ret;
+	__u64 start_addr = 0;
+	struct tce_iommu_group *tcegrp;
+	struct iommu_table_group *table_group;
+
+	if (!container->def_window_pending)
+		return 0;
+
+	if (!tce_groups_attached(container))
+		return -ENODEV;
+
+	tcegrp = list_first_entry(&container->group_list,
+			struct tce_iommu_group, next);
+	table_group = iommu_group_get_iommudata(tcegrp->grp);
+	if (!table_group)
+		return -ENODEV;
+
+	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
+			table_group->tce32_size, 1, &start_addr);
+	WARN_ON_ONCE(!ret && start_addr);
+
+	if (!ret)
+		container->def_window_pending = false;
+
+	return ret;
+}
+
 static long tce_iommu_ioctl(void *iommu_data,
 				 unsigned int cmd, unsigned long arg)
 {
@@ -727,7 +834,17 @@
 		}
 
 		return (ret < 0) ? 0 : ret;
+	}
 
+	/*
+	 * Sanity check to prevent one userspace from manipulating
+	 * another userspace mm.
+	 */
+	BUG_ON(!container);
+	if (container->mm && container->mm != current->mm)
+		return -EPERM;
+
+	switch (cmd) {
 	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
 		struct vfio_iommu_spapr_tce_info info;
 		struct tce_iommu_group *tcegrp;
@@ -797,6 +914,10 @@
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		num = tce_iommu_find_table(container, param.iova, &tbl);
 		if (num < 0)
 			return -ENXIO;
@@ -860,6 +981,10 @@
 		if (param.flags)
 			return -EINVAL;
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		num = tce_iommu_find_table(container, param.iova, &tbl);
 		if (num < 0)
 			return -ENXIO;
@@ -888,6 +1013,10 @@
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (copy_from_user(&param, (void __user *)arg, minsz))
 			return -EFAULT;
 
@@ -911,6 +1040,9 @@
 		if (!container->v2)
 			break;
 
+		if (!container->mm)
+			return -EPERM;
+
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
@@ -969,6 +1101,10 @@
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -986,6 +1122,10 @@
 
 		mutex_lock(&container->lock);
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		ret = tce_iommu_create_window(container, create.page_shift,
 				create.window_size, create.levels,
 				&create.start_addr);
@@ -1003,6 +1143,10 @@
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -1018,6 +1162,11 @@
 		if (remove.flags)
 			return -EINVAL;
 
+		if (container->def_window_pending && !remove.start_addr) {
+			container->def_window_pending = false;
+			return 0;
+		}
+
 		mutex_lock(&container->lock);
 
 		ret = tce_iommu_remove_window(container, remove.start_addr);
@@ -1043,7 +1192,7 @@
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_userspace_view_free(tbl);
+		tce_iommu_userspace_view_free(tbl, container->mm);
 		if (tbl->it_map)
 			iommu_release_ownership(tbl);
 
@@ -1062,10 +1211,7 @@
 		if (!tbl || !tbl->it_map)
 			continue;
 
-		rc = tce_iommu_userspace_view_alloc(tbl);
-		if (!rc)
-			rc = iommu_take_ownership(tbl);
-
+		rc = iommu_take_ownership(tbl);
 		if (rc) {
 			for (j = 0; j < i; ++j)
 				iommu_release_ownership(
@@ -1100,9 +1246,6 @@
 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 		struct iommu_table_group *table_group)
 {
-	long i, ret = 0;
-	struct iommu_table *tbl = NULL;
-
 	if (!table_group->ops->create_table || !table_group->ops->set_window ||
 			!table_group->ops->release_ownership) {
 		WARN_ON_ONCE(1);
@@ -1111,47 +1254,7 @@
 
 	table_group->ops->take_ownership(table_group);
 
-	/*
-	 * If it the first group attached, check if there is
-	 * a default DMA window and create one if none as
-	 * the userspace expects it to exist.
-	 */
-	if (!tce_groups_attached(container) && !container->tables[0]) {
-		ret = tce_iommu_create_table(container,
-				table_group,
-				0, /* window number */
-				IOMMU_PAGE_SHIFT_4K,
-				table_group->tce32_size,
-				1, /* default levels */
-				&tbl);
-		if (ret)
-			goto release_exit;
-		else
-			container->tables[0] = tbl;
-	}
-
-	/* Set all windows to the new group */
-	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
-		tbl = container->tables[i];
-
-		if (!tbl)
-			continue;
-
-		/* Set the default window to a new group */
-		ret = table_group->ops->set_window(table_group, i, tbl);
-		if (ret)
-			goto release_exit;
-	}
-
 	return 0;
-
-release_exit:
-	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
-		table_group->ops->unset_window(table_group, i);
-
-	table_group->ops->release_ownership(table_group);
-
-	return ret;
 }
 
 static int tce_iommu_attach_group(void *iommu_data,
@@ -1203,10 +1306,13 @@
 	}
 
 	if (!table_group->ops || !table_group->ops->take_ownership ||
-			!table_group->ops->release_ownership)
+			!table_group->ops->release_ownership) {
 		ret = tce_iommu_take_ownership(container, table_group);
-	else
+	} else {
 		ret = tce_iommu_take_ownership_ddw(container, table_group);
+		if (!tce_groups_attached(container) && !container->tables[0])
+			container->def_window_pending = true;
+	}
 
 	if (!ret) {
 		tcegrp->grp = iommu_group;
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 288318a..16c8fcf 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -199,6 +199,11 @@
 	if (rc)
 		return rc;
 
+	bd->usr_brightness_req = brightness;
+	brightness = (brightness <= bd->thermal_brightness_limit) ?
+				bd->usr_brightness_req :
+				bd->thermal_brightness_limit;
+
 	rc = backlight_device_set_brightness(bd, brightness);
 
 	return rc ? rc : count;
@@ -310,6 +315,63 @@
 }
 EXPORT_SYMBOL(backlight_force_update);
 
+static int bd_cdev_get_max_brightness(struct thermal_cooling_device *cdev,
+					unsigned long *state)
+{
+	struct backlight_device *bd = (struct backlight_device *)cdev->devdata;
+
+	*state = bd->props.max_brightness;
+
+	return 0;
+}
+
+static int bd_cdev_get_cur_brightness(struct thermal_cooling_device *cdev,
+					unsigned long *state)
+{
+	struct backlight_device *bd = (struct backlight_device *)cdev->devdata;
+
+	*state = bd->props.max_brightness - bd->thermal_brightness_limit;
+
+	return 0;
+}
+
+static int bd_cdev_set_cur_brightness(struct thermal_cooling_device *cdev,
+					unsigned long state)
+{
+	struct backlight_device *bd = (struct backlight_device *)cdev->devdata;
+	int brightness_lvl;
+
+	brightness_lvl = bd->props.max_brightness - state;
+	if (brightness_lvl == bd->thermal_brightness_limit)
+		return 0;
+
+	bd->thermal_brightness_limit = brightness_lvl;
+	brightness_lvl = (bd->usr_brightness_req
+				<= bd->thermal_brightness_limit) ?
+				bd->usr_brightness_req :
+				bd->thermal_brightness_limit;
+	backlight_device_set_brightness(bd, brightness_lvl);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops bd_cdev_ops = {
+	.get_max_state = bd_cdev_get_max_brightness,
+	.get_cur_state = bd_cdev_get_cur_brightness,
+	.set_cur_state = bd_cdev_set_cur_brightness,
+};
+
+static void backlight_cdev_register(struct device *parent,
+				    struct backlight_device *bd)
+{
+	if (of_find_property(parent->of_node, "#cooling-cells", NULL)) {
+		bd->cdev = thermal_of_cooling_device_register(parent->of_node,
+				(char *)dev_name(&bd->dev), bd, &bd_cdev_ops);
+		if (!bd->cdev)
+			pr_err("Cooling device register failed\n");
+	}
+}
+
 /**
  * backlight_device_register - create and register a new object of
  *   backlight_device class.
@@ -353,6 +415,8 @@
 			WARN(1, "%s: invalid backlight type", name);
 			new_bd->props.type = BACKLIGHT_RAW;
 		}
+		new_bd->thermal_brightness_limit = props->max_brightness;
+		new_bd->usr_brightness_req = props->brightness;
 	} else {
 		new_bd->props.type = BACKLIGHT_RAW;
 	}
@@ -369,6 +433,7 @@
 		return ERR_PTR(rc);
 	}
 
+	backlight_cdev_register(parent, new_bd);
 	new_bd->ops = ops;
 
 #ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index b87f5cf..4db10d7 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1167,6 +1167,8 @@
 	p->userfont = 0;
 }
 
+static void set_vc_hi_font(struct vc_data *vc, bool set);
+
 static void fbcon_deinit(struct vc_data *vc)
 {
 	struct display *p = &fb_display[vc->vc_num];
@@ -1202,6 +1204,9 @@
 	if (free_font)
 		vc->vc_font.data = NULL;
 
+	if (vc->vc_hi_font_mask)
+		set_vc_hi_font(vc, false);
+
 	if (!con_is_bound(&fb_con))
 		fbcon_exit();
 
@@ -2438,32 +2443,10 @@
 	return 0;
 }
 
-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
-			     const u8 * data, int userfont)
+/* set/clear vc_hi_font_mask and update vc attrs accordingly */
+static void set_vc_hi_font(struct vc_data *vc, bool set)
 {
-	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
-	struct fbcon_ops *ops = info->fbcon_par;
-	struct display *p = &fb_display[vc->vc_num];
-	int resize;
-	int cnt;
-	char *old_data = NULL;
-
-	if (con_is_visible(vc) && softback_lines)
-		fbcon_set_origin(vc);
-
-	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
-	if (p->userfont)
-		old_data = vc->vc_font.data;
-	if (userfont)
-		cnt = FNTCHARCNT(data);
-	else
-		cnt = 256;
-	vc->vc_font.data = (void *)(p->fontdata = data);
-	if ((p->userfont = userfont))
-		REFCOUNT(data)++;
-	vc->vc_font.width = w;
-	vc->vc_font.height = h;
-	if (vc->vc_hi_font_mask && cnt == 256) {
+	if (!set) {
 		vc->vc_hi_font_mask = 0;
 		if (vc->vc_can_do_color) {
 			vc->vc_complement_mask >>= 1;
@@ -2486,7 +2469,7 @@
 			    ((c & 0xfe00) >> 1) | (c & 0xff);
 			vc->vc_attr >>= 1;
 		}
-	} else if (!vc->vc_hi_font_mask && cnt == 512) {
+	} else {
 		vc->vc_hi_font_mask = 0x100;
 		if (vc->vc_can_do_color) {
 			vc->vc_complement_mask <<= 1;
@@ -2518,8 +2501,38 @@
 			} else
 				vc->vc_video_erase_char = c & ~0x100;
 		}
-
 	}
+}
+
+static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+			     const u8 * data, int userfont)
+{
+	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+	struct fbcon_ops *ops = info->fbcon_par;
+	struct display *p = &fb_display[vc->vc_num];
+	int resize;
+	int cnt;
+	char *old_data = NULL;
+
+	if (con_is_visible(vc) && softback_lines)
+		fbcon_set_origin(vc);
+
+	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+	if (p->userfont)
+		old_data = vc->vc_font.data;
+	if (userfont)
+		cnt = FNTCHARCNT(data);
+	else
+		cnt = 256;
+	vc->vc_font.data = (void *)(p->fontdata = data);
+	if ((p->userfont = userfont))
+		REFCOUNT(data)++;
+	vc->vc_font.width = w;
+	vc->vc_font.height = h;
+	if (vc->vc_hi_font_mask && cnt == 256)
+		set_vc_hi_font(vc, false);
+	else if (!vc->vc_hi_font_mask && cnt == 512)
+		set_vc_hi_font(vc, true);
 
 	if (resize) {
 		int cols, rows;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9d2738e..2c2e679 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -427,6 +427,8 @@
 		 * Prime this virtqueue with one buffer so the hypervisor can
 		 * use it to signal us later (it can't be broken yet!).
 		 */
+		update_balloon_stats(vb);
+
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
 		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
 		    < 0)
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4ce10bc..4b85746 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/syscore_ops.h>
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #include <xen/xen.h>
-#include <xen/xen-ops.h>
 #include <xen/interface/platform.h>
 #include <asm/xen/hypercall.h>
 
@@ -466,15 +466,33 @@
 	return rc;
 }
 
-static int xen_acpi_processor_resume(struct notifier_block *nb,
-				     unsigned long action, void *data)
+static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
 {
+	int rc;
+
 	bitmap_zero(acpi_ids_done, nr_acpi_bits);
-	return xen_upload_processor_pm_data();
+
+	rc = xen_upload_processor_pm_data();
+	if (rc != 0)
+		pr_info("ACPI data upload failed, error = %d\n", rc);
 }
 
-struct notifier_block xen_acpi_processor_resume_nb = {
-	.notifier_call = xen_acpi_processor_resume,
+static void xen_acpi_processor_resume(void)
+{
+	static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
+
+	/*
+	 * xen_upload_processor_pm_data() calls non-atomic code.
+	 * However, the context for xen_acpi_processor_resume is syscore
+	 * with only the boot CPU online and in an atomic context.
+	 *
+	 * So defer the upload to a point where it is safe.
+	 */
+	schedule_work(&wq);
+}
+
+static struct syscore_ops xap_syscore_ops = {
+	.resume	= xen_acpi_processor_resume,
 };
 
 static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@
 	if (rc)
 		goto err_unregister;
 
-	xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
+	register_syscore_ops(&xap_syscore_ops);
 
 	return 0;
 err_unregister:
@@ -544,7 +562,7 @@
 {
 	int i;
 
-	xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
+	unregister_syscore_ops(&xap_syscore_ops);
 	kfree(acpi_ids_done);
 	kfree(acpi_id_present);
 	kfree(acpi_id_cst_present);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 98f87fe..61cfcce 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -352,7 +352,6 @@
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
 	struct dentry *dir;
-	struct fscrypt_info *ci;
 	int dir_has_key, cached_with_key;
 
 	if (flags & LOOKUP_RCU)
@@ -364,18 +363,11 @@
 		return 0;
 	}
 
-	ci = d_inode(dir)->i_crypt_info;
-	if (ci && ci->ci_keyring_key &&
-	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-					  (1 << KEY_FLAG_REVOKED) |
-					  (1 << KEY_FLAG_DEAD))))
-		ci = NULL;
-
 	/* this should eventually be an flag in d_flags */
 	spin_lock(&dentry->d_lock);
 	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
 	spin_unlock(&dentry->d_lock);
-	dir_has_key = (ci != NULL);
+	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
 	dput(dir);
 
 	/*
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 9b774f4..80bb956 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -350,7 +350,7 @@
 		fname->disk_name.len = iname->len;
 		return 0;
 	}
-	ret = get_crypt_info(dir);
+	ret = fscrypt_get_encryption_info(dir);
 	if (ret && ret != -EOPNOTSUPP)
 		return ret;
 
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 67fb6d8..bb46063 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -99,6 +99,7 @@
 	kfree(full_key_descriptor);
 	if (IS_ERR(keyring_key))
 		return PTR_ERR(keyring_key);
+	down_read(&keyring_key->sem);
 
 	if (keyring_key->type != &key_type_logon) {
 		printk_once(KERN_WARNING
@@ -106,11 +107,9 @@
 		res = -ENOKEY;
 		goto out;
 	}
-	down_read(&keyring_key->sem);
 	ukp = user_key_payload(keyring_key);
 	if (ukp->datalen != sizeof(struct fscrypt_key)) {
 		res = -EINVAL;
-		up_read(&keyring_key->sem);
 		goto out;
 	}
 	master_key = (struct fscrypt_key *)ukp->data;
@@ -121,17 +120,11 @@
 				"%s: key size incorrect: %d\n",
 				__func__, master_key->size);
 		res = -ENOKEY;
-		up_read(&keyring_key->sem);
 		goto out;
 	}
 	res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
-	up_read(&keyring_key->sem);
-	if (res)
-		goto out;
-
-	crypt_info->ci_keyring_key = keyring_key;
-	return 0;
 out:
+	up_read(&keyring_key->sem);
 	key_put(keyring_key);
 	return res;
 }
@@ -173,12 +166,11 @@
 	if (!ci)
 		return;
 
-	key_put(ci->ci_keyring_key);
 	crypto_free_skcipher(ci->ci_ctfm);
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
-int get_crypt_info(struct inode *inode)
+int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
 	struct fscrypt_context ctx;
@@ -188,21 +180,15 @@
 	u8 *raw_key = NULL;
 	int res;
 
+	if (inode->i_crypt_info)
+		return 0;
+
 	res = fscrypt_initialize();
 	if (res)
 		return res;
 
 	if (!inode->i_sb->s_cop->get_context)
 		return -EOPNOTSUPP;
-retry:
-	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
-	if (crypt_info) {
-		if (!crypt_info->ci_keyring_key ||
-				key_validate(crypt_info->ci_keyring_key) == 0)
-			return 0;
-		fscrypt_put_encryption_info(inode, crypt_info);
-		goto retry;
-	}
 
 	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
 	if (res < 0) {
@@ -230,7 +216,6 @@
 	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
 	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
 	crypt_info->ci_ctfm = NULL;
-	crypt_info->ci_keyring_key = NULL;
 	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
 				sizeof(crypt_info->ci_master_key));
 
@@ -285,14 +270,8 @@
 	if (res)
 		goto out;
 
-	kzfree(raw_key);
-	raw_key = NULL;
-	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
-		put_crypt_info(crypt_info);
-		goto retry;
-	}
-	return 0;
-
+	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
+		crypt_info = NULL;
 out:
 	if (res == -ENOKEY)
 		res = 0;
@@ -300,6 +279,7 @@
 	kzfree(raw_key);
 	return res;
 }
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
 
 void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
 {
@@ -317,17 +297,3 @@
 	put_crypt_info(ci);
 }
 EXPORT_SYMBOL(fscrypt_put_encryption_info);
-
-int fscrypt_get_encryption_info(struct inode *inode)
-{
-	struct fscrypt_info *ci = inode->i_crypt_info;
-
-	if (!ci ||
-		(ci->ci_keyring_key &&
-		 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-					       (1 << KEY_FLAG_REVOKED) |
-					       (1 << KEY_FLAG_DEAD)))))
-		return get_crypt_info(inode);
-	return 0;
-}
-EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 004eebb..a5807fd 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1171,10 +1171,9 @@
 	set_buffer_uptodate(dir_block);
 	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
 	if (err)
-		goto out;
+		return err;
 	set_buffer_verified(dir_block);
-out:
-	return err;
+	return ext4_mark_inode_dirty(handle, inode);
 }
 
 static int ext4_convert_inline_data_nolock(handle_t *handle,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index afe29ba4..5fa9ba1 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3830,7 +3830,7 @@
 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
 		   EXT4_DESC_PER_BLOCK(sb);
 	if (ext4_has_feature_meta_bg(sb)) {
-		if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
 			ext4_msg(sb, KERN_WARNING,
 				 "first meta block group too large: %u "
 				 "(group descriptor block count %u)",
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 4448ed3..3eeed8f 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -131,31 +131,26 @@
 }
 
 static int ext4_xattr_block_csum_verify(struct inode *inode,
-					sector_t block_nr,
-					struct ext4_xattr_header *hdr)
+					struct buffer_head *bh)
 {
-	if (ext4_has_metadata_csum(inode->i_sb) &&
-	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
-		return 0;
-	return 1;
+	struct ext4_xattr_header *hdr = BHDR(bh);
+	int ret = 1;
+
+	if (ext4_has_metadata_csum(inode->i_sb)) {
+		lock_buffer(bh);
+		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+							bh->b_blocknr, hdr));
+		unlock_buffer(bh);
+	}
+	return ret;
 }
 
 static void ext4_xattr_block_csum_set(struct inode *inode,
-				      sector_t block_nr,
-				      struct ext4_xattr_header *hdr)
+				      struct buffer_head *bh)
 {
-	if (!ext4_has_metadata_csum(inode->i_sb))
-		return;
-
-	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
-}
-
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
-						struct inode *inode,
-						struct buffer_head *bh)
-{
-	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
-	return ext4_handle_dirty_metadata(handle, inode, bh);
+	if (ext4_has_metadata_csum(inode->i_sb))
+		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+						bh->b_blocknr, BHDR(bh));
 }
 
 static inline const struct xattr_handler *
@@ -218,7 +213,7 @@
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EFSCORRUPTED;
-	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+	if (!ext4_xattr_block_csum_verify(inode, bh))
 		return -EFSBADCRC;
 	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
 				       bh->b_data);
@@ -601,23 +596,22 @@
 			}
 		}
 
+		ext4_xattr_block_csum_set(inode, bh);
 		/*
 		 * Beware of this ugliness: Releasing of xattr block references
 		 * from different inodes can race and so we have to protect
 		 * from a race where someone else frees the block (and releases
 		 * its journal_head) before we are done dirtying the buffer. In
 		 * nojournal mode this race is harmless and we actually cannot
-		 * call ext4_handle_dirty_xattr_block() with locked buffer as
+		 * call ext4_handle_dirty_metadata() with locked buffer as
 		 * that function can call sync_dirty_buffer() so for that case
 		 * we handle the dirtying after unlocking the buffer.
 		 */
 		if (ext4_handle_valid(handle))
-			error = ext4_handle_dirty_xattr_block(handle, inode,
-							      bh);
+			error = ext4_handle_dirty_metadata(handle, inode, bh);
 		unlock_buffer(bh);
 		if (!ext4_handle_valid(handle))
-			error = ext4_handle_dirty_xattr_block(handle, inode,
-							      bh);
+			error = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (IS_SYNC(inode))
 			ext4_handle_sync(handle);
 		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -846,13 +840,14 @@
 				ext4_xattr_cache_insert(ext4_mb_cache,
 					bs->bh);
 			}
+			ext4_xattr_block_csum_set(inode, bs->bh);
 			unlock_buffer(bs->bh);
 			if (error == -EFSCORRUPTED)
 				goto bad_block;
 			if (!error)
-				error = ext4_handle_dirty_xattr_block(handle,
-								      inode,
-								      bs->bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   bs->bh);
 			if (error)
 				goto cleanup;
 			goto inserted;
@@ -950,10 +945,11 @@
 					ce->e_reusable = 0;
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					  ref);
+				ext4_xattr_block_csum_set(inode, new_bh);
 				unlock_buffer(new_bh);
-				error = ext4_handle_dirty_xattr_block(handle,
-								      inode,
-								      new_bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   new_bh);
 				if (error)
 					goto cleanup_dquot;
 			}
@@ -1003,11 +999,12 @@
 				goto getblk_failed;
 			}
 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+			ext4_xattr_block_csum_set(inode, new_bh);
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
 			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-			error = ext4_handle_dirty_xattr_block(handle,
-							      inode, new_bh);
+			error = ext4_handle_dirty_metadata(handle, inode,
+							   new_bh);
 			if (error)
 				goto cleanup;
 		}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a6a3389..51519c2 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@
 	struct gfs2_sbd *ln_sbd;
 	u64 ln_number;
 	unsigned int ln_type;
-};
+} __packed __aligned(sizeof(int));
 
 #define lm_name_equal(name1, name2) \
         (((name1)->ln_number == (name2)->ln_number) &&	\
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 927da49..7d4b557 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1125,10 +1125,8 @@
 
 	/* Set up a default-sized revoke table for the new mount. */
 	err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
-	if (err) {
-		kfree(journal);
-		return NULL;
-	}
+	if (err)
+		goto err_cleanup;
 
 	spin_lock_init(&journal->j_history_lock);
 
@@ -1145,23 +1143,25 @@
 	journal->j_wbufsize = n;
 	journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
 					GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		kfree(journal);
-		return NULL;
-	}
+	if (!journal->j_wbuf)
+		goto err_cleanup;
 
 	bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
 	if (!bh) {
 		pr_err("%s: Cannot get buffer for journal superblock\n",
 			__func__);
-		kfree(journal->j_wbuf);
-		kfree(journal);
-		return NULL;
+		goto err_cleanup;
 	}
 	journal->j_sb_buffer = bh;
 	journal->j_superblock = (journal_superblock_t *)bh->b_data;
 
 	return journal;
+
+err_cleanup:
+	kfree(journal->j_wbuf);
+	jbd2_journal_destroy_revoke(journal);
+	kfree(journal);
+	return NULL;
 }
 
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 91171dc..3cd7305 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -280,6 +280,7 @@
 
 fail1:
 	jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+	journal->j_revoke_table[0] = NULL;
 fail0:
 	return -ENOMEM;
 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 609840d..1536aeb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7426,11 +7426,11 @@
 	struct nfs41_exchange_id_data *cdata =
 					(struct nfs41_exchange_id_data *)data;
 
-	nfs_put_client(cdata->args.client);
 	if (cdata->xprt) {
 		xprt_put(cdata->xprt);
 		rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
 	}
+	nfs_put_client(cdata->args.client);
 	kfree(cdata->res.impl_id);
 	kfree(cdata->res.server_scope);
 	kfree(cdata->res.server_owner);
@@ -7537,10 +7537,8 @@
 	task_setup_data.callback_data = calldata;
 
 	task = rpc_run_task(&task_setup_data);
-	if (IS_ERR(task)) {
-	status = PTR_ERR(task);
-		goto out_impl_id;
-	}
+	if (IS_ERR(task))
+		return PTR_ERR(task);
 
 	if (!xprt) {
 		status = rpc_wait_for_completion_task(task);
@@ -7568,6 +7566,7 @@
 	kfree(calldata->res.server_owner);
 out_calldata:
 	kfree(calldata);
+	nfs_put_client(clp);
 	goto out;
 }
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c01eeaa..5cc0a36 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1436,6 +1436,204 @@
 
 #endif
 
+/*
+ * Print out various scheduling related per-task fields:
+ */
+
+#ifdef CONFIG_SMP
+
+static int sched_wake_up_idle_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_wake_up_idle(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_wake_up_idle_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int wake_up_idle, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &wake_up_idle);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_wake_up_idle(p, wake_up_idle);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_wake_up_idle_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_wake_up_idle_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
+	.open		= sched_wake_up_idle_open,
+	.read		= seq_read,
+	.write		= sched_wake_up_idle_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SMP */
+
+#ifdef CONFIG_SCHED_HMP
+
+static int sched_init_task_load_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_init_task_load(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_init_task_load_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int init_task_load, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &init_task_load);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_init_task_load(p, init_task_load);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_init_task_load_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_init_task_load_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_init_task_load_operations = {
+	.open		= sched_init_task_load_open,
+	.read		= seq_read,
+	.write		= sched_init_task_load_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int sched_group_id_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_group_id(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_group_id_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int group_id, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &group_id);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_group_id(p, group_id);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_group_id_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_group_id_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_group_id_operations = {
+	.open		= sched_group_id_open,
+	.read		= seq_read,
+	.write		= sched_group_id_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 /*
  * Print out autogroup related information:
@@ -2861,6 +3059,13 @@
 	ONE("status",     S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits",	  S_IRUGO, proc_pid_limits),
+#ifdef CONFIG_SMP
+	REG("sched_wake_up_idle",      S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
+#endif
+#ifdef CONFIG_SCHED_HMP
+	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
+	REG("sched_group_id",      S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
+#endif
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index f22de8a..afd9771 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -46,7 +46,8 @@
 	spin_unlock(&dentry->d_lock);
 
 	/* check uninitialized obb_dentry and
-	 * whether the base obbpath has been changed or not */
+	 * whether the base obbpath has been changed or not
+	 */
 	if (is_obbpath_invalid(dentry)) {
 		d_drop(dentry);
 		return 0;
@@ -76,17 +77,13 @@
 
 	if (dentry < lower_dentry) {
 		spin_lock(&dentry->d_lock);
-		spin_lock(&lower_dentry->d_lock);
+		spin_lock_nested(&lower_dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	} else {
 		spin_lock(&lower_dentry->d_lock);
-		spin_lock(&dentry->d_lock);
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	}
 
-	if (dentry->d_name.len != lower_dentry->d_name.len) {
-		__d_drop(dentry);
-		err = 0;
-	} else if (strncasecmp(dentry->d_name.name, lower_dentry->d_name.name,
-				dentry->d_name.len) != 0) {
+	if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
 		__d_drop(dentry);
 		err = 0;
 	}
@@ -110,12 +107,10 @@
 static void sdcardfs_d_release(struct dentry *dentry)
 {
 	/* release and reset the lower paths */
-	if(has_graft_path(dentry)) {
+	if (has_graft_path(dentry))
 		sdcardfs_put_reset_orig_path(dentry);
-	}
 	sdcardfs_put_reset_lower_path(dentry);
 	free_dentry_private_data(dentry);
-	return;
 }
 
 static int sdcardfs_hash_ci(const struct dentry *dentry,
@@ -132,12 +127,10 @@
 	unsigned long hash;
 
 	name = qstr->name;
-	//len = vfat_striptail_len(qstr);
 	len = qstr->len;
 
 	hash = init_name_hash(dentry);
 	while (len--)
-		//hash = partial_name_hash(nls_tolower(t, *name++), hash);
 		hash = partial_name_hash(tolower(*name++), hash);
 	qstr->hash = end_name_hash(hash);
 
@@ -150,35 +143,25 @@
 static int sdcardfs_cmp_ci(const struct dentry *dentry,
 		unsigned int len, const char *str, const struct qstr *name)
 {
-	/* This function is copy of vfat_cmpi */
-	// FIXME Should we support national language?
-	//struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
-	//unsigned int alen, blen;
+	/* FIXME Should we support national language? */
 
-	/* A filename cannot end in '.' or we treat it like it has none */
-	/*
-	alen = vfat_striptail_len(name);
-	blen = __vfat_striptail_len(len, str);
-	if (alen == blen) {
-		if (nls_strnicmp(t, name->name, str, alen) == 0)
-			return 0;
-	}
-	*/
 	if (name->len == len) {
-		if (strncasecmp(name->name, str, len) == 0)
+		if (str_n_case_eq(name->name, str, len))
 			return 0;
 	}
 	return 1;
 }
 
-static void sdcardfs_canonical_path(const struct path *path, struct path *actual_path) {
+static void sdcardfs_canonical_path(const struct path *path,
+				struct path *actual_path)
+{
 	sdcardfs_get_real_lower(path->dentry, actual_path);
 }
 
 const struct dentry_operations sdcardfs_ci_dops = {
 	.d_revalidate	= sdcardfs_d_revalidate,
 	.d_release	= sdcardfs_d_release,
-	.d_hash 	= sdcardfs_hash_ci,
+	.d_hash	= sdcardfs_hash_ci,
 	.d_compare	= sdcardfs_cmp_ci,
 	.d_canonical_path = sdcardfs_canonical_path,
 };
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 9408a54..14747a8 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -30,12 +30,15 @@
 	ci->userid = pi->userid;
 	ci->d_uid = pi->d_uid;
 	ci->under_android = pi->under_android;
+	ci->under_cache = pi->under_cache;
+	ci->under_obb = pi->under_obb;
 	set_top(ci, pi->top);
 }
 
 /* helper function for derived state */
 void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
-                        uid_t uid, bool under_android, struct inode *top)
+						uid_t uid, bool under_android,
+						struct inode *top)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
 
@@ -43,145 +46,276 @@
 	info->userid = userid;
 	info->d_uid = uid;
 	info->under_android = under_android;
+	info->under_cache = false;
+	info->under_obb = false;
 	set_top(info, top);
 }
 
-/* While renaming, there is a point where we want the path from dentry, but the name from newdentry */
-void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry)
+/* While renaming, there is a point where we want the path from dentry,
+ * but the name from newdentry
+ */
+void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
+				const struct qstr *name)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(d_inode(dentry));
-	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
+	struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
 	appid_t appid;
+	unsigned long user_num;
+	int err;
+	struct qstr q_Android = QSTR_LITERAL("Android");
+	struct qstr q_data = QSTR_LITERAL("data");
+	struct qstr q_obb = QSTR_LITERAL("obb");
+	struct qstr q_media = QSTR_LITERAL("media");
+	struct qstr q_cache = QSTR_LITERAL("cache");
 
 	/* By default, each inode inherits from its parent.
 	 * the properties are maintained on its private fields
 	 * because the inode attributes will be modified with that of
 	 * its lower inode.
-	 * The derived state will be updated on the last
-	 * stage of each system call by fix_derived_permission(inode).
+	 * These values are used by our custom permission call instead
+	 * of using the inode permissions.
 	 */
 
 	inherit_derived_state(d_inode(parent), d_inode(dentry));
 
+	/* Files don't get special labels */
+	if (!S_ISDIR(d_inode(dentry)->i_mode))
+		return;
 	/* Derive custom permissions based on parent and current node */
 	switch (parent_info->perm) {
-		case PERM_INHERIT:
-			/* Already inherited above */
-			break;
-		case PERM_PRE_ROOT:
-			/* Legacy internal layout places users at top level */
-			info->perm = PERM_ROOT;
-			info->userid = simple_strtoul(newdentry->d_name.name, NULL, 10);
+	case PERM_INHERIT:
+	case PERM_ANDROID_PACKAGE_CACHE:
+		/* Already inherited above */
+		break;
+	case PERM_PRE_ROOT:
+		/* Legacy internal layout places users at top level */
+		info->perm = PERM_ROOT;
+		err = kstrtoul(name->name, 10, &user_num);
+		if (err)
+			info->userid = 0;
+		else
+			info->userid = user_num;
+		set_top(info, &info->vfs_inode);
+		break;
+	case PERM_ROOT:
+		/* Assume masked off by default. */
+		if (qstr_case_eq(name, &q_Android)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID;
+			info->under_android = true;
 			set_top(info, &info->vfs_inode);
-			break;
-		case PERM_ROOT:
-			/* Assume masked off by default. */
-			if (!strcasecmp(newdentry->d_name.name, "Android")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID;
-				info->under_android = true;
-				set_top(info, &info->vfs_inode);
-			}
-			break;
-		case PERM_ANDROID:
-			if (!strcasecmp(newdentry->d_name.name, "data")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID_DATA;
-				set_top(info, &info->vfs_inode);
-			} else if (!strcasecmp(newdentry->d_name.name, "obb")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID_OBB;
-				set_top(info, &info->vfs_inode);
-				/* Single OBB directory is always shared */
-			} else if (!strcasecmp(newdentry->d_name.name, "media")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID_MEDIA;
-				set_top(info, &info->vfs_inode);
-			}
-			break;
-		case PERM_ANDROID_DATA:
-		case PERM_ANDROID_OBB:
-		case PERM_ANDROID_MEDIA:
-			appid = get_appid(newdentry->d_name.name);
-			if (appid != 0) {
-				info->d_uid = multiuser_get_uid(parent_info->userid, appid);
-			}
+		}
+		break;
+	case PERM_ANDROID:
+		if (qstr_case_eq(name, &q_data)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID_DATA;
 			set_top(info, &info->vfs_inode);
-			break;
+		} else if (qstr_case_eq(name, &q_obb)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID_OBB;
+			info->under_obb = true;
+			set_top(info, &info->vfs_inode);
+			/* Single OBB directory is always shared */
+		} else if (qstr_case_eq(name, &q_media)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID_MEDIA;
+			set_top(info, &info->vfs_inode);
+		}
+		break;
+	case PERM_ANDROID_OBB:
+	case PERM_ANDROID_DATA:
+	case PERM_ANDROID_MEDIA:
+		info->perm = PERM_ANDROID_PACKAGE;
+		appid = get_appid(name->name);
+		if (appid != 0 && !is_excluded(name->name, parent_info->userid))
+			info->d_uid = multiuser_get_uid(parent_info->userid, appid);
+		set_top(info, &info->vfs_inode);
+		break;
+	case PERM_ANDROID_PACKAGE:
+		if (qstr_case_eq(name, &q_cache)) {
+			info->perm = PERM_ANDROID_PACKAGE_CACHE;
+			info->under_cache = true;
+		}
+		break;
 	}
 }
 
 void get_derived_permission(struct dentry *parent, struct dentry *dentry)
 {
-	get_derived_permission_new(parent, dentry, dentry);
+	get_derived_permission_new(parent, dentry, &dentry->d_name);
 }
 
-static int descendant_may_need_fixup(perm_t perm) {
-	if (perm == PERM_PRE_ROOT || perm == PERM_ROOT || perm == PERM_ANDROID)
+static appid_t get_type(const char *name)
+{
+	const char *ext = strrchr(name, '.');
+	appid_t id;
+
+	if (ext && ext[0]) {
+		ext = &ext[1];
+		id = get_ext_gid(ext);
+		return id ?: AID_MEDIA_RW;
+	}
+	return AID_MEDIA_RW;
+}
+
+void fixup_lower_ownership(struct dentry *dentry, const char *name)
+{
+	struct path path;
+	struct inode *inode;
+	struct inode *delegated_inode = NULL;
+	int error;
+	struct sdcardfs_inode_info *info;
+	struct sdcardfs_inode_info *info_top;
+	perm_t perm;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	uid_t uid = sbi->options.fs_low_uid;
+	gid_t gid = sbi->options.fs_low_gid;
+	struct iattr newattrs;
+
+	info = SDCARDFS_I(d_inode(dentry));
+	perm = info->perm;
+	if (info->under_obb) {
+		perm = PERM_ANDROID_OBB;
+	} else if (info->under_cache) {
+		perm = PERM_ANDROID_PACKAGE_CACHE;
+	} else if (perm == PERM_INHERIT) {
+		info_top = SDCARDFS_I(grab_top(info));
+		perm = info_top->perm;
+		release_top(info);
+	}
+
+	switch (perm) {
+	case PERM_ROOT:
+	case PERM_ANDROID:
+	case PERM_ANDROID_DATA:
+	case PERM_ANDROID_MEDIA:
+	case PERM_ANDROID_PACKAGE:
+	case PERM_ANDROID_PACKAGE_CACHE:
+		uid = multiuser_get_uid(info->userid, uid);
+		break;
+	case PERM_ANDROID_OBB:
+		uid = AID_MEDIA_OBB;
+		break;
+	case PERM_PRE_ROOT:
+	default:
+		break;
+	}
+	switch (perm) {
+	case PERM_ROOT:
+	case PERM_ANDROID:
+	case PERM_ANDROID_DATA:
+	case PERM_ANDROID_MEDIA:
+		if (S_ISDIR(d_inode(dentry)->i_mode))
+			gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
+		else
+			gid = multiuser_get_uid(info->userid, get_type(name));
+		break;
+	case PERM_ANDROID_OBB:
+		gid = AID_MEDIA_OBB;
+		break;
+	case PERM_ANDROID_PACKAGE:
+		if (info->d_uid != 0)
+			gid = multiuser_get_ext_gid(info->d_uid);
+		else
+			gid = multiuser_get_uid(info->userid, uid);
+		break;
+	case PERM_ANDROID_PACKAGE_CACHE:
+		if (info->d_uid != 0)
+			gid = multiuser_get_cache_gid(info->d_uid);
+		else
+			gid = multiuser_get_uid(info->userid, uid);
+		break;
+	case PERM_PRE_ROOT:
+	default:
+		break;
+	}
+
+	sdcardfs_get_lower_path(dentry, &path);
+	inode = d_inode(path.dentry);
+	if (d_inode(path.dentry)->i_gid.val != gid || d_inode(path.dentry)->i_uid.val != uid) {
+retry_deleg:
+		newattrs.ia_valid = ATTR_GID | ATTR_UID | ATTR_FORCE;
+		newattrs.ia_uid = make_kuid(current_user_ns(), uid);
+		newattrs.ia_gid = make_kgid(current_user_ns(), gid);
+		if (!S_ISDIR(inode->i_mode))
+			newattrs.ia_valid |=
+				ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+		inode_lock(inode);
+		error = security_path_chown(&path, newattrs.ia_uid, newattrs.ia_gid);
+		if (!error)
+			error = notify_change2(path.mnt, path.dentry, &newattrs, &delegated_inode);
+		inode_unlock(inode);
+		if (delegated_inode) {
+			error = break_deleg_wait(&delegated_inode);
+			if (!error)
+				goto retry_deleg;
+		}
+		if (error)
+			pr_err("sdcardfs: Failed to touch up lower fs gid/uid.\n");
+	}
+	sdcardfs_put_lower_path(dentry, &path);
+}
+
+static int descendant_may_need_fixup(struct sdcardfs_inode_info *info, struct limit_search *limit)
+{
+	if (info->perm == PERM_ROOT)
+		return (limit->flags & BY_USERID) ? info->userid == limit->userid : 1;
+	if (info->perm == PERM_PRE_ROOT || info->perm == PERM_ANDROID)
 		return 1;
 	return 0;
 }
 
-static int needs_fixup(perm_t perm) {
+static int needs_fixup(perm_t perm)
+{
 	if (perm == PERM_ANDROID_DATA || perm == PERM_ANDROID_OBB
 			|| perm == PERM_ANDROID_MEDIA)
 		return 1;
 	return 0;
 }
 
-void fixup_perms_recursive(struct dentry *dentry, const char* name, size_t len) {
+static void __fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit, int depth)
+{
 	struct dentry *child;
 	struct sdcardfs_inode_info *info;
-	if (!dget(dentry))
-		return;
+
+	/*
+	 * All paths will terminate their recursion on hitting PERM_ANDROID_OBB,
+	 * PERM_ANDROID_MEDIA, or PERM_ANDROID_DATA. This happens at a depth of
+	 * at most 3.
+	 */
+	WARN(depth > 3, "%s: Max expected depth exceeded!\n", __func__);
+	spin_lock_nested(&dentry->d_lock, depth);
 	if (!d_inode(dentry)) {
-		dput(dentry);
+		spin_unlock(&dentry->d_lock);
 		return;
 	}
 	info = SDCARDFS_I(d_inode(dentry));
 
 	if (needs_fixup(info->perm)) {
-		spin_lock(&dentry->d_lock);
 		list_for_each_entry(child, &dentry->d_subdirs, d_child) {
-				dget(child);
-				if (!strncasecmp(child->d_name.name, name, len)) {
-					if (child->d_inode) {
-						get_derived_permission(dentry, child);
-						fixup_tmp_permissions(child->d_inode);
-						dput(child);
-						break;
-					}
+			spin_lock_nested(&child->d_lock, depth + 1);
+			if (!(limit->flags & BY_NAME) || qstr_case_eq(&child->d_name, &limit->name)) {
+				if (d_inode(child)) {
+					get_derived_permission(dentry, child);
+					fixup_tmp_permissions(d_inode(child));
+					spin_unlock(&child->d_lock);
+					break;
 				}
-				dput(child);
+			}
+			spin_unlock(&child->d_lock);
 		}
-		spin_unlock(&dentry->d_lock);
-	} else 	if (descendant_may_need_fixup(info->perm)) {
-		spin_lock(&dentry->d_lock);
+	} else if (descendant_may_need_fixup(info, limit)) {
 		list_for_each_entry(child, &dentry->d_subdirs, d_child) {
-				fixup_perms_recursive(child, name, len);
+			__fixup_perms_recursive(child, limit, depth + 1);
 		}
-		spin_unlock(&dentry->d_lock);
 	}
-	dput(dentry);
+	spin_unlock(&dentry->d_lock);
 }
 
-void fixup_top_recursive(struct dentry *parent) {
-	struct dentry *dentry;
-	struct sdcardfs_inode_info *info;
-	if (!d_inode(parent))
-		return;
-	info = SDCARDFS_I(d_inode(parent));
-	spin_lock(&parent->d_lock);
-	list_for_each_entry(dentry, &parent->d_subdirs, d_child) {
-		if (d_inode(dentry)) {
-			if (SDCARDFS_I(d_inode(parent))->top != SDCARDFS_I(d_inode(dentry))->top) {
-				get_derived_permission(parent, dentry);
-				fixup_tmp_permissions(d_inode(dentry));
-				fixup_top_recursive(dentry);
-			}
-		}
-	}
-	spin_unlock(&parent->d_lock);
+void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit)
+{
+	__fixup_perms_recursive(dentry, limit, 0);
 }
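For reference, the new limit_search argument scopes this recursive walk by name and/or userid; a minimal sketch of a caller (mirroring fixup_all_perms_name_userid() added in packagelist.c further down; the package name is hypothetical, and sb is an sdcardfs super_block) looks like:

	struct limit_search limit = {
		.flags  = BY_NAME | BY_USERID,
		.name   = QSTR_INIT("com.example.app", 15),
		.userid = 0,
	};

	/* callers hold sdcardfs_super_list_lock, as the fixup_all_perms_*() helpers below do */
	fixup_perms_recursive(sb->s_root, &limit);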
 
 /* main function for updating derived permission */
@@ -189,19 +323,17 @@
 {
 	struct dentry *parent;
 
-	if(!dentry || !d_inode(dentry)) {
-		printk(KERN_ERR "sdcardfs: %s: invalid dentry\n", __func__);
+	if (!dentry || !d_inode(dentry)) {
+		pr_err("sdcardfs: %s: invalid dentry\n", __func__);
 		return;
 	}
 	/* FIXME:
 	 * 1. need to check whether the dentry is updated or not
 	 * 2. remove the root dentry update
 	 */
-	if(IS_ROOT(dentry)) {
-		//setup_default_pre_root_state(d_inode(dentry));
-	} else {
+	if (!IS_ROOT(dentry)) {
 		parent = dget_parent(dentry);
-		if(parent) {
+		if (parent) {
 			get_derived_permission(parent, dentry);
 			dput(parent);
 		}
@@ -213,14 +345,15 @@
 {
 	int ret = 0;
 	struct dentry *parent = dget_parent(dentry);
-	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
+	struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	struct qstr obb = QSTR_LITERAL("obb");
 
-	if(parent_info->perm == PERM_ANDROID &&
-			!strcasecmp(dentry->d_name.name, "obb")) {
+	if (parent_info->perm == PERM_ANDROID &&
+			qstr_case_eq(&dentry->d_name, &obb)) {
 
 		/* /Android/obb is the base obbpath of DERIVED_UNIFIED */
-		if(!(sbi->options.multiuser == false
+		if (!(sbi->options.multiuser == false
 				&& parent_info->userid == 0)) {
 			ret = 1;
 		}
@@ -235,36 +368,40 @@
 	struct sdcardfs_dentry_info *di = SDCARDFS_D(dent);
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dent->d_sb);
 	char *path_buf, *obbpath_s;
+	int need_put = 0;
+	struct path lower_path;
 
 	/* check the base obbpath has been changed.
 	 * this routine can check an uninitialized obb dentry as well.
-	 * regarding the uninitialized obb, refer to the sdcardfs_mkdir() */
+	 * regarding the uninitialized obb, refer to the sdcardfs_mkdir()
+	 */
 	spin_lock(&di->lock);
-	if(di->orig_path.dentry) {
- 		if(!di->lower_path.dentry) {
+	if (di->orig_path.dentry) {
+		if (!di->lower_path.dentry) {
 			ret = 1;
 		} else {
 			path_get(&di->lower_path);
-			//lower_parent = lock_parent(lower_path->dentry);
 
 			path_buf = kmalloc(PATH_MAX, GFP_ATOMIC);
-			if(!path_buf) {
+			if (!path_buf) {
 				ret = 1;
-				printk(KERN_ERR "sdcardfs: fail to allocate path_buf in %s.\n", __func__);
+				pr_err("sdcardfs: failed to allocate path_buf in %s.\n", __func__);
 			} else {
 				obbpath_s = d_path(&di->lower_path, path_buf, PATH_MAX);
 				if (d_unhashed(di->lower_path.dentry) ||
-					strcasecmp(sbi->obbpath_s, obbpath_s)) {
+					!str_case_eq(sbi->obbpath_s, obbpath_s)) {
 					ret = 1;
 				}
 				kfree(path_buf);
 			}
 
-			//unlock_dir(lower_parent);
-			path_put(&di->lower_path);
+			pathcpy(&lower_path, &di->lower_path);
+			need_put = 1;
 		}
 	}
 	spin_unlock(&di->lock);
+	if (need_put)
+		path_put(&lower_path);
 	return ret;
 }
 
@@ -272,17 +409,18 @@
 {
 	int ret = 0;
 	struct dentry *parent = dget_parent(dentry);
-	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
+	struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	struct qstr q_obb = QSTR_LITERAL("obb");
 
 	spin_lock(&SDCARDFS_D(dentry)->lock);
 	if (sbi->options.multiuser) {
-		if(parent_info->perm == PERM_PRE_ROOT &&
-				!strcasecmp(dentry->d_name.name, "obb")) {
+		if (parent_info->perm == PERM_PRE_ROOT &&
+				qstr_case_eq(&dentry->d_name, &q_obb)) {
 			ret = 1;
 		}
 	} else  if (parent_info->perm == PERM_ANDROID &&
-			!strcasecmp(dentry->d_name.name, "obb")) {
+			qstr_case_eq(&dentry->d_name, &q_obb)) {
 		ret = 1;
 	}
 	spin_unlock(&SDCARDFS_D(dentry)->lock);
@@ -292,7 +430,8 @@
 /* The lower_path will be stored to the dentry's orig_path
  * and the base obbpath will be copyed to the lower_path variable.
  * if an error returned, there's no change in the lower_path
- * returns: -ERRNO if error (0: no error) */
+ * returns: -ERRNO if error (0: no error)
+ */
 int setup_obb_dentry(struct dentry *dentry, struct path *lower_path)
 {
 	int err = 0;
@@ -301,23 +440,24 @@
 
 	/* A local obb dentry must have its own orig_path to support rmdir
 	 * and mkdir of itself. Usually, we expect that the sbi->obbpath
-	 * is avaiable on this stage. */
+	 * is available at this stage.
+	 */
 	sdcardfs_set_orig_path(dentry, lower_path);
 
 	err = kern_path(sbi->obbpath_s,
 			LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &obbpath);
 
-	if(!err) {
+	if (!err) {
 		/* the obbpath base has been found */
-		printk(KERN_INFO "sdcardfs: the sbi->obbpath is found\n");
 		pathcpy(lower_path, &obbpath);
 	} else {
 		/* if the sbi->obbpath is not available, we can optionally
 		 * setup the lower_path with its orig_path.
 		 * but, the current implementation just returns an error
 		 * because the sdcard daemon also regards this case as
-		 * a lookup fail. */
-		printk(KERN_INFO "sdcardfs: the sbi->obbpath is not available\n");
+		 * a lookup fail.
+		 */
+		pr_info("sdcardfs: the sbi->obbpath is not available\n");
 	}
 	return err;
 }
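A worked example of the lower-filesystem ownership that fixup_lower_ownership() derives, assuming a hypothetical package whose appid in packages.list is 10057, userid 0, and a mount whose fs_low_uid/fs_low_gid are AID_MEDIA_RW (1023), the usual setting:

	/* directories under PERM_ROOT/ANDROID/ANDROID_DATA/ANDROID_MEDIA:
	 *	uid = multiuser_get_uid(0, 1023)         = 1023
	 *	gid = multiuser_get_uid(0, AID_MEDIA_RW) = 1023   (files take get_type() of their extension instead)
	 * /Android/data/com.example (PERM_ANDROID_PACKAGE, d_uid = 10057):
	 *	gid = multiuser_get_ext_gid(10057)       = 30057
	 * /Android/data/com.example/cache (PERM_ANDROID_PACKAGE_CACHE):
	 *	gid = multiuser_get_cache_gid(10057)     = 20057
	 *	(with d_uid == 0, an unknown or excluded package, the gid falls back to multiuser_get_uid(0, 1023))
	 * anything under /Android/obb (PERM_ANDROID_OBB):
	 *	uid = gid = AID_MEDIA_OBB
	 */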
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 7750a04..eee4eb5 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -65,7 +65,7 @@
 
 	/* check disk space */
 	if (!check_min_free_space(dentry, count, 0)) {
-		printk(KERN_INFO "No minimum free space.\n");
+		pr_err("No minimum free space.\n");
 		return -ENOSPC;
 	}
 
@@ -160,8 +160,7 @@
 	lower_file = sdcardfs_lower_file(file);
 	if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
 		err = -EINVAL;
-		printk(KERN_ERR "sdcardfs: lower file system does not "
-		       "support writeable mmap\n");
+		pr_err("sdcardfs: lower file system does not support writeable mmap\n");
 		goto out;
 	}
 
@@ -173,14 +172,14 @@
 	if (!SDCARDFS_F(file)->lower_vm_ops) {
 		err = lower_file->f_op->mmap(lower_file, vma);
 		if (err) {
-			printk(KERN_ERR "sdcardfs: lower mmap failed %d\n", err);
+			pr_err("sdcardfs: lower mmap failed %d\n", err);
 			goto out;
 		}
 		saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
 		err = do_munmap(current->mm, vma->vm_start,
 				vma->vm_end - vma->vm_start);
 		if (err) {
-			printk(KERN_ERR "sdcardfs: do_munmap failed %d\n", err);
+			pr_err("sdcardfs: do_munmap failed %d\n", err);
 			goto out;
 		}
 	}
@@ -216,16 +215,13 @@
 		goto out_err;
 	}
 
-	if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-                         "	dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name)) {
 		err = -EACCES;
 		goto out_err;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred);
+	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
 
 	file->private_data =
 		kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
@@ -251,9 +247,8 @@
 
 	if (err)
 		kfree(SDCARDFS_F(file));
-	else {
+	else
 		sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
-	}
 
 out_revert_cred:
 	REVERT_CRED(saved_cred);
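Note the extra argument OVERRIDE_CRED() now takes; every call site in this patch follows the same bracket (a sketch, with the actual lower-filesystem work elided), which lets override_fsids() pick a per-user fsuid, or AID_MEDIA_OBB when the inode is under the obb tree:

	const struct cred *saved_cred = NULL;

	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
	/* ... operate on the lower filesystem with the overridden fsuid/fsgid ... */
	REVERT_CRED(saved_cred);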
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 5b31170..92afceb 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -20,18 +20,24 @@
 
 #include "sdcardfs.h"
 #include <linux/fs_struct.h>
+#include <linux/ratelimit.h>
 
 /* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred * override_fsids(struct sdcardfs_sb_info* sbi)
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info)
 {
-	struct cred * cred;
-	const struct cred * old_cred;
+	struct cred *cred;
+	const struct cred *old_cred;
+	uid_t uid;
 
 	cred = prepare_creds();
 	if (!cred)
 		return NULL;
 
-	cred->fsuid = make_kuid(&init_user_ns, sbi->options.fs_low_uid);
+	if (info->under_obb)
+		uid = AID_MEDIA_OBB;
+	else
+		uid = multiuser_get_uid(info->userid, sbi->options.fs_low_uid);
+	cred->fsuid = make_kuid(&init_user_ns, uid);
 	cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
 
 	old_cred = override_creds(cred);
@@ -40,9 +46,9 @@
 }
 
 /* Do not directly use this function, use REVERT_CRED() instead. */
-void revert_fsids(const struct cred * old_cred)
+void revert_fsids(const struct cred *old_cred)
 {
-	const struct cred * cur_cred;
+	const struct cred *cur_cred;
 
 	cur_cred = current->cred;
 	revert_creds(old_cred);
@@ -61,16 +67,13 @@
 	struct fs_struct *saved_fs;
 	struct fs_struct *copied_fs;
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -98,6 +101,7 @@
 		goto out;
 	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
 	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+	fixup_lower_ownership(dentry, dentry->d_name.name);
 
 out:
 	current->fs = saved_fs;
@@ -162,16 +166,13 @@
 	struct path lower_path;
 	const struct cred *saved_cred = NULL;
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -239,14 +240,15 @@
 }
 #endif
 
-static int touch(char *abs_path, mode_t mode) {
+static int touch(char *abs_path, mode_t mode)
+{
 	struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
+
 	if (IS_ERR(filp)) {
 		if (PTR_ERR(filp) == -EEXIST) {
 			return 0;
-		}
-		else {
-			printk(KERN_ERR "sdcardfs: failed to open(%s): %ld\n",
+		} else {
+			pr_err("sdcardfs: failed to open(%s): %ld\n",
 						abs_path, PTR_ERR(filp));
 			return PTR_ERR(filp);
 		}
@@ -269,21 +271,20 @@
 	int touch_err = 0;
 	struct fs_struct *saved_fs;
 	struct fs_struct *copied_fs;
+	struct qstr q_obb = QSTR_LITERAL("obb");
+	struct qstr q_data = QSTR_LITERAL("data");
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	/* check disk space */
 	if (!check_min_free_space(dentry, 0, 1)) {
-		printk(KERN_INFO "sdcardfs: No minimum free space.\n");
+		pr_err("sdcardfs: No minimum free space.\n");
 		err = -ENOSPC;
 		goto out_revert;
 	}
@@ -315,19 +316,21 @@
 	}
 
 	/* if it is a local obb dentry, setup it with the base obbpath */
-	if(need_graft_path(dentry)) {
+	if (need_graft_path(dentry)) {
 
 		err = setup_obb_dentry(dentry, &lower_path);
-		if(err) {
+		if (err) {
 			/* if the sbi->obbpath is not available, the lower_path won't be
 			 * changed by setup_obb_dentry() but the lower path is saved to
 			 * its orig_path. this dentry will be revalidated later.
-			 * but now, the lower_path should be NULL */
+			 * but now, the lower_path should be NULL
+			 */
 			sdcardfs_put_reset_lower_path(dentry);
 
 			/* the newly created lower path which saved to its orig_path or
 			 * the lower_path is the base obbpath.
-			 * therefore, an additional path_get is required */
+			 * therefore, an additional path_get is required
+			 */
 			path_get(&lower_path);
 		} else
 			make_nomedia_in_obb = 1;
@@ -343,20 +346,21 @@
 	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
 	/* update number of links on parent directory */
 	set_nlink(dir, sdcardfs_lower_inode(dir)->i_nlink);
-
+	fixup_lower_ownership(dentry, dentry->d_name.name);
 	unlock_dir(lower_parent_dentry);
-
-	if ((!sbi->options.multiuser) && (!strcasecmp(dentry->d_name.name, "obb"))
+	if ((!sbi->options.multiuser) && (qstr_case_eq(&dentry->d_name, &q_obb))
 		&& (pi->perm == PERM_ANDROID) && (pi->userid == 0))
 		make_nomedia_in_obb = 1;
 
 	/* When creating /Android/data and /Android/obb, mark them as .nomedia */
 	if (make_nomedia_in_obb ||
-		((pi->perm == PERM_ANDROID) && (!strcasecmp(dentry->d_name.name, "data")))) {
+		((pi->perm == PERM_ANDROID) && (qstr_case_eq(&dentry->d_name, &q_data)))) {
+		REVERT_CRED(saved_cred);
+		OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
 		set_fs_pwd(current->fs, &lower_path);
 		touch_err = touch(".nomedia", 0664);
 		if (touch_err) {
-			printk(KERN_ERR "sdcardfs: failed to create .nomedia in %s: %d\n",
+			pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
 							lower_path.dentry->d_name.name, touch_err);
 			goto out;
 		}
@@ -381,19 +385,17 @@
 	struct path lower_path;
 	const struct cred *saved_cred = NULL;
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
-	 * the dentry on the original path should be deleted. */
+	 * the dentry on the original path should be deleted.
+	 */
 	sdcardfs_get_real_lower(dentry, &lower_path);
 
 	lower_dentry = lower_path.dentry;
@@ -467,24 +469,20 @@
 	struct dentry *lower_new_dir_dentry = NULL;
 	struct vfsmount *lower_mnt = NULL;
 	struct dentry *trap = NULL;
-	struct dentry *new_parent = NULL;
 	struct path lower_old_path, lower_new_path;
 	const struct cred *saved_cred = NULL;
 
 	if (flags)
 		return -EINVAL;
 
-	if(!check_caller_access_to_name(old_dir, old_dentry->d_name.name) ||
-		!check_caller_access_to_name(new_dir, new_dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  new_dentry: %s, task:%s\n",
-						 __func__, new_dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(old_dir, &old_dentry->d_name) ||
+		!check_caller_access_to_name(new_dir, &new_dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
 
 	sdcardfs_get_real_lower(old_dentry, &lower_old_path);
 	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
@@ -520,23 +518,11 @@
 	if (new_dir != old_dir) {
 		sdcardfs_copy_and_fix_attrs(old_dir, d_inode(lower_old_dir_dentry));
 		fsstack_copy_inode_size(old_dir, d_inode(lower_old_dir_dentry));
-
-		/* update the derived permission of the old_dentry
-		 * with its new parent
-		 */
-		new_parent = dget_parent(new_dentry);
-		if(new_parent) {
-			if(d_inode(old_dentry)) {
-				update_derived_permission_lock(old_dentry);
-			}
-			dput(new_parent);
-		}
 	}
-	/* At this point, not all dentry information has been moved, so
-	 * we pass along new_dentry for the name.*/
-	get_derived_permission_new(new_dentry->d_parent, old_dentry, new_dentry);
+	get_derived_permission_new(new_dentry->d_parent, old_dentry, &new_dentry->d_name);
 	fixup_tmp_permissions(d_inode(old_dentry));
-	fixup_top_recursive(old_dentry);
+	fixup_lower_ownership(old_dentry, new_dentry->d_name.name);
+	d_invalidate(old_dentry); /* Can't fixup ownership recursively :( */
 out:
 	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
 	dput(lower_old_dir_dentry);
@@ -607,7 +593,7 @@
 
 static int sdcardfs_permission_wrn(struct inode *inode, int mask)
 {
-	WARN(1, "sdcardfs does not support permission. Use permission2.\n");
+	WARN_RATELIMIT(1, "sdcardfs does not support permission. Use permission2.\n");
 	return -EINVAL;
 }
 
@@ -660,7 +646,7 @@
 	release_top(SDCARDFS_I(inode));
 	tmp.i_sb = inode->i_sb;
 	if (IS_POSIXACL(inode))
-		printk(KERN_WARNING "%s: This may be undefined behavior... \n", __func__);
+		pr_warn("%s: This may be undefined behavior...\n", __func__);
 	err = generic_permission(&tmp, mask);
 	/* XXX
 	 * Original sdcardfs code calls inode_permission(lower_inode,.. )
@@ -678,6 +664,7 @@
 		 * we check it with AID_MEDIA_RW permission
 		 */
 		struct inode *lower_inode;
+
 		OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
 
 		lower_inode = sdcardfs_lower_inode(inode);
@@ -692,7 +679,7 @@
 
 static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
 {
-	WARN(1, "sdcardfs does not support setattr. User setattr2.\n");
+	WARN_RATELIMIT(1, "sdcardfs does not support setattr. Use setattr2.\n");
 	return -EINVAL;
 }
 
@@ -745,17 +732,19 @@
 	 * this user can change the lower inode: that should happen when
 	 * calling notify_change on the lower inode.
 	 */
+	/* prepare our own lower struct iattr (with the lower file) */
+	memcpy(&lower_ia, ia, sizeof(lower_ia));
+	/* Allow touch updating timestamps. A previous permission check ensures
+	 * we have write access. Changes to mode, owner, and group are ignored
+	 */
+	ia->ia_valid |= ATTR_FORCE;
 	err = setattr_prepare(&tmp_d, ia);
 
 	if (!err) {
 		/* check the Android group ID */
 		parent = dget_parent(dentry);
-		if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
-			printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-							 "  dentry: %s, task:%s\n",
-							 __func__, dentry->d_name.name, current->comm);
+		if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name))
 			err = -EACCES;
-		}
 		dput(parent);
 	}
 
@@ -763,15 +752,13 @@
 		goto out_err;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
 	lower_mnt = lower_path.mnt;
 	lower_inode = sdcardfs_lower_inode(inode);
 
-	/* prepare our own lower struct iattr (with the lower file) */
-	memcpy(&lower_ia, ia, sizeof(lower_ia));
 	if (ia->ia_valid & ATTR_FILE)
 		lower_ia.ia_file = sdcardfs_lower_file(ia->ia_file);
 
@@ -835,10 +822,12 @@
 	return err;
 }
 
-static int sdcardfs_fillattr(struct vfsmount *mnt, struct inode *inode, struct kstat *stat)
+static int sdcardfs_fillattr(struct vfsmount *mnt,
+				struct inode *inode, struct kstat *stat)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
 	struct inode *top = grab_top(info);
+
 	if (!top)
 		return -EINVAL;
 
@@ -862,33 +851,27 @@
 static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		 struct kstat *stat)
 {
-	struct dentry *lower_dentry;
-	struct inode *inode;
-	struct inode *lower_inode;
+	struct kstat lower_stat;
 	struct path lower_path;
 	struct dentry *parent;
 	int err;
 
 	parent = dget_parent(dentry);
-	if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name)) {
 		dput(parent);
 		return -EACCES;
 	}
 	dput(parent);
 
-	inode = d_inode(dentry);
-
 	sdcardfs_get_lower_path(dentry, &lower_path);
-	lower_dentry = lower_path.dentry;
-	lower_inode = sdcardfs_lower_inode(inode);
-
-	sdcardfs_copy_and_fix_attrs(inode, lower_inode);
-	fsstack_copy_inode_size(inode, lower_inode);
-
-	err = sdcardfs_fillattr(mnt, inode, stat);
+	err = vfs_getattr(&lower_path, &lower_stat);
+	if (err)
+		goto out;
+	sdcardfs_copy_and_fix_attrs(d_inode(dentry),
+			      d_inode(lower_path.dentry));
+	err = sdcardfs_fillattr(mnt, d_inode(dentry), stat);
+	stat->blocks = lower_stat.blocks;
+out:
 	sdcardfs_put_lower_path(dentry, &lower_path);
 	return err;
 }
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index d271617..f028bfd 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -36,8 +36,7 @@
 
 void sdcardfs_destroy_dentry_cache(void)
 {
-	if (sdcardfs_dentry_cachep)
-		kmem_cache_destroy(sdcardfs_dentry_cachep);
+	kmem_cache_destroy(sdcardfs_dentry_cachep);
 }
 
 void free_dentry_private_data(struct dentry *dentry)
@@ -73,6 +72,7 @@
 {
 	struct inode *current_lower_inode = sdcardfs_lower_inode(inode);
 	userid_t current_userid = SDCARDFS_I(inode)->userid;
+
 	if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode &&
 			current_userid == ((struct inode_data *)candidate_data)->id)
 		return 1; /* found a match */
@@ -102,7 +102,7 @@
 			      * instead.
 			      */
 			     lower_inode->i_ino, /* hashval */
-			     sdcardfs_inode_test,	/* inode comparison function */
+			     sdcardfs_inode_test, /* inode comparison function */
 			     sdcardfs_inode_set, /* inode init function */
 			     &data); /* data passed to test+set fxns */
 	if (!inode) {
@@ -206,6 +206,28 @@
 	return err;
 }
 
+struct sdcardfs_name_data {
+	struct dir_context ctx;
+	const struct qstr *to_find;
+	char *name;
+	bool found;
+};
+
+static int sdcardfs_name_match(struct dir_context *ctx, const char *name,
+		int namelen, loff_t offset, u64 ino, unsigned int d_type)
+{
+	struct sdcardfs_name_data *buf = container_of(ctx, struct sdcardfs_name_data, ctx);
+	struct qstr candidate = QSTR_INIT(name, namelen);
+
+	if (qstr_case_eq(buf->to_find, &candidate)) {
+		memcpy(buf->name, name, namelen);
+		buf->name[namelen] = 0;
+		buf->found = true;
+		return 1;
+	}
+	return 0;
+}
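A short walk-through of the fallback this actor enables (file names are hypothetical): a lookup of "Foo.TXT" that misses on a case-sensitive lower filesystem now proceeds roughly as

	err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, "Foo.TXT", 0, &lower_path);
	/* -ENOENT: scan the lower directory for a case-insensitive match */
	file = dentry_open(lower_parent_path, O_RDONLY, current_cred());
	err = iterate_dir(file, &buffer.ctx);	/* actor copies "foo.txt" into buffer.name and returns 1 to stop */
	err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, buffer.name, 0, &lower_path);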
+
 /*
  * Main driver function for sdcardfs's lookup.
  *
@@ -219,9 +241,9 @@
 	struct vfsmount *lower_dir_mnt;
 	struct dentry *lower_dir_dentry = NULL;
 	struct dentry *lower_dentry;
-	const char *name;
+	const struct qstr *name;
 	struct path lower_path;
-	struct qstr this;
+	struct qstr dname;
 	struct sdcardfs_sb_info *sbi;
 
 	sbi = SDCARDFS_SB(dentry->d_sb);
@@ -231,62 +253,77 @@
 	if (IS_ROOT(dentry))
 		goto out;
 
-	name = dentry->d_name.name;
+	name = &dentry->d_name;
 
 	/* now start the actual lookup procedure */
 	lower_dir_dentry = lower_parent_path->dentry;
 	lower_dir_mnt = lower_parent_path->mnt;
 
 	/* Use vfs_path_lookup to check if the dentry exists or not */
-	err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name, 0,
+	err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name->name, 0,
 				&lower_path);
 	/* check for other cases */
 	if (err == -ENOENT) {
-		struct dentry *child;
-		struct dentry *match = NULL;
-		inode_lock(d_inode(lower_dir_dentry));
-		spin_lock(&lower_dir_dentry->d_lock);
-		list_for_each_entry(child, &lower_dir_dentry->d_subdirs, d_child) {
-			if (child && d_inode(child)) {
-				if (strcasecmp(child->d_name.name, name)==0) {
-					match = dget(child);
-					break;
-				}
-			}
+		struct file *file;
+		const struct cred *cred = current_cred();
+
+		struct sdcardfs_name_data buffer = {
+			.ctx.actor = sdcardfs_name_match,
+			.to_find = name,
+			.name = __getname(),
+			.found = false,
+		};
+
+		if (!buffer.name) {
+			err = -ENOMEM;
+			goto out;
 		}
-		spin_unlock(&lower_dir_dentry->d_lock);
-		inode_unlock(d_inode(lower_dir_dentry));
-		if (match) {
+		file = dentry_open(lower_parent_path, O_RDONLY, cred);
+		if (IS_ERR(file)) {
+			err = PTR_ERR(file);
+			goto put_name;
+		}
+		err = iterate_dir(file, &buffer.ctx);
+		fput(file);
+		if (err)
+			goto put_name;
+
+		if (buffer.found)
 			err = vfs_path_lookup(lower_dir_dentry,
 						lower_dir_mnt,
-						match->d_name.name, 0,
+						buffer.name, 0,
 						&lower_path);
-			dput(match);
-		}
+		else
+			err = -ENOENT;
+put_name:
+		__putname(buffer.name);
 	}
 
 	/* no error: handle positive dentries */
 	if (!err) {
 		/* check if the dentry is an obb dentry
 		 * if true, the lower_inode must be replaced with
-		 * the inode of the graft path */
+		 * the inode of the graft path
+		 */
 
-		if(need_graft_path(dentry)) {
+		if (need_graft_path(dentry)) {
 
 			/* setup_obb_dentry()
- 			 * The lower_path will be stored to the dentry's orig_path
+			 * The lower_path will be stored to the dentry's orig_path
 			 * and the base obbpath will be copyed to the lower_path variable.
 			 * if an error returned, there's no change in the lower_path
-			 * 		returns: -ERRNO if error (0: no error) */
+			 * returns: -ERRNO if error (0: no error)
+			 */
 			err = setup_obb_dentry(dentry, &lower_path);
 
-			if(err) {
+			if (err) {
 				/* if the sbi->obbpath is not available, we can optionally
 				 * setup the lower_path with its orig_path.
 				 * but, the current implementation just returns an error
 				 * because the sdcard daemon also regards this case as
-				 * a lookup fail. */
-				printk(KERN_INFO "sdcardfs: base obbpath is not available\n");
+				 * a lookup fail.
+				 */
+				pr_info("sdcardfs: base obbpath is not available\n");
 				sdcardfs_put_reset_orig_path(dentry);
 				goto out;
 			}
@@ -307,14 +344,14 @@
 		goto out;
 
 	/* instatiate a new negative dentry */
-	this.name = name;
-	this.len = strlen(name);
-	this.hash = full_name_hash(dentry, this.name, this.len);
-	lower_dentry = d_lookup(lower_dir_dentry, &this);
+	dname.name = name->name;
+	dname.len = name->len;
+	dname.hash = full_name_hash(lower_dir_dentry, dname.name, dname.len);
+	lower_dentry = d_lookup(lower_dir_dentry, &dname);
 	if (lower_dentry)
 		goto setup_lower;
 
-	lower_dentry = d_alloc(lower_dir_dentry, &this);
+	lower_dentry = d_alloc(lower_dir_dentry, &dname);
 	if (!lower_dentry) {
 		err = -ENOMEM;
 		goto out;
@@ -340,9 +377,9 @@
 
 /*
  * On success:
- * 	fills dentry object appropriate values and returns NULL.
+ * fills the dentry object with appropriate values and returns NULL.
  * On fail (== error)
- * 	returns error ptr
+ * returns error ptr
  *
  * @dir : Parent inode.
  * @dentry : Target dentry to lookup. we should set each of fields.
@@ -359,16 +396,13 @@
 
 	parent = dget_parent(dentry);
 
-	if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
+	if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name)) {
 		ret = ERR_PTR(-EACCES);
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-                         "	dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
 		goto out_err;
-        }
+	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	sdcardfs_get_lower_path(parent, &lower_parent_path);
 
@@ -381,9 +415,7 @@
 
 	ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path, SDCARDFS_I(dir)->userid);
 	if (IS_ERR(ret))
-	{
 		goto out;
-	}
 	if (ret)
 		dentry = ret;
 	if (d_inode(dentry)) {
@@ -392,6 +424,7 @@
 		/* get derived permission */
 		get_derived_permission(parent, dentry);
 		fixup_tmp_permissions(d_inode(dentry));
+		fixup_lower_ownership(dentry, dentry->d_name.name);
 	}
 	/* update parent directory's atime */
 	fsstack_copy_attr_atime(d_inode(parent),
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 7a8eae2..7344635 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -29,7 +29,7 @@
 	Opt_gid,
 	Opt_debug,
 	Opt_mask,
-	Opt_multiuser, // May need?
+	Opt_multiuser,
 	Opt_userid,
 	Opt_reserved_mb,
 	Opt_err,
@@ -72,6 +72,7 @@
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
+
 		if (!*p)
 			continue;
 
@@ -116,19 +117,17 @@
 			break;
 		/* unknown option */
 		default:
-			if (!silent) {
-				printk( KERN_ERR "Unrecognized mount option \"%s\" "
-						"or missing value", p);
-			}
+			if (!silent)
+				pr_err("Unrecognized mount option \"%s\" or missing value\n", p);
 			return -EINVAL;
 		}
 	}
 
 	if (*debug) {
-		printk( KERN_INFO "sdcardfs : options - debug:%d\n", *debug);
-		printk( KERN_INFO "sdcardfs : options - uid:%d\n",
+		pr_info("sdcardfs : options - debug:%d\n", *debug);
+		pr_info("sdcardfs : options - uid:%d\n",
 							opts->fs_low_uid);
-		printk( KERN_INFO "sdcardfs : options - gid:%d\n",
+		pr_info("sdcardfs : options - gid:%d\n",
 							opts->fs_low_gid);
 	}
 
@@ -148,6 +147,7 @@
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
+
 		if (!*p)
 			continue;
 
@@ -173,22 +173,20 @@
 		case Opt_fsuid:
 		case Opt_fsgid:
 		case Opt_reserved_mb:
-			printk( KERN_WARNING "Option \"%s\" can't be changed during remount\n", p);
+			pr_warn("Option \"%s\" can't be changed during remount\n", p);
 			break;
 		/* unknown option */
 		default:
-			if (!silent) {
-				printk( KERN_ERR "Unrecognized mount option \"%s\" "
-						"or missing value", p);
-			}
+			if (!silent)
+				pr_err("Unrecognized mount option \"%s\" or missing value\n", p);
 			return -EINVAL;
 		}
 	}
 
 	if (debug) {
-		printk( KERN_INFO "sdcardfs : options - debug:%d\n", debug);
-		printk( KERN_INFO "sdcardfs : options - gid:%d\n", vfsopts->gid);
-		printk( KERN_INFO "sdcardfs : options - mask:%d\n", vfsopts->mask);
+		pr_info("sdcardfs : options - debug:%d\n", debug);
+		pr_info("sdcardfs : options - gid:%d\n", vfsopts->gid);
+		pr_info("sdcardfs : options - mask:%d\n", vfsopts->mask);
 	}
 
 	return 0;
@@ -223,8 +221,8 @@
 #endif
 
 DEFINE_MUTEX(sdcardfs_super_list_lock);
-LIST_HEAD(sdcardfs_super_list);
 EXPORT_SYMBOL_GPL(sdcardfs_super_list_lock);
+LIST_HEAD(sdcardfs_super_list);
 EXPORT_SYMBOL_GPL(sdcardfs_super_list);
 
 /*
@@ -242,31 +240,30 @@
 	struct sdcardfs_vfsmount_options *mnt_opt = mnt->data;
 	struct inode *inode;
 
-	printk(KERN_INFO "sdcardfs version 2.0\n");
+	pr_info("sdcardfs version 2.0\n");
 
 	if (!dev_name) {
-		printk(KERN_ERR
-		       "sdcardfs: read_super: missing dev_name argument\n");
+		pr_err("sdcardfs: read_super: missing dev_name argument\n");
 		err = -EINVAL;
 		goto out;
 	}
 
-	printk(KERN_INFO "sdcardfs: dev_name -> %s\n", dev_name);
-	printk(KERN_INFO "sdcardfs: options -> %s\n", (char *)raw_data);
-	printk(KERN_INFO "sdcardfs: mnt -> %p\n", mnt);
+	pr_info("sdcardfs: dev_name -> %s\n", dev_name);
+	pr_info("sdcardfs: options -> %s\n", (char *)raw_data);
+	pr_info("sdcardfs: mnt -> %p\n", mnt);
 
 	/* parse lower path */
 	err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
 			&lower_path);
 	if (err) {
-		printk(KERN_ERR	"sdcardfs: error accessing lower directory '%s'\n", dev_name);
+		pr_err("sdcardfs: error accessing lower directory '%s'\n", dev_name);
 		goto out;
 	}
 
 	/* allocate superblock private data */
 	sb->s_fs_info = kzalloc(sizeof(struct sdcardfs_sb_info), GFP_KERNEL);
 	if (!SDCARDFS_SB(sb)) {
-		printk(KERN_CRIT "sdcardfs: read_super: out of memory\n");
+		pr_crit("sdcardfs: read_super: out of memory\n");
 		err = -ENOMEM;
 		goto out_free;
 	}
@@ -275,7 +272,7 @@
 	/* parse options */
 	err = parse_options(sb, raw_data, silent, &debug, mnt_opt, &sb_info->options);
 	if (err) {
-		printk(KERN_ERR	"sdcardfs: invalid options\n");
+		pr_err("sdcardfs: invalid options\n");
 		goto out_freesbi;
 	}
 
@@ -328,14 +325,15 @@
 	/* setup permission policy */
 	sb_info->obbpath_s = kzalloc(PATH_MAX, GFP_KERNEL);
 	mutex_lock(&sdcardfs_super_list_lock);
-	if(sb_info->options.multiuser) {
-		setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT, sb_info->options.fs_user_id, AID_ROOT, false, d_inode(sb->s_root));
+	if (sb_info->options.multiuser) {
+		setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT,
+					sb_info->options.fs_user_id, AID_ROOT,
+					false, d_inode(sb->s_root));
 		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
-		/*err =  prepare_dir(sb_info->obbpath_s,
-					sb_info->options.fs_low_uid,
-					sb_info->options.fs_low_gid, 00755);*/
 	} else {
-		setup_derived_state(d_inode(sb->s_root), PERM_ROOT, sb_info->options.fs_user_id, AID_ROOT, false, d_inode(sb->s_root));
+		setup_derived_state(d_inode(sb->s_root), PERM_ROOT,
+					sb_info->options.fs_user_id, AID_ROOT,
+					false, d_inode(sb->s_root));
 		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
 	}
 	fixup_tmp_permissions(d_inode(sb->s_root));
@@ -344,7 +342,7 @@
 	mutex_unlock(&sdcardfs_super_list_lock);
 
 	if (!silent)
-		printk(KERN_INFO "sdcardfs: mounted on top of %s type %s\n",
+		pr_info("sdcardfs: mounted on top of %s type %s\n",
 				dev_name, lower_sb->s_type->name);
 	goto out; /* all is well */
 
@@ -368,8 +366,10 @@
 
 /* A feature which supports mount_nodev() with options */
 static struct dentry *mount_nodev_with_options(struct vfsmount *mnt,
-	struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
-        int (*fill_super)(struct vfsmount *, struct super_block *, const char *, void *, int))
+			struct file_system_type *fs_type, int flags,
+			const char *dev_name, void *data,
+			int (*fill_super)(struct vfsmount *, struct super_block *,
+						const char *, void *, int))
 
 {
 	int error;
@@ -401,19 +401,22 @@
 						raw_data, sdcardfs_read_super);
 }
 
-static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type, int flags,
-		    const char *dev_name, void *raw_data)
+static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type,
+		    int flags, const char *dev_name, void *raw_data)
 {
 	WARN(1, "sdcardfs does not support mount. Use mount2.\n");
 	return ERR_PTR(-EINVAL);
 }
 
-void *sdcardfs_alloc_mnt_data(void) {
+void *sdcardfs_alloc_mnt_data(void)
+{
 	return kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
 }
 
-void sdcardfs_kill_sb(struct super_block *sb) {
+void sdcardfs_kill_sb(struct super_block *sb)
+{
 	struct sdcardfs_sb_info *sbi;
+
 	if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
 		sbi = SDCARDFS_SB(sb);
 		mutex_lock(&sdcardfs_super_list_lock);
@@ -432,6 +435,7 @@
 	.kill_sb	= sdcardfs_kill_sb,
 	.fs_flags	= 0,
 };
+MODULE_ALIAS_FS(SDCARDFS_NAME);
 
 static int __init init_sdcardfs_fs(void)
 {
diff --git a/fs/sdcardfs/mmap.c b/fs/sdcardfs/mmap.c
index ac5f3de..51266f5 100644
--- a/fs/sdcardfs/mmap.c
+++ b/fs/sdcardfs/mmap.c
@@ -48,33 +48,54 @@
 	return err;
 }
 
+static int sdcardfs_page_mkwrite(struct vm_area_struct *vma,
+			       struct vm_fault *vmf)
+{
+	int err = 0;
+	struct file *file, *lower_file;
+	const struct vm_operations_struct *lower_vm_ops;
+	struct vm_area_struct lower_vma;
+
+	memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
+	file = lower_vma.vm_file;
+	lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
+	BUG_ON(!lower_vm_ops);
+	if (!lower_vm_ops->page_mkwrite)
+		goto out;
+
+	lower_file = sdcardfs_lower_file(file);
+	/*
+	 * XXX: vm_ops->page_mkwrite may be called in parallel.
+	 * Because we have to resort to temporarily changing the
+	 * vma->vm_file to point to the lower file, a concurrent
+	 * invocation of sdcardfs_page_mkwrite could see a different
+	 * value.  In this workaround, we keep a different copy of the
+	 * vma structure in our stack, so we never expose a different
+	 * value of the vma->vm_file called to us, even temporarily.
+	 * A better fix would be to change the calling semantics of
+	 * ->page_mkwrite to take an explicit file pointer.
+	 */
+	lower_vma.vm_file = lower_file;
+	err = lower_vm_ops->page_mkwrite(&lower_vma, vmf);
+out:
+	return err;
+}
+
 static ssize_t sdcardfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	/*
-     * This function returns zero on purpose in order to support direct IO.
-	 * __dentry_open checks a_ops->direct_IO and returns EINVAL if it is null.
-     *
-	 * However, this function won't be called by certain file operations
-     * including generic fs functions.  * reads and writes are delivered to
-     * the lower file systems and the direct IOs will be handled by them.
-	 *
-     * NOTE: exceptionally, on the recent kernels (since Linux 3.8.x),
-     * swap_writepage invokes this function directly.
+	 * This function should never be called directly.  We need it
+	 * to exist, to get past a check in open_check_o_direct(),
+	 * which is called from do_last().
 	 */
-	printk(KERN_INFO "%s, operation is not supported\n", __func__);
-	return 0;
+	return -EINVAL;
 }
 
-/*
- * XXX: the default address_space_ops for sdcardfs is empty.  We cannot set
- * our inode->i_mapping->a_ops to NULL because too many code paths expect
- * the a_ops vector to be non-NULL.
- */
 const struct address_space_operations sdcardfs_aops = {
-	/* empty on purpose */
 	.direct_IO	= sdcardfs_direct_IO,
 };
 
 const struct vm_operations_struct sdcardfs_vm_ops = {
 	.fault		= sdcardfs_fault,
+	.page_mkwrite	= sdcardfs_page_mkwrite,
 };
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
index 923ba10..2e89b58 100644
--- a/fs/sdcardfs/multiuser.h
+++ b/fs/sdcardfs/multiuser.h
@@ -18,20 +18,27 @@
  * General Public License.
  */
 
-#define MULTIUSER_APP_PER_USER_RANGE 100000
+#define AID_USER_OFFSET     100000 /* offset for uid ranges for each user */
+#define AID_APP_START        10000 /* first app user */
+#define AID_APP_END          19999 /* last app user */
+#define AID_CACHE_GID_START  20000 /* start of gids for apps to mark cached data */
+#define AID_EXT_GID_START    30000 /* start of gids for apps to mark external data */
+#define AID_SHARED_GID_START 50000 /* start of gids for apps in each user to share */
 
 typedef uid_t userid_t;
 typedef uid_t appid_t;
 
-static inline userid_t multiuser_get_user_id(uid_t uid) {
-    return uid / MULTIUSER_APP_PER_USER_RANGE;
+static inline uid_t multiuser_get_uid(userid_t user_id, appid_t app_id)
+{
+	return (user_id * AID_USER_OFFSET) + (app_id % AID_USER_OFFSET);
 }
 
-static inline appid_t multiuser_get_app_id(uid_t uid) {
-    return uid % MULTIUSER_APP_PER_USER_RANGE;
+static inline gid_t multiuser_get_cache_gid(uid_t uid)
+{
+	return uid - AID_APP_START + AID_CACHE_GID_START;
 }
 
-static inline uid_t multiuser_get_uid(userid_t userId, appid_t appId) {
-    return userId * MULTIUSER_APP_PER_USER_RANGE + (appId % MULTIUSER_APP_PER_USER_RANGE);
+static inline gid_t multiuser_get_ext_gid(uid_t uid)
+{
+	return uid - AID_APP_START + AID_EXT_GID_START;
 }
-
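Worked examples for the new helpers (appid 10057 is hypothetical; the constants are the AID_* values defined above):

	multiuser_get_uid(10, 10057);	/* 10 * 100000 + 10057   = 1010057 */
	multiuser_get_cache_gid(10057);	/* 10057 - 10000 + 20000 = 20057   */
	multiuser_get_ext_gid(10057);	/* 10057 - 10000 + 30000 = 30057   */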
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 03776fa..5ea6469 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -20,8 +20,10 @@
 
 #include "sdcardfs.h"
 #include <linux/hashtable.h>
+#include <linux/ctype.h>
 #include <linux/delay.h>
-
+#include <linux/radix-tree.h>
+#include <linux/dcache.h>
 
 #include <linux/init.h>
 #include <linux/module.h>
@@ -31,35 +33,51 @@
 
 struct hashtable_entry {
 	struct hlist_node hlist;
-	const char *key;
+	struct hlist_node dlist; /* for deletion cleanup */
+	struct qstr key;
 	atomic_t value;
 };
 
 static DEFINE_HASHTABLE(package_to_appid, 8);
+static DEFINE_HASHTABLE(package_to_userid, 8);
+static DEFINE_HASHTABLE(ext_to_groupid, 8);
+
 
 static struct kmem_cache *hashtable_entry_cachep;
 
-static unsigned int str_hash(const char *key) {
-	int i;
-	unsigned int h = strlen(key);
-	char *data = (char *)key;
+static unsigned int full_name_case_hash(const void *salt, const unsigned char *name, unsigned int len)
+{
+	unsigned long hash = init_name_hash(salt);
 
-	for (i = 0; i < strlen(key); i++) {
-		h = h * 31 + *data;
-		data++;
-	}
-	return h;
+	while (len--)
+		hash = partial_name_hash(tolower(*name++), hash);
+	return end_name_hash(hash);
 }
 
-appid_t get_appid(const char *app_name)
+static inline void qstr_init(struct qstr *q, const char *name)
+{
+	q->name = name;
+	q->len = strlen(q->name);
+	q->hash = full_name_case_hash(0, q->name, q->len);
+}
+
+static inline int qstr_copy(const struct qstr *src, struct qstr *dest)
+{
+	dest->name = kstrdup(src->name, GFP_KERNEL);
+	dest->hash_len = src->hash_len;
+	return !!dest->name;
+}
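Because qstr_init() hashes with full_name_case_hash(), case variants of a package name land in the same bucket; a small illustration (names hypothetical):

	struct qstr a, b;

	qstr_init(&a, "Com.Example.App");
	qstr_init(&b, "com.example.app");
	/* a.hash == b.hash: every byte is folded with tolower() before hashing,
	 * so both probe the same package_to_appid bucket; qstr_case_eq() (from
	 * sdcardfs.h) then performs the actual case-insensitive comparison.
	 */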
+
+
+static appid_t __get_appid(const struct qstr *key)
 {
 	struct hashtable_entry *hash_cur;
-	unsigned int hash = str_hash(app_name);
+	unsigned int hash = key->hash;
 	appid_t ret_id;
 
 	rcu_read_lock();
 	hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
-		if (!strcasecmp(app_name, hash_cur->key)) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
 			ret_id = atomic_read(&hash_cur->value);
 			rcu_read_unlock();
 			return ret_id;
@@ -69,53 +87,117 @@
 	return 0;
 }
 
+appid_t get_appid(const char *key)
+{
+	struct qstr q;
+
+	qstr_init(&q, key);
+	return __get_appid(&q);
+}
+
+static appid_t __get_ext_gid(const struct qstr *key)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = key->hash;
+	appid_t ret_id;
+
+	rcu_read_lock();
+	hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
+			ret_id = atomic_read(&hash_cur->value);
+			rcu_read_unlock();
+			return ret_id;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+appid_t get_ext_gid(const char *key)
+{
+	struct qstr q;
+
+	qstr_init(&q, key);
+	return __get_ext_gid(&q);
+}
+
+static appid_t __is_excluded(const struct qstr *app_name, userid_t user)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = app_name->hash;
+
+	rcu_read_lock();
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (atomic_read(&hash_cur->value) == user &&
+				qstr_case_eq(app_name, &hash_cur->key)) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+appid_t is_excluded(const char *key, userid_t user)
+{
+	struct qstr q;
+	qstr_init(&q, key);
+	return __is_excluded(&q, user);
+}
+
 /* Kernel has already enforced everything we returned through
  * derive_permissions_locked(), so this is used to lock down access
- * even further, such as enforcing that apps hold sdcard_rw. */
-int check_caller_access_to_name(struct inode *parent_node, const char* name) {
+ * even further, such as enforcing that apps hold sdcard_rw.
+ */
+int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name)
+{
+	struct qstr q_autorun = QSTR_LITERAL("autorun.inf");
+	struct qstr q__android_secure = QSTR_LITERAL(".android_secure");
+	struct qstr q_android_secure = QSTR_LITERAL("android_secure");
 
 	/* Always block security-sensitive files at root */
 	if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) {
-		if (!strcasecmp(name, "autorun.inf")
-			|| !strcasecmp(name, ".android_secure")
-			|| !strcasecmp(name, "android_secure")) {
+		if (qstr_case_eq(name, &q_autorun)
+			|| qstr_case_eq(name, &q__android_secure)
+			|| qstr_case_eq(name, &q_android_secure)) {
 			return 0;
 		}
 	}
 
 	/* Root always has access; access for any other UIDs should always
-	 * be controlled through packages.list. */
-	if (from_kuid(&init_user_ns, current_fsuid()) == 0) {
+	 * be controlled through packages.list.
+	 */
+	if (from_kuid(&init_user_ns, current_fsuid()) == 0)
 		return 1;
-	}
 
 	/* No extra permissions to enforce */
 	return 1;
 }
 
 /* This function is used when file opening. The open flags must be
- * checked before calling check_caller_access_to_name() */
-int open_flags_to_access_mode(int open_flags) {
-	if((open_flags & O_ACCMODE) == O_RDONLY) {
+ * checked before calling check_caller_access_to_name()
+ */
+int open_flags_to_access_mode(int open_flags)
+{
+	if ((open_flags & O_ACCMODE) == O_RDONLY)
 		return 0; /* R_OK */
-	} else if ((open_flags & O_ACCMODE) == O_WRONLY) {
+	if ((open_flags & O_ACCMODE) == O_WRONLY)
 		return 1; /* W_OK */
-	} else {
-		/* Probably O_RDRW, but treat as default to be safe */
+	/* Probably O_RDWR, but treat as default to be safe */
 		return 1; /* R_OK | W_OK */
-	}
 }
 
-static struct hashtable_entry *alloc_packagelist_entry(const char *key,
+static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
 		appid_t value)
 {
 	struct hashtable_entry *ret = kmem_cache_alloc(hashtable_entry_cachep,
 			GFP_KERNEL);
 	if (!ret)
 		return NULL;
+	INIT_HLIST_NODE(&ret->dlist);
+	INIT_HLIST_NODE(&ret->hlist);
 
-	ret->key = kstrdup(key, GFP_KERNEL);
-	if (!ret->key) {
+	if (!qstr_copy(key, &ret->key)) {
 		kmem_cache_free(hashtable_entry_cachep, ret);
 		return NULL;
 	}
@@ -124,81 +206,249 @@
 	return ret;
 }
 
-static int insert_packagelist_entry_locked(const char *key, appid_t value)
+static int insert_packagelist_appid_entry_locked(const struct qstr *key, appid_t value)
 {
 	struct hashtable_entry *hash_cur;
 	struct hashtable_entry *new_entry;
-	unsigned int hash = str_hash(key);
+	unsigned int hash = key->hash;
 
 	hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
-		if (!strcasecmp(key, hash_cur->key)) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
 			atomic_set(&hash_cur->value, value);
 			return 0;
 		}
 	}
-	new_entry = alloc_packagelist_entry(key, value);
+	new_entry = alloc_hashtable_entry(key, value);
 	if (!new_entry)
 		return -ENOMEM;
 	hash_add_rcu(package_to_appid, &new_entry->hlist, hash);
 	return 0;
 }
 
-static void fixup_perms(struct super_block *sb, const char *key) {
-	if (sb && sb->s_magic == SDCARDFS_SUPER_MAGIC) {
-		fixup_perms_recursive(sb->s_root, key, strlen(key));
+static int insert_ext_gid_entry_locked(const struct qstr *key, appid_t value)
+{
+	struct hashtable_entry *hash_cur;
+	struct hashtable_entry *new_entry;
+	unsigned int hash = key->hash;
+
+	/* An extension can only belong to one gid */
+	hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key))
+			return -EINVAL;
+	}
+	new_entry = alloc_hashtable_entry(key, value);
+	if (!new_entry)
+		return -ENOMEM;
+	hash_add_rcu(ext_to_groupid, &new_entry->hlist, hash);
+	return 0;
+}
+
+static int insert_userid_exclude_entry_locked(const struct qstr *key, userid_t value)
+{
+	struct hashtable_entry *hash_cur;
+	struct hashtable_entry *new_entry;
+	unsigned int hash = key->hash;
+
+	/* Only insert if not already present */
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (atomic_read(&hash_cur->value) == value &&
+				qstr_case_eq(key, &hash_cur->key))
+			return 0;
+	}
+	new_entry = alloc_hashtable_entry(key, value);
+	if (!new_entry)
+		return -ENOMEM;
+	hash_add_rcu(package_to_userid, &new_entry->hlist, hash);
+	return 0;
+}
+
+static void fixup_all_perms_name(const struct qstr *key)
+{
+	struct sdcardfs_sb_info *sbinfo;
+	struct limit_search limit = {
+		.flags = BY_NAME,
+		.name = QSTR_INIT(key->name, key->len),
+	};
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo_has_sdcard_magic(sbinfo))
+			fixup_perms_recursive(sbinfo->sb->s_root, &limit);
 	}
 }
 
-static void fixup_all_perms(const char *key)
+static void fixup_all_perms_name_userid(const struct qstr *key, userid_t userid)
 {
 	struct sdcardfs_sb_info *sbinfo;
-	list_for_each_entry(sbinfo, &sdcardfs_super_list, list)
-		if (sbinfo)
-			fixup_perms(sbinfo->sb, key);
+	struct limit_search limit = {
+		.flags = BY_NAME | BY_USERID,
+		.name = QSTR_INIT(key->name, key->len),
+		.userid = userid,
+	};
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo_has_sdcard_magic(sbinfo))
+			fixup_perms_recursive(sbinfo->sb->s_root, &limit);
+	}
 }
 
-static int insert_packagelist_entry(const char *key, appid_t value)
+static void fixup_all_perms_userid(userid_t userid)
+{
+	struct sdcardfs_sb_info *sbinfo;
+	struct limit_search limit = {
+		.flags = BY_USERID,
+		.userid = userid,
+	};
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo_has_sdcard_magic(sbinfo))
+			fixup_perms_recursive(sbinfo->sb->s_root, &limit);
+	}
+}
+
+static int insert_packagelist_entry(const struct qstr *key, appid_t value)
 {
 	int err;
 
 	mutex_lock(&sdcardfs_super_list_lock);
-	err = insert_packagelist_entry_locked(key, value);
+	err = insert_packagelist_appid_entry_locked(key, value);
 	if (!err)
-		fixup_all_perms(key);
+		fixup_all_perms_name(key);
 	mutex_unlock(&sdcardfs_super_list_lock);
 
 	return err;
 }
 
-static void free_packagelist_entry(struct hashtable_entry *entry)
+static int insert_ext_gid_entry(const struct qstr *key, appid_t value)
 {
-	kfree(entry->key);
-	hash_del_rcu(&entry->hlist);
+	int err;
+
+	mutex_lock(&sdcardfs_super_list_lock);
+	err = insert_ext_gid_entry_locked(key, value);
+	mutex_unlock(&sdcardfs_super_list_lock);
+
+	return err;
+}
+
+static int insert_userid_exclude_entry(const struct qstr *key, userid_t value)
+{
+	int err;
+
+	mutex_lock(&sdcardfs_super_list_lock);
+	err = insert_userid_exclude_entry_locked(key, value);
+	if (!err)
+		fixup_all_perms_name_userid(key, value);
+	mutex_unlock(&sdcardfs_super_list_lock);
+
+	return err;
+}
+
+static void free_hashtable_entry(struct hashtable_entry *entry)
+{
+	kfree(entry->key.name);
 	kmem_cache_free(hashtable_entry_cachep, entry);
 }
 
-static void remove_packagelist_entry_locked(const char *key)
+static void remove_packagelist_entry_locked(const struct qstr *key)
 {
 	struct hashtable_entry *hash_cur;
-	unsigned int hash = str_hash(key);
+	unsigned int hash = key->hash;
+	struct hlist_node *h_t;
+	HLIST_HEAD(free_list);
 
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
+			hash_del_rcu(&hash_cur->hlist);
+			hlist_add_head(&hash_cur->dlist, &free_list);
+		}
+	}
 	hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
-		if (!strcasecmp(key, hash_cur->key)) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
+			hash_del_rcu(&hash_cur->hlist);
+			hlist_add_head(&hash_cur->dlist, &free_list);
+			break;
+		}
+	}
+	synchronize_rcu();
+	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist)
+		free_hashtable_entry(hash_cur);
+}
+
+static void remove_packagelist_entry(const struct qstr *key)
+{
+	mutex_lock(&sdcardfs_super_list_lock);
+	remove_packagelist_entry_locked(key);
+	fixup_all_perms_name(key);
+	mutex_unlock(&sdcardfs_super_list_lock);
+}
+
+static void remove_ext_gid_entry_locked(const struct qstr *key, gid_t group)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = key->hash;
+
+	hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key) && atomic_read(&hash_cur->value) == group) {
 			hash_del_rcu(&hash_cur->hlist);
 			synchronize_rcu();
-			free_packagelist_entry(hash_cur);
-			return;
+			free_hashtable_entry(hash_cur);
+			break;
 		}
 	}
 }
 
-static void remove_packagelist_entry(const char *key)
+static void remove_ext_gid_entry(const struct qstr *key, gid_t group)
 {
 	mutex_lock(&sdcardfs_super_list_lock);
-	remove_packagelist_entry_locked(key);
-	fixup_all_perms(key);
+	remove_ext_gid_entry_locked(key, group);
 	mutex_unlock(&sdcardfs_super_list_lock);
-	return;
+}
+
+static void remove_userid_all_entry_locked(userid_t userid)
+{
+	struct hashtable_entry *hash_cur;
+	struct hlist_node *h_t;
+	HLIST_HEAD(free_list);
+	int i;
+
+	hash_for_each_rcu(package_to_userid, i, hash_cur, hlist) {
+		if (atomic_read(&hash_cur->value) == userid) {
+			hash_del_rcu(&hash_cur->hlist);
+			hlist_add_head(&hash_cur->dlist, &free_list);
+		}
+	}
+	synchronize_rcu();
+	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist) {
+		free_hashtable_entry(hash_cur);
+	}
+}
+
+static void remove_userid_all_entry(userid_t userid)
+{
+	mutex_lock(&sdcardfs_super_list_lock);
+	remove_userid_all_entry_locked(userid);
+	fixup_all_perms_userid(userid);
+	mutex_unlock(&sdcardfs_super_list_lock);
+}
+
+static void remove_userid_exclude_entry_locked(const struct qstr *key, userid_t userid)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = key->hash;
+
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key) &&
+				atomic_read(&hash_cur->value) == userid) {
+			hash_del_rcu(&hash_cur->hlist);
+			synchronize_rcu();
+			free_hashtable_entry(hash_cur);
+			break;
+		}
+	}
+}
+
+static void remove_userid_exclude_entry(const struct qstr *key, userid_t userid)
+{
+	mutex_lock(&sdcardfs_super_list_lock);
+	remove_userid_exclude_entry_locked(key, userid);
+	fixup_all_perms_name_userid(key, userid);
+	mutex_unlock(&sdcardfs_super_list_lock);
 }
 
 static void packagelist_destroy(void)
@@ -207,39 +457,66 @@
 	struct hlist_node *h_t;
 	HLIST_HEAD(free_list);
 	int i;
+
 	mutex_lock(&sdcardfs_super_list_lock);
 	hash_for_each_rcu(package_to_appid, i, hash_cur, hlist) {
 		hash_del_rcu(&hash_cur->hlist);
-		hlist_add_head(&hash_cur->hlist, &free_list);
-
+		hlist_add_head(&hash_cur->dlist, &free_list);
+	}
+	hash_for_each_rcu(package_to_userid, i, hash_cur, hlist) {
+		hash_del_rcu(&hash_cur->hlist);
+		hlist_add_head(&hash_cur->dlist, &free_list);
 	}
 	synchronize_rcu();
-	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, hlist)
-		free_packagelist_entry(hash_cur);
+	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist)
+		free_hashtable_entry(hash_cur);
 	mutex_unlock(&sdcardfs_super_list_lock);
-	printk(KERN_INFO "sdcardfs: destroyed packagelist pkgld\n");
+	pr_info("sdcardfs: destroyed packagelist pkgld\n");
 }
 
-struct package_appid {
+#define SDCARDFS_CONFIGFS_ATTR(_pfx, _name)			\
+static struct configfs_attribute _pfx##attr_##_name = {	\
+	.ca_name	= __stringify(_name),		\
+	.ca_mode	= S_IRUGO | S_IWUGO,		\
+	.ca_owner	= THIS_MODULE,			\
+	.show		= _pfx##_name##_show,		\
+	.store		= _pfx##_name##_store,		\
+}
+
+#define SDCARDFS_CONFIGFS_ATTR_RO(_pfx, _name)			\
+static struct configfs_attribute _pfx##attr_##_name = {	\
+	.ca_name	= __stringify(_name),		\
+	.ca_mode	= S_IRUGO,			\
+	.ca_owner	= THIS_MODULE,			\
+	.show		= _pfx##_name##_show,		\
+}
+
+#define SDCARDFS_CONFIGFS_ATTR_WO(_pfx, _name)			\
+static struct configfs_attribute _pfx##attr_##_name = {	\
+	.ca_name	= __stringify(_name),		\
+	.ca_mode	= S_IWUGO,			\
+	.ca_owner	= THIS_MODULE,			\
+	.store		= _pfx##_name##_store,		\
+}
+
+struct package_details {
 	struct config_item item;
-	int add_pid;
+	struct qstr name;
 };
 
-static inline struct package_appid *to_package_appid(struct config_item *item)
+static inline struct package_details *to_package_details(struct config_item *item)
 {
-	return item ? container_of(item, struct package_appid, item) : NULL;
+	return item ? container_of(item, struct package_details, item) : NULL;
 }
 
-static ssize_t package_appid_attr_show(struct config_item *item,
-				      char *page)
+static ssize_t package_details_appid_show(struct config_item *item, char *page)
 {
-	return scnprintf(page, PAGE_SIZE, "%u\n", get_appid(item->ci_name));
+	return scnprintf(page, PAGE_SIZE, "%u\n", __get_appid(&to_package_details(item)->name));
 }
 
-static ssize_t package_appid_attr_store(struct config_item *item,
+static ssize_t package_details_appid_store(struct config_item *item,
 				       const char *page, size_t count)
 {
-	struct package_appid *package_appid = to_package_appid(item);
 	unsigned int tmp;
 	int ret;
 
@@ -247,84 +524,263 @@
 	if (ret)
 		return ret;
 
-	ret = insert_packagelist_entry(item->ci_name, tmp);
-	package_appid->add_pid = tmp;
+	ret = insert_packagelist_entry(&to_package_details(item)->name, tmp);
+
 	if (ret)
 		return ret;
 
 	return count;
 }
 
-static struct configfs_attribute package_appid_attr_add_pid = {
-	.ca_owner = THIS_MODULE,
-	.ca_name = "appid",
-	.ca_mode = S_IRUGO | S_IWUGO,
-	.show = package_appid_attr_show,
-	.store = package_appid_attr_store,
-};
+static ssize_t package_details_excluded_userids_show(struct config_item *item,
+				      char *page)
+{
+	struct package_details *package_details = to_package_details(item);
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = package_details->name.hash;
+	int count = 0;
 
-static struct configfs_attribute *package_appid_attrs[] = {
-	&package_appid_attr_add_pid,
+	rcu_read_lock();
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(&package_details->name, &hash_cur->key))
+			count += scnprintf(page + count, PAGE_SIZE - count,
+					"%d ", atomic_read(&hash_cur->value));
+	}
+	rcu_read_unlock();
+	if (count)
+		count--;
+	count += scnprintf(page + count, PAGE_SIZE - count, "\n");
+	return count;
+}
+
+static ssize_t package_details_excluded_userids_store(struct config_item *item,
+				       const char *page, size_t count)
+{
+	unsigned int tmp;
+	int ret;
+
+	ret = kstrtouint(page, 10, &tmp);
+	if (ret)
+		return ret;
+
+	ret = insert_userid_exclude_entry(&to_package_details(item)->name, tmp);
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t package_details_clear_userid_store(struct config_item *item,
+				       const char *page, size_t count)
+{
+	unsigned int tmp;
+	int ret;
+
+	ret = kstrtouint(page, 10, &tmp);
+	if (ret)
+		return ret;
+	remove_userid_exclude_entry(&to_package_details(item)->name, tmp);
+	return count;
+}
+
+static void package_details_release(struct config_item *item)
+{
+	struct package_details *package_details = to_package_details(item);
+
+	pr_info("sdcardfs: removing %s\n", package_details->name.name);
+	remove_packagelist_entry(&package_details->name);
+	kfree(package_details->name.name);
+	kfree(package_details);
+}
+
+SDCARDFS_CONFIGFS_ATTR(package_details_, appid);
+SDCARDFS_CONFIGFS_ATTR(package_details_, excluded_userids);
+SDCARDFS_CONFIGFS_ATTR_WO(package_details_, clear_userid);
+
+static struct configfs_attribute *package_details_attrs[] = {
+	&package_details_attr_appid,
+	&package_details_attr_excluded_userids,
+	&package_details_attr_clear_userid,
 	NULL,
 };
 
-static void package_appid_release(struct config_item *item)
-{
-	printk(KERN_INFO "sdcardfs: removing %s\n", item->ci_dentry->d_name.name);
-	/* item->ci_name is freed already, so we rely on the dentry */
-	remove_packagelist_entry(item->ci_dentry->d_name.name);
-	kfree(to_package_appid(item));
-}
-
-static struct configfs_item_operations package_appid_item_ops = {
-	.release		= package_appid_release,
+static struct configfs_item_operations package_details_item_ops = {
+	.release = package_details_release,
 };
 
 static struct config_item_type package_appid_type = {
-	.ct_item_ops	= &package_appid_item_ops,
-	.ct_attrs	= package_appid_attrs,
+	.ct_item_ops	= &package_details_item_ops,
+	.ct_attrs	= package_details_attrs,
 	.ct_owner	= THIS_MODULE,
 };
 
-
-struct sdcardfs_packages {
+struct extensions_value {
 	struct config_group group;
+	unsigned int num;
 };
 
-static inline struct sdcardfs_packages *to_sdcardfs_packages(struct config_item *item)
+struct extension_details {
+	struct config_item item;
+	struct qstr name;
+	unsigned int num;
+};
+
+static inline struct extensions_value *to_extensions_value(struct config_item *item)
 {
-	return item ? container_of(to_config_group(item), struct sdcardfs_packages, group) : NULL;
+	return item ? container_of(to_config_group(item), struct extensions_value, group) : NULL;
 }
 
-static struct config_item *sdcardfs_packages_make_item(struct config_group *group, const char *name)
+static inline struct extension_details *to_extension_details(struct config_item *item)
 {
-	struct package_appid *package_appid;
+	return item ? container_of(item, struct extension_details, item) : NULL;
+}
 
-	package_appid = kzalloc(sizeof(struct package_appid), GFP_KERNEL);
-	if (!package_appid)
+static void extension_details_release(struct config_item *item)
+{
+	struct extension_details *extension_details = to_extension_details(item);
+
+	pr_info("sdcardfs: No longer mapping %s files to gid %d\n",
+			extension_details->name.name, extension_details->num);
+	remove_ext_gid_entry(&extension_details->name, extension_details->num);
+	kfree(extension_details->name.name);
+	kfree(extension_details);
+}
+
+static struct configfs_item_operations extension_details_item_ops = {
+	.release = extension_details_release,
+};
+
+static struct config_item_type extension_details_type = {
+	.ct_item_ops = &extension_details_item_ops,
+	.ct_owner = THIS_MODULE,
+};
+
+static struct config_item *extension_details_make_item(struct config_group *group, const char *name)
+{
+	struct extensions_value *extensions_value = to_extensions_value(&group->cg_item);
+	struct extension_details *extension_details = kzalloc(sizeof(struct extension_details), GFP_KERNEL);
+	const char *tmp;
+	int ret;
+
+	if (!extension_details)
 		return ERR_PTR(-ENOMEM);
 
-	config_item_init_type_name(&package_appid->item, name,
-				   &package_appid_type);
+	tmp = kstrdup(name, GFP_KERNEL);
+	if (!tmp) {
+		kfree(extension_details);
+		return ERR_PTR(-ENOMEM);
+	}
+	qstr_init(&extension_details->name, tmp);
+	ret = insert_ext_gid_entry(&extension_details->name, extensions_value->num);
 
-	package_appid->add_pid = 0;
+	if (ret) {
+		kfree(extension_details->name.name);
+		kfree(extension_details);
+		return ERR_PTR(ret);
+	}
+	config_item_init_type_name(&extension_details->item, name, &extension_details_type);
 
-	return &package_appid->item;
+	return &extension_details->item;
 }
 
-static ssize_t packages_attr_show(struct config_item *item,
-					 char *page)
+static struct configfs_group_operations extensions_value_group_ops = {
+	.make_item = extension_details_make_item,
+};
+
+static struct config_item_type extensions_name_type = {
+	.ct_group_ops	= &extensions_value_group_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct config_group *extensions_make_group(struct config_group *group, const char *name)
 {
-	struct hashtable_entry *hash_cur;
+	struct extensions_value *extensions_value;
+	unsigned int tmp;
+	int ret;
+
+	extensions_value = kzalloc(sizeof(struct extensions_value), GFP_KERNEL);
+	if (!extensions_value)
+		return ERR_PTR(-ENOMEM);
+	ret = kstrtouint(name, 10, &tmp);
+	if (ret) {
+		kfree(extensions_value);
+		return ERR_PTR(ret);
+	}
+
+	extensions_value->num = tmp;
+	config_group_init_type_name(&extensions_value->group, name,
+						&extensions_name_type);
+	return &extensions_value->group;
+}
+
+static void extensions_drop_group(struct config_group *group, struct config_item *item)
+{
+	struct extensions_value *value = to_extensions_value(item);
+
+	pr_info("sdcardfs: No longer mapping any files to gid %d\n", value->num);
+	kfree(value);
+}
+
+static struct configfs_group_operations extensions_group_ops = {
+	.make_group	= extensions_make_group,
+	.drop_item	= extensions_drop_group,
+};
+
+static struct config_item_type extensions_type = {
+	.ct_group_ops	= &extensions_group_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+struct config_group extension_group = {
+	.cg_item = {
+		.ci_namebuf = "extensions",
+		.ci_type = &extensions_type,
+	},
+};
+
+static struct config_item *packages_make_item(struct config_group *group, const char *name)
+{
+	struct package_details *package_details;
+	const char *tmp;
+
+	package_details = kzalloc(sizeof(struct package_details), GFP_KERNEL);
+	if (!package_details)
+		return ERR_PTR(-ENOMEM);
+	tmp = kstrdup(name, GFP_KERNEL);
+	if (!tmp) {
+		kfree(package_details);
+		return ERR_PTR(-ENOMEM);
+	}
+	qstr_init(&package_details->name, tmp);
+	config_item_init_type_name(&package_details->item, name,
+						&package_appid_type);
+
+	return &package_details->item;
+}
+
+static ssize_t packages_list_show(struct config_item *item, char *page)
+{
+	struct hashtable_entry *hash_cur_app;
+	struct hashtable_entry *hash_cur_user;
 	int i;
 	int count = 0, written = 0;
 	const char errormsg[] = "<truncated>\n";
+	unsigned int hash;
 
 	rcu_read_lock();
-	hash_for_each_rcu(package_to_appid, i, hash_cur, hlist) {
+	hash_for_each_rcu(package_to_appid, i, hash_cur_app, hlist) {
 		written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n",
-					(const char *)hash_cur->key, atomic_read(&hash_cur->value));
-		if (count + written == PAGE_SIZE - sizeof(errormsg)) {
+					hash_cur_app->key.name, atomic_read(&hash_cur_app->value));
+		hash = hash_cur_app->key.hash;
+		hash_for_each_possible_rcu(package_to_userid, hash_cur_user, hlist, hash) {
+			if (qstr_case_eq(&hash_cur_app->key, &hash_cur_user->key)) {
+				written += scnprintf(page + count + written - 1,
+					PAGE_SIZE - sizeof(errormsg) - count - written + 1,
+					" %d\n", atomic_read(&hash_cur_user->value)) - 1;
+			}
+		}
+		if (count + written == PAGE_SIZE - sizeof(errormsg) - 1) {
 			count += scnprintf(page + count, PAGE_SIZE - count, errormsg);
 			break;
 		}
@@ -335,63 +791,76 @@
 	return count;
 }
 
-static struct configfs_attribute sdcardfs_packages_attr_description = {
-	.ca_owner = THIS_MODULE,
-	.ca_name = "packages_gid.list",
-	.ca_mode = S_IRUGO,
-	.show = packages_attr_show,
-};
-
-static struct configfs_attribute *sdcardfs_packages_attrs[] = {
-	&sdcardfs_packages_attr_description,
-	NULL,
-};
-
-static void sdcardfs_packages_release(struct config_item *item)
+static ssize_t packages_remove_userid_store(struct config_item *item,
+				       const char *page, size_t count)
 {
+	unsigned int tmp;
+	int ret;
 
-	printk(KERN_INFO "sdcardfs: destroyed something?\n");
-	kfree(to_sdcardfs_packages(item));
+	ret = kstrtouint(page, 10, &tmp);
+	if (ret)
+		return ret;
+	remove_userid_all_entry(tmp);
+	return count;
 }
 
-static struct configfs_item_operations sdcardfs_packages_item_ops = {
-	.release	= sdcardfs_packages_release,
+static struct configfs_attribute packages_attr_packages_gid_list = {
+	.ca_name	= "packages_gid.list",
+	.ca_mode	= S_IRUGO,
+	.ca_owner	= THIS_MODULE,
+	.show		= packages_list_show,
+};
+
+SDCARDFS_CONFIGFS_ATTR_WO(packages_, remove_userid);
+
+static struct configfs_attribute *packages_attrs[] = {
+	&packages_attr_packages_gid_list,
+	&packages_attr_remove_userid,
+	NULL,
 };
 
 /*
  * Note that, since no extra work is required on ->drop_item(),
  * no ->drop_item() is provided.
  */
-static struct configfs_group_operations sdcardfs_packages_group_ops = {
-	.make_item	= sdcardfs_packages_make_item,
+static struct configfs_group_operations packages_group_ops = {
+	.make_item	= packages_make_item,
 };
 
-static struct config_item_type sdcardfs_packages_type = {
-	.ct_item_ops	= &sdcardfs_packages_item_ops,
-	.ct_group_ops	= &sdcardfs_packages_group_ops,
-	.ct_attrs	= sdcardfs_packages_attrs,
+static struct config_item_type packages_type = {
+	.ct_group_ops	= &packages_group_ops,
+	.ct_attrs	= packages_attrs,
 	.ct_owner	= THIS_MODULE,
 };
 
-static struct configfs_subsystem sdcardfs_packages_subsys = {
+struct config_group *sd_default_groups[] = {
+	&extension_group,
+	NULL,
+};
+
+static struct configfs_subsystem sdcardfs_packages = {
 	.su_group = {
 		.cg_item = {
 			.ci_namebuf = "sdcardfs",
-			.ci_type = &sdcardfs_packages_type,
+			.ci_type = &packages_type,
 		},
 	},
 };
 
 static int configfs_sdcardfs_init(void)
 {
-	int ret;
-	struct configfs_subsystem *subsys = &sdcardfs_packages_subsys;
+	int ret, i;
+	struct configfs_subsystem *subsys = &sdcardfs_packages;
 
 	config_group_init(&subsys->su_group);
+	for (i = 0; sd_default_groups[i]; i++) {
+		config_group_init(sd_default_groups[i]);
+		configfs_add_default_group(sd_default_groups[i], &subsys->su_group);
+	}
 	mutex_init(&subsys->su_mutex);
 	ret = configfs_register_subsystem(subsys);
 	if (ret) {
-		printk(KERN_ERR "Error %d while registering subsystem %s\n",
+		pr_err("Error %d while registering subsystem %s\n",
 		       ret,
 		       subsys->su_group.cg_item.ci_namebuf);
 	}
@@ -400,7 +869,7 @@
 
 static void configfs_sdcardfs_exit(void)
 {
-	configfs_unregister_subsystem(&sdcardfs_packages_subsys);
+	configfs_unregister_subsystem(&sdcardfs_packages);
 }
 
 int packagelist_init(void)
@@ -409,18 +878,17 @@
 		kmem_cache_create("packagelist_hashtable_entry",
 					sizeof(struct hashtable_entry), 0, 0, NULL);
 	if (!hashtable_entry_cachep) {
-		printk(KERN_ERR "sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
+		pr_err("sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
 		return -ENOMEM;
 	}
 
 	configfs_sdcardfs_init();
-        return 0;
+	return 0;
 }
 
 void packagelist_exit(void)
 {
 	configfs_sdcardfs_exit();
 	packagelist_destroy();
-	if (hashtable_entry_cachep)
-		kmem_cache_destroy(hashtable_entry_cachep);
+	kmem_cache_destroy(hashtable_entry_cachep);
 }
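
For reference, the SDCARDFS_CONFIGFS_ATTR(package_details_, appid) line above expands, purely mechanically per the macro this patch introduces, to the attribute below. A write to it reaches package_details_appid_store(), which calls insert_packagelist_entry() and, on success, fixup_all_perms_name(). Shown here only as an illustration; it is not an additional change.

static struct configfs_attribute package_details_attr_appid = {
	.ca_name	= "appid",
	.ca_mode	= S_IRUGO | S_IWUGO,
	.ca_owner	= THIS_MODULE,
	.show		= package_details_appid_show,
	.store		= package_details_appid_store,
};
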
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 66a97ef..380982b 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -29,6 +29,7 @@
 #include <linux/dcache.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -52,7 +53,7 @@
 #define SDCARDFS_ROOT_INO     1
 
 /* useful for tracking code reachability */
-#define UDBG printk(KERN_DEFAULT "DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
+#define UDBG pr_default("DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
 
 #define SDCARDFS_DIRENT_SIZE 256
 
@@ -65,6 +66,9 @@
 #define AID_SDCARD_PICS   1033	/* external storage photos access */
 #define AID_SDCARD_AV     1034	/* external storage audio/video access */
 #define AID_SDCARD_ALL    1035	/* access all users external storage */
+#define AID_MEDIA_OBB     1059  /* obb files */
+
+#define AID_SDCARD_IMAGE  1057
 
 #define AID_PACKAGE_INFO  1027
 
@@ -83,59 +87,66 @@
 	} while (0)
 
 /* OVERRIDE_CRED() and REVERT_CRED()
- * 	OVERRID_CRED()
- * 		backup original task->cred
- * 		and modifies task->cred->fsuid/fsgid to specified value.
+ *	OVERRIDE_CRED()
+ *		backup original task->cred
+ *		and modifies task->cred->fsuid/fsgid to specified value.
  *	REVERT_CRED()
- * 		restore original task->cred->fsuid/fsgid.
+ *		restore original task->cred->fsuid/fsgid.
  * These two macro should be used in pair, and OVERRIDE_CRED() should be
  * placed at the beginning of a function, right after variable declaration.
  */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred)		\
-	saved_cred = override_fsids(sdcardfs_sbi);	\
-	if (!saved_cred) { return -ENOMEM; }
+#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info)		\
+	do {	\
+		saved_cred = override_fsids(sdcardfs_sbi, info);	\
+		if (!saved_cred)	\
+			return -ENOMEM;	\
+	} while (0)
 
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred)	\
-	saved_cred = override_fsids(sdcardfs_sbi);	\
-	if (!saved_cred) { return ERR_PTR(-ENOMEM); }
+#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info)	\
+	do {	\
+		saved_cred = override_fsids(sdcardfs_sbi, info);	\
+		if (!saved_cred)	\
+			return ERR_PTR(-ENOMEM);	\
+	} while (0)
 
 #define REVERT_CRED(saved_cred)	revert_fsids(saved_cred)
 
-#define DEBUG_CRED()		\
-	printk("KAKJAGI: %s:%d fsuid %d fsgid %d\n", 	\
-		__FUNCTION__, __LINE__, 		\
-		(int)current->cred->fsuid, 		\
-		(int)current->cred->fsgid);
-
 /* Android 5.0 support */
 
 /* Permission mode for a specific node. Controls how file permissions
- * are derived for children nodes. */
+ * are derived for children nodes.
+ */
 typedef enum {
-    /* Nothing special; this node should just inherit from its parent. */
-    PERM_INHERIT,
-    /* This node is one level above a normal root; used for legacy layouts
-     * which use the first level to represent user_id. */
-    PERM_PRE_ROOT,
-    /* This node is "/" */
-    PERM_ROOT,
-    /* This node is "/Android" */
-    PERM_ANDROID,
-    /* This node is "/Android/data" */
-    PERM_ANDROID_DATA,
-    /* This node is "/Android/obb" */
-    PERM_ANDROID_OBB,
-    /* This node is "/Android/media" */
-    PERM_ANDROID_MEDIA,
+	/* Nothing special; this node should just inherit from its parent. */
+	PERM_INHERIT,
+	/* This node is one level above a normal root; used for legacy layouts
+	 * which use the first level to represent user_id.
+	 */
+	PERM_PRE_ROOT,
+	/* This node is "/" */
+	PERM_ROOT,
+	/* This node is "/Android" */
+	PERM_ANDROID,
+	/* This node is "/Android/data" */
+	PERM_ANDROID_DATA,
+	/* This node is "/Android/obb" */
+	PERM_ANDROID_OBB,
+	/* This node is "/Android/media" */
+	PERM_ANDROID_MEDIA,
+	/* This node is "/Android/[data|media|obb]/[package]" */
+	PERM_ANDROID_PACKAGE,
+	/* This node is "/Android/[data|media|obb]/[package]/cache" */
+	PERM_ANDROID_PACKAGE_CACHE,
 } perm_t;
 
 struct sdcardfs_sb_info;
 struct sdcardfs_mount_options;
+struct sdcardfs_inode_info;
 
 /* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred * override_fsids(struct sdcardfs_sb_info* sbi);
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info);
 /* Do not directly use this function, use REVERT_CRED() instead. */
-void revert_fsids(const struct cred * old_cred);
+void revert_fsids(const struct cred *old_cred);
 
 /* operations vectors defined in specific files */
 extern const struct file_operations sdcardfs_main_fops;
@@ -175,6 +186,8 @@
 	userid_t userid;
 	uid_t d_uid;
 	bool under_android;
+	bool under_cache;
+	bool under_obb;
 	/* top folder for ownership */
 	struct inode *top;
 
@@ -210,7 +223,8 @@
 	struct super_block *sb;
 	struct super_block *lower_sb;
 	/* derived perm policy : some of options have been added
-	 * to sdcardfs_mount_options (Android 4.4 support) */
+	 * to sdcardfs_mount_options (Android 4.4 support)
+	 */
 	struct sdcardfs_mount_options options;
 	spinlock_t lock;	/* protects obbpath */
 	char *obbpath_s;
@@ -321,7 +335,7 @@
 { \
 	struct path pname; \
 	spin_lock(&SDCARDFS_D(dent)->lock); \
-	if(SDCARDFS_D(dent)->pname.dentry) { \
+	if (SDCARDFS_D(dent)->pname.dentry) { \
 		pathcpy(&pname, &SDCARDFS_D(dent)->pname); \
 		SDCARDFS_D(dent)->pname.dentry = NULL; \
 		SDCARDFS_D(dent)->pname.mnt = NULL; \
@@ -335,14 +349,19 @@
 SDCARDFS_DENT_FUNC(lower_path)
 SDCARDFS_DENT_FUNC(orig_path)
 
+static inline bool sbinfo_has_sdcard_magic(struct sdcardfs_sb_info *sbinfo)
+{
+	return sbinfo && sbinfo->sb && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
+}
+
 /* grab a reference if we aren't linking to ourselves */
 static inline void set_top(struct sdcardfs_inode_info *info, struct inode *top)
 {
 	struct inode *old_top = NULL;
+
 	BUG_ON(IS_ERR_OR_NULL(top));
-	if (info->top && info->top != &info->vfs_inode) {
+	if (info->top && info->top != &info->vfs_inode)
 		old_top = info->top;
-	}
 	if (top != &info->vfs_inode)
 		igrab(top);
 	info->top = top;
@@ -352,11 +371,11 @@
 static inline struct inode *grab_top(struct sdcardfs_inode_info *info)
 {
 	struct inode *top = info->top;
-	if (top) {
+
+	if (top)
 		return igrab(top);
-	} else {
+	else
 		return NULL;
-	}
 }
 
 static inline void release_top(struct sdcardfs_inode_info *info)
@@ -364,21 +383,24 @@
 	iput(info->top);
 }
 
-static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info) {
+static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info)
+{
 	struct sdcardfs_vfsmount_options *opts = mnt->data;
 
-	if (opts->gid == AID_SDCARD_RW) {
+	if (opts->gid == AID_SDCARD_RW)
 		/* As an optimization, certain trusted system components only run
 		 * as owner but operate across all users. Since we're now handing
 		 * out the sdcard_rw GID only to trusted apps, we're okay relaxing
 		 * the user boundary enforcement for the default view. The UIDs
-		 * assigned to app directories are still multiuser aware. */
+		 * assigned to app directories are still multiuser aware.
+		 */
 		return AID_SDCARD_RW;
-	} else {
+	else
 		return multiuser_get_uid(info->userid, opts->gid);
-	}
 }
-static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *info) {
+
+static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *info)
+{
 	int owner_mode;
 	int filtered_mode;
 	struct sdcardfs_vfsmount_options *opts = mnt->data;
@@ -387,17 +409,18 @@
 
 	if (info->perm == PERM_PRE_ROOT) {
 		/* Top of multi-user view should always be visible to ensure
-		* secondary users can traverse inside. */
+		* secondary users can traverse inside.
+		*/
 		visible_mode = 0711;
 	} else if (info->under_android) {
 		/* Block "other" access to Android directories, since only apps
 		* belonging to a specific user should be in there; we still
-		* leave +x open for the default view. */
-		if (opts->gid == AID_SDCARD_RW) {
+		* leave +x open for the default view.
+		*/
+		if (opts->gid == AID_SDCARD_RW)
 			visible_mode = visible_mode & ~0006;
-		} else {
+		else
 			visible_mode = visible_mode & ~0007;
-		}
 	}
 	owner_mode = info->lower_inode->i_mode & 0700;
 	filtered_mode = visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));
@@ -422,7 +445,7 @@
 	/* in case of a local obb dentry
 	 * the orig_path should be returned
 	 */
-	if(has_graft_path(dent))
+	if (has_graft_path(dent))
 		sdcardfs_get_orig_path(dent, real_lower);
 	else
 		sdcardfs_get_lower_path(dent, real_lower);
@@ -431,7 +454,7 @@
 static inline void sdcardfs_put_real_lower(const struct dentry *dent,
 						struct path *real_lower)
 {
-	if(has_graft_path(dent))
+	if (has_graft_path(dent))
 		sdcardfs_put_orig_path(dent, real_lower);
 	else
 		sdcardfs_put_lower_path(dent, real_lower);
@@ -442,20 +465,30 @@
 
 /* for packagelist.c */
 extern appid_t get_appid(const char *app_name);
-extern int check_caller_access_to_name(struct inode *parent_node, const char* name);
+extern appid_t get_ext_gid(const char *app_name);
+extern appid_t is_excluded(const char *app_name, userid_t userid);
+extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
 extern int open_flags_to_access_mode(int open_flags);
 extern int packagelist_init(void);
 extern void packagelist_exit(void);
 
 /* for derived_perm.c */
+#define BY_NAME		(1 << 0)
+#define BY_USERID	(1 << 1)
+struct limit_search {
+	unsigned int flags;
+	struct qstr name;
+	userid_t userid;
+};
+
 extern void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
 			uid_t uid, bool under_android, struct inode *top);
 extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
-extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry);
-extern void fixup_top_recursive(struct dentry *parent);
-extern void fixup_perms_recursive(struct dentry *dentry, const char *name, size_t len);
+extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name);
+extern void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit);
 
 extern void update_derived_permission_lock(struct dentry *dentry);
+void fixup_lower_ownership(struct dentry *dentry, const char *name);
 extern int need_graft_path(struct dentry *dentry);
 extern int is_base_obbpath(struct dentry *dentry);
 extern int is_obbpath_invalid(struct dentry *dentry);
@@ -465,6 +498,7 @@
 static inline struct dentry *lock_parent(struct dentry *dentry)
 {
 	struct dentry *dir = dget_parent(dentry);
+
 	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
 	return dir;
 }
@@ -577,4 +611,22 @@
 	dest->i_flags = src->i_flags;
 	set_nlink(dest, src->i_nlink);
 }
+
+static inline bool str_case_eq(const char *s1, const char *s2)
+{
+	return !strcasecmp(s1, s2);
+}
+
+static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len)
+{
+	return !strncasecmp(s1, s2, len);
+}
+
+static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2)
+{
+	return q1->len == q2->len && str_case_eq(q1->name, q2->name);
+}
+
+#define QSTR_LITERAL(string) QSTR_INIT(string, sizeof(string)-1)
+
 #endif	/* not _SDCARDFS_H_ */
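
The qstr helpers added at the end of sdcardfs.h (str_case_eq, qstr_case_eq, QSTR_LITERAL) provide case-insensitive, length-checked name comparisons against fixed strings. A minimal hypothetical caller (not part of this patch; "Android" is only an example literal) would look like:

static const struct qstr q_android = QSTR_LITERAL("Android");

static bool is_android_dir(const struct dentry *dentry)
{
	/* the length check runs before the case-insensitive compare */
	return qstr_case_eq(&dentry->d_name, &q_android);
}
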
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index edda32b..a3393e9 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -36,7 +36,7 @@
 	if (!spd)
 		return;
 
-	if(spd->obbpath_s) {
+	if (spd->obbpath_s) {
 		kfree(spd->obbpath_s);
 		path_put(&spd->obbpath);
 	}
@@ -64,7 +64,7 @@
 	if (sbi->options.reserved_mb) {
 		/* Invalid statfs informations. */
 		if (buf->f_bsize == 0) {
-			printk(KERN_ERR "Returned block size is zero.\n");
+			pr_err("Returned block size is zero.\n");
 			return -EINVAL;
 		}
 
@@ -100,8 +100,7 @@
 	 * SILENT, but anything else left over is an error.
 	 */
 	if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT)) != 0) {
-		printk(KERN_ERR
-		       "sdcardfs: remount flags 0x%x unsupported\n", *flags);
+		pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
 		err = -EINVAL;
 	}
 
@@ -125,29 +124,33 @@
 	 * SILENT, but anything else left over is an error.
 	 */
 	if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT | MS_REMOUNT)) != 0) {
-		printk(KERN_ERR
-		       "sdcardfs: remount flags 0x%x unsupported\n", *flags);
+		pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
 		err = -EINVAL;
 	}
-	printk(KERN_INFO "Remount options were %s for vfsmnt %p.\n", options, mnt);
+	pr_info("Remount options were %s for vfsmnt %p.\n", options, mnt);
 	err = parse_options_remount(sb, options, *flags & ~MS_SILENT, mnt->data);
 
 
 	return err;
 }
 
-static void* sdcardfs_clone_mnt_data(void *data) {
-	struct sdcardfs_vfsmount_options* opt = kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
-	struct sdcardfs_vfsmount_options* old = data;
-	if(!opt) return NULL;
+static void *sdcardfs_clone_mnt_data(void *data)
+{
+	struct sdcardfs_vfsmount_options *opt = kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
+	struct sdcardfs_vfsmount_options *old = data;
+
+	if (!opt)
+		return NULL;
 	opt->gid = old->gid;
 	opt->mask = old->mask;
 	return opt;
 }
 
-static void sdcardfs_copy_mnt_data(void *data, void *newdata) {
-	struct sdcardfs_vfsmount_options* old = data;
-	struct sdcardfs_vfsmount_options* new = newdata;
+static void sdcardfs_copy_mnt_data(void *data, void *newdata)
+{
+	struct sdcardfs_vfsmount_options *old = data;
+	struct sdcardfs_vfsmount_options *new = newdata;
+
 	old->gid = new->gid;
 	old->mask = new->mask;
 }
@@ -218,8 +221,7 @@
 /* sdcardfs inode cache destructor */
 void sdcardfs_destroy_inode_cache(void)
 {
-	if (sdcardfs_inode_cachep)
-		kmem_cache_destroy(sdcardfs_inode_cachep);
+	kmem_cache_destroy(sdcardfs_inode_cachep);
 }
 
 /*
@@ -235,7 +237,8 @@
 		lower_sb->s_op->umount_begin(lower_sb);
 }
 
-static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m, struct dentry *root)
+static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m,
+			struct dentry *root)
 {
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(root->d_sb);
 	struct sdcardfs_mount_options *opts = &sbi->options;
@@ -248,7 +251,7 @@
 	if (vfsopts->gid != 0)
 		seq_printf(m, ",gid=%u", vfsopts->gid);
 	if (opts->multiuser)
-		seq_printf(m, ",multiuser");
+		seq_puts(m, ",multiuser");
 	if (vfsopts->mask)
 		seq_printf(m, ",mask=%u", vfsopts->mask);
 	if (opts->fs_user_id)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 6726440..e9fb2e8 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -361,6 +361,7 @@
 /* Event queued up for userspace to read */
 struct drm_pending_event {
 	struct completion *completion;
+	void (*completion_release)(struct completion *completion);
 	struct drm_event *event;
 	struct fence *fence;
 	struct list_head link;
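
The new completion_release callback gives the owner of the completion attached to a drm_pending_event a hook that runs once the event code has finished signalling it. A hypothetical driver-side pairing (illustration only, assuming the driver embeds both objects in its own state) could be:

struct my_flip_state {
	struct completion done;
	struct drm_pending_event base;
};

/* set as base.completion_release when base.completion = &state->done */
static void my_flip_completion_release(struct completion *completion)
{
	struct my_flip_state *state =
		container_of(completion, struct my_flip_state, done);

	kfree(state);
}
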
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
new file mode 100644
index 0000000..c1350ce
--- /dev/null
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_10NM_PLL_CLK_H
+#define __MDSS_10NM_PLL_CLK_H
+
+/* DSI PLL clocks */
+#define VCO_CLK_0		0
+#define BITCLK_SRC_0_CLK	1
+#define BYTECLK_SRC_0_CLK	2
+#define POST_BIT_DIV_0_CLK	3
+#define POST_VCO_DIV_0_CLK	4
+#define BYTECLK_MUX_0_CLK	5
+#define PCLK_SRC_MUX_0_CLK	6
+#define PCLK_SRC_0_CLK		7
+#define PCLK_MUX_0_CLK		8
+#define VCO_CLK_1		9
+#define BITCLK_SRC_1_CLK	10
+#define BYTECLK_SRC_1_CLK	11
+#define POST_BIT_DIV_1_CLK	12
+#define POST_VCO_DIV_1_CLK	13
+#define BYTECLK_MUX_1_CLK	14
+#define PCLK_SRC_MUX_1_CLK	15
+#define PCLK_SRC_1_CLK		16
+#define PCLK_MUX_1_CLK		17
+#endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index dbee8901..e169172 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,34 +88,17 @@
 #define CAM_CC_MCLK3_CLK_SRC					71
 #define CAM_CC_PLL0						72
 #define CAM_CC_PLL0_OUT_EVEN					73
-#define CAM_CC_PLL0_OUT_MAIN					74
-#define CAM_CC_PLL0_OUT_ODD					75
-#define CAM_CC_PLL0_OUT_TEST					76
-#define CAM_CC_PLL1						77
-#define CAM_CC_PLL1_OUT_EVEN					78
-#define CAM_CC_PLL1_OUT_MAIN					79
-#define CAM_CC_PLL1_OUT_ODD					80
-#define CAM_CC_PLL1_OUT_TEST					81
-#define CAM_CC_PLL2						82
-#define CAM_CC_PLL2_OUT_EVEN					83
-#define CAM_CC_PLL2_OUT_MAIN					84
-#define CAM_CC_PLL2_OUT_ODD					85
-#define CAM_CC_PLL2_OUT_TEST					86
-#define CAM_CC_PLL3						87
-#define CAM_CC_PLL3_OUT_EVEN					88
-#define CAM_CC_PLL3_OUT_MAIN					89
-#define CAM_CC_PLL3_OUT_ODD					90
-#define CAM_CC_PLL3_OUT_TEST					91
-#define CAM_CC_PLL_TEST_CLK					92
-#define CAM_CC_SLOW_AHB_CLK_SRC					93
-#define CAM_CC_SOC_AHB_CLK					94
-#define CAM_CC_SPDM_BPS_CLK					95
-#define CAM_CC_SPDM_IFE_0_CLK					96
-#define CAM_CC_SPDM_IFE_0_CSID_CLK				97
-#define CAM_CC_SPDM_IPE_0_CLK					98
-#define CAM_CC_SPDM_IPE_1_CLK					99
-#define CAM_CC_SPDM_JPEG_CLK					100
-#define CAM_CC_SYS_TMR_CLK					101
+#define CAM_CC_PLL1						74
+#define CAM_CC_PLL1_OUT_EVEN					75
+#define CAM_CC_PLL2						76
+#define CAM_CC_PLL2_OUT_EVEN					77
+#define CAM_CC_PLL2_OUT_ODD					78
+#define CAM_CC_PLL3						79
+#define CAM_CC_PLL3_OUT_EVEN					80
+#define CAM_CC_PLL_TEST_CLK					81
+#define CAM_CC_SLOW_AHB_CLK_SRC					82
+#define CAM_CC_SOC_AHB_CLK					83
+#define CAM_CC_SYS_TMR_CLK					84
 
 #define TITAN_CAM_CC_BPS_BCR					0
 #define TITAN_CAM_CC_CAMNOC_BCR					1
diff --git a/include/dt-bindings/clock/qcom,cpucc-sdm845.h b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
index c1ff2a0..db3c940 100644
--- a/include/dt-bindings/clock/qcom,cpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,18 +14,18 @@
 #ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 
-#define L3_CLUSTER0_VOTE_CLK					0
-#define L3_CLUSTER1_VOTE_CLK					1
-#define L3_CLK							2
-#define CPU0_PWRCL_CLK						3
-#define CPU1_PWRCL_CLK						4
-#define CPU2_PWRCL_CLK						5
-#define CPU3_PWRCL_CLK						6
-#define PWRCL_CLK						7
-#define CPU4_PERFCL_CLK						8
-#define CPU5_PERFCL_CLK						9
-#define CPU6_PERFCL_CLK						10
-#define CPU7_PERFCL_CLK						11
-#define PERFCL_CLK						12
+#define L3_CLK							0
+#define PWRCL_CLK						1
+#define PERFCL_CLK						2
+#define L3_CLUSTER0_VOTE_CLK					3
+#define L3_CLUSTER1_VOTE_CLK					4
+#define CPU0_PWRCL_CLK						5
+#define CPU1_PWRCL_CLK						6
+#define CPU2_PWRCL_CLK						7
+#define CPU3_PWRCL_CLK						8
+#define CPU4_PERFCL_CLK						9
+#define CPU5_PERFCL_CLK						10
+#define CPU6_PERFCL_CLK						11
+#define CPU7_PERFCL_CLK						12
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 10530c5..b1988e4 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,25 +51,16 @@
 #define DISP_CC_MDSS_ROT_CLK_SRC				34
 #define DISP_CC_MDSS_RSCC_AHB_CLK				35
 #define DISP_CC_MDSS_RSCC_VSYNC_CLK				36
-#define DISP_CC_MDSS_SPDM_DEBUG_CLK				37
-#define DISP_CC_MDSS_SPDM_DP_CRYPTO_CLK				38
-#define DISP_CC_MDSS_SPDM_DP_PIXEL1_CLK				39
-#define DISP_CC_MDSS_SPDM_DP_PIXEL_CLK				40
-#define DISP_CC_MDSS_SPDM_MDP_CLK				41
-#define DISP_CC_MDSS_SPDM_PCLK0_CLK				42
-#define DISP_CC_MDSS_SPDM_PCLK1_CLK				43
-#define DISP_CC_MDSS_SPDM_ROT_CLK				44
-#define DISP_CC_MDSS_VSYNC_CLK					45
-#define DISP_CC_MDSS_VSYNC_CLK_SRC				46
-#define DISP_CC_PLL0						47
-#define DISP_CC_PLL0_OUT_EVEN					48
-#define DISP_CC_PLL0_OUT_MAIN					49
-#define DISP_CC_PLL0_OUT_ODD					50
-#define DISP_CC_PLL0_OUT_TEST					51
+#define DISP_CC_MDSS_VSYNC_CLK					37
+#define DISP_CC_MDSS_VSYNC_CLK_SRC				38
+#define DISP_CC_PLL0						39
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				40
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				41
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			42
 
-#define DISP_CC_DISP_CC_MDSS_CORE_BCR				0
-#define DISP_CC_DISP_CC_MDSS_GCC_CLOCKS_BCR			1
-#define DISP_CC_DISP_CC_MDSS_RSCC_BCR				2
-#define DISP_CC_DISP_CC_MDSS_SPDM_BCR				3
+#define DISP_CC_MDSS_CORE_BCR					0
+#define DISP_CC_MDSS_GCC_CLOCKS_BCR				1
+#define DISP_CC_MDSS_RSCC_BCR					2
+#define DISP_CC_MDSS_SPDM_BCR					3
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 1e55c1d..d52e335 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -53,152 +53,149 @@
 #define GCC_GPU_GPLL0_DIV_CLK_SRC				35
 #define GCC_GPU_MEMNOC_GFX_CLK					36
 #define GCC_GPU_SNOC_DVM_GFX_CLK				37
-#define GCC_MMSS_QM_AHB_CLK					38
-#define GCC_MMSS_QM_CORE_CLK					39
-#define GCC_MMSS_QM_CORE_CLK_SRC				40
-#define GCC_MSS_AXIS2_CLK					41
-#define GCC_MSS_CFG_AHB_CLK					42
-#define GCC_MSS_GPLL0_DIV_CLK_SRC				43
-#define GCC_MSS_MFAB_AXIS_CLK					44
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK				45
-#define GCC_MSS_SNOC_AXI_CLK					46
-#define GCC_PCIE_0_AUX_CLK					47
-#define GCC_PCIE_0_AUX_CLK_SRC					48
-#define GCC_PCIE_0_CFG_AHB_CLK					49
-#define GCC_PCIE_0_CLKREF_CLK					50
-#define GCC_PCIE_0_MSTR_AXI_CLK					51
-#define GCC_PCIE_0_PIPE_CLK					52
-#define GCC_PCIE_0_SLV_AXI_CLK					53
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				54
-#define GCC_PCIE_1_AUX_CLK					55
-#define GCC_PCIE_1_AUX_CLK_SRC					56
-#define GCC_PCIE_1_CFG_AHB_CLK					57
-#define GCC_PCIE_1_CLKREF_CLK					58
-#define GCC_PCIE_1_MSTR_AXI_CLK					59
-#define GCC_PCIE_1_PIPE_CLK					60
-#define GCC_PCIE_1_SLV_AXI_CLK					61
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				62
-#define GCC_PCIE_PHY_AUX_CLK					63
-#define GCC_PCIE_PHY_REFGEN_CLK					64
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				65
-#define GCC_PDM2_CLK						66
-#define GCC_PDM2_CLK_SRC					67
-#define GCC_PDM_AHB_CLK						68
-#define GCC_PDM_XO4_CLK						69
-#define GCC_PRNG_AHB_CLK					70
-#define GCC_QMIP_CAMERA_AHB_CLK					71
-#define GCC_QMIP_DISP_AHB_CLK					72
-#define GCC_QMIP_VIDEO_AHB_CLK					73
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				74
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				75
-#define GCC_QUPV3_WRAP0_CORE_CLK				76
-#define GCC_QUPV3_WRAP0_S0_CLK					77
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				78
-#define GCC_QUPV3_WRAP0_S1_CLK					79
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				80
-#define GCC_QUPV3_WRAP0_S2_CLK					81
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				82
-#define GCC_QUPV3_WRAP0_S3_CLK					83
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				84
-#define GCC_QUPV3_WRAP0_S4_CLK					85
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				86
-#define GCC_QUPV3_WRAP0_S5_CLK					87
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				88
-#define GCC_QUPV3_WRAP0_S6_CLK					89
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				90
-#define GCC_QUPV3_WRAP0_S7_CLK					91
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				92
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK				93
-#define GCC_QUPV3_WRAP1_CORE_CLK				94
-#define GCC_QUPV3_WRAP1_S0_CLK					95
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				96
-#define GCC_QUPV3_WRAP1_S1_CLK					97
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				98
-#define GCC_QUPV3_WRAP1_S2_CLK					99
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				100
-#define GCC_QUPV3_WRAP1_S3_CLK					101
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				102
-#define GCC_QUPV3_WRAP1_S4_CLK					103
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				104
-#define GCC_QUPV3_WRAP1_S5_CLK					105
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				106
-#define GCC_QUPV3_WRAP1_S6_CLK					107
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC				108
-#define GCC_QUPV3_WRAP1_S7_CLK					109
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC				110
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				111
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				112
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				113
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				114
-#define GCC_RX1_USB2_CLKREF_CLK					115
-#define GCC_RX2_QLINK_CLKREF_CLK				116
-#define GCC_RX3_MODEM_CLKREF_CLK				117
-#define GCC_SDCC2_AHB_CLK					118
-#define GCC_SDCC2_APPS_CLK					119
-#define GCC_SDCC2_APPS_CLK_SRC					120
-#define GCC_SDCC4_AHB_CLK					121
-#define GCC_SDCC4_APPS_CLK					122
-#define GCC_SDCC4_APPS_CLK_SRC					123
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				124
-#define GCC_TSIF_AHB_CLK					125
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				126
-#define GCC_TSIF_REF_CLK					127
-#define GCC_TSIF_REF_CLK_SRC					128
-#define GCC_UFS_CARD_AHB_CLK					129
-#define GCC_UFS_CARD_AXI_CLK					130
-#define GCC_UFS_CARD_AXI_CLK_SRC				131
-#define GCC_UFS_CARD_CLKREF_CLK					132
-#define GCC_UFS_CARD_ICE_CORE_CLK				133
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				134
-#define GCC_UFS_CARD_PHY_AUX_CLK				135
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				136
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				137
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				138
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				139
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				140
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			141
-#define GCC_UFS_MEM_CLKREF_CLK					142
-#define GCC_UFS_PHY_AHB_CLK					143
-#define GCC_UFS_PHY_AXI_CLK					144
-#define GCC_UFS_PHY_AXI_CLK_SRC					145
-#define GCC_UFS_PHY_ICE_CORE_CLK				146
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				147
-#define GCC_UFS_PHY_PHY_AUX_CLK					148
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				149
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				150
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				151
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				152
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				153
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				154
-#define GCC_USB30_PRIM_MASTER_CLK				155
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				156
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				157
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			158
-#define GCC_USB30_PRIM_SLEEP_CLK				159
-#define GCC_USB30_SEC_MASTER_CLK				160
-#define GCC_USB30_SEC_MASTER_CLK_SRC				161
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				162
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				163
-#define GCC_USB30_SEC_SLEEP_CLK					164
-#define GCC_USB3_PRIM_CLKREF_CLK				165
-#define GCC_USB3_PRIM_PHY_AUX_CLK				166
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				167
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				168
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				169
-#define GCC_USB3_SEC_CLKREF_CLK					170
-#define GCC_USB3_SEC_PHY_AUX_CLK				171
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				172
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				173
-#define GCC_USB3_SEC_PHY_PIPE_CLK				174
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				175
-#define GCC_VIDEO_AHB_CLK					176
-#define GCC_VIDEO_AXI_CLK					177
-#define GCC_VIDEO_XO_CLK					178
-#define GPLL0							179
-#define GPLL0_OUT_EVEN						180
-#define GPLL0_OUT_MAIN						181
-#define GPLL1							182
-#define GPLL1_OUT_MAIN						183
+#define GCC_MSS_AXIS2_CLK					38
+#define GCC_MSS_CFG_AHB_CLK					39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC				40
+#define GCC_MSS_MFAB_AXIS_CLK					41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK				42
+#define GCC_MSS_SNOC_AXI_CLK					43
+#define GCC_PCIE_0_AUX_CLK					44
+#define GCC_PCIE_0_AUX_CLK_SRC					45
+#define GCC_PCIE_0_CFG_AHB_CLK					46
+#define GCC_PCIE_0_CLKREF_CLK					47
+#define GCC_PCIE_0_MSTR_AXI_CLK					48
+#define GCC_PCIE_0_PIPE_CLK					49
+#define GCC_PCIE_0_SLV_AXI_CLK					50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				51
+#define GCC_PCIE_1_AUX_CLK					52
+#define GCC_PCIE_1_AUX_CLK_SRC					53
+#define GCC_PCIE_1_CFG_AHB_CLK					54
+#define GCC_PCIE_1_CLKREF_CLK					55
+#define GCC_PCIE_1_MSTR_AXI_CLK					56
+#define GCC_PCIE_1_PIPE_CLK					57
+#define GCC_PCIE_1_SLV_AXI_CLK					58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				59
+#define GCC_PCIE_PHY_AUX_CLK					60
+#define GCC_PCIE_PHY_REFGEN_CLK					61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				62
+#define GCC_PDM2_CLK						63
+#define GCC_PDM2_CLK_SRC					64
+#define GCC_PDM_AHB_CLK						65
+#define GCC_PDM_XO4_CLK						66
+#define GCC_PRNG_AHB_CLK					67
+#define GCC_QMIP_CAMERA_AHB_CLK					68
+#define GCC_QMIP_DISP_AHB_CLK					69
+#define GCC_QMIP_VIDEO_AHB_CLK					70
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				71
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				72
+#define GCC_QUPV3_WRAP0_CORE_CLK				73
+#define GCC_QUPV3_WRAP0_S0_CLK					74
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				75
+#define GCC_QUPV3_WRAP0_S1_CLK					76
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				77
+#define GCC_QUPV3_WRAP0_S2_CLK					78
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				79
+#define GCC_QUPV3_WRAP0_S3_CLK					80
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				81
+#define GCC_QUPV3_WRAP0_S4_CLK					82
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				83
+#define GCC_QUPV3_WRAP0_S5_CLK					84
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				85
+#define GCC_QUPV3_WRAP0_S6_CLK					86
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				87
+#define GCC_QUPV3_WRAP0_S7_CLK					88
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				89
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK				90
+#define GCC_QUPV3_WRAP1_CORE_CLK				91
+#define GCC_QUPV3_WRAP1_S0_CLK					92
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
+#define GCC_QUPV3_WRAP1_S1_CLK					94
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
+#define GCC_QUPV3_WRAP1_S2_CLK					96
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
+#define GCC_QUPV3_WRAP1_S3_CLK					98
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
+#define GCC_QUPV3_WRAP1_S4_CLK					100
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
+#define GCC_QUPV3_WRAP1_S5_CLK					102
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
+#define GCC_QUPV3_WRAP1_S6_CLK					104
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
+#define GCC_QUPV3_WRAP1_S7_CLK					106
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
+#define GCC_RX1_USB2_CLKREF_CLK					112
+#define GCC_RX2_QLINK_CLKREF_CLK				113
+#define GCC_RX3_MODEM_CLKREF_CLK				114
+#define GCC_SDCC2_AHB_CLK					115
+#define GCC_SDCC2_APPS_CLK					116
+#define GCC_SDCC2_APPS_CLK_SRC					117
+#define GCC_SDCC4_AHB_CLK					118
+#define GCC_SDCC4_APPS_CLK					119
+#define GCC_SDCC4_APPS_CLK_SRC					120
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				121
+#define GCC_TSIF_AHB_CLK					122
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				123
+#define GCC_TSIF_REF_CLK					124
+#define GCC_TSIF_REF_CLK_SRC					125
+#define GCC_UFS_CARD_AHB_CLK					126
+#define GCC_UFS_CARD_AXI_CLK					127
+#define GCC_UFS_CARD_AXI_CLK_SRC				128
+#define GCC_UFS_CARD_CLKREF_CLK					129
+#define GCC_UFS_CARD_ICE_CORE_CLK				130
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				131
+#define GCC_UFS_CARD_PHY_AUX_CLK				132
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				133
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				134
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				135
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				136
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				137
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			138
+#define GCC_UFS_MEM_CLKREF_CLK					139
+#define GCC_UFS_PHY_AHB_CLK					140
+#define GCC_UFS_PHY_AXI_CLK					141
+#define GCC_UFS_PHY_AXI_CLK_SRC					142
+#define GCC_UFS_PHY_ICE_CORE_CLK				143
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				144
+#define GCC_UFS_PHY_PHY_AUX_CLK					145
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				147
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				148
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				149
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				150
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				151
+#define GCC_USB30_PRIM_MASTER_CLK				152
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				153
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				154
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			155
+#define GCC_USB30_PRIM_SLEEP_CLK				156
+#define GCC_USB30_SEC_MASTER_CLK				157
+#define GCC_USB30_SEC_MASTER_CLK_SRC				158
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				159
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				160
+#define GCC_USB30_SEC_SLEEP_CLK					161
+#define GCC_USB3_PRIM_CLKREF_CLK				162
+#define GCC_USB3_PRIM_PHY_AUX_CLK				163
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				164
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				166
+#define GCC_USB3_SEC_CLKREF_CLK					167
+#define GCC_USB3_SEC_PHY_AUX_CLK				168
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				169
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				170
+#define GCC_USB3_SEC_PHY_PIPE_CLK				171
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				172
+#define GCC_VIDEO_AHB_CLK					173
+#define GCC_VIDEO_AXI_CLK					174
+#define GCC_VIDEO_XO_CLK					175
+#define GPLL0							176
+#define GPLL0_OUT_EVEN						177
+#define GPLL0_OUT_MAIN						178
+#define GPLL1							179
+#define GPLL1_OUT_MAIN						180
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index 41eb823..13de1e1 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,42 +14,42 @@
 #ifndef _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
 
+/* GPUCC clock registers */
 #define GPU_CC_ACD_AHB_CLK					0
 #define GPU_CC_ACD_CXO_CLK					1
-#define GPU_CC_AHB_CLK						2
+#define GPU_CC_AHB_CLK						2
 #define GPU_CC_CRC_AHB_CLK					3
 #define GPU_CC_CX_APB_CLK					4
-#define GPU_CC_CX_GFX3D_CLK					5
-#define GPU_CC_CX_GFX3D_SLV_CLK					6
-#define GPU_CC_CX_GMU_CLK					7
-#define GPU_CC_CX_QDSS_AT_CLK					8
-#define GPU_CC_CX_QDSS_TRIG_CLK					9
-#define GPU_CC_CX_QDSS_TSCTR_CLK				10
-#define GPU_CC_CX_SNOC_DVM_CLK					11
-#define GPU_CC_CXO_AON_CLK					12
-#define GPU_CC_CXO_CLK						13
-#define GPU_CC_DEBUG_CLK					14
-#define GPU_CC_GX_CXO_CLK					15
-#define GPU_CC_GX_GMU_CLK					16
-#define GPU_CC_GX_QDSS_TSCTR_CLK				17
-#define GPU_CC_GX_VSENSE_CLK					18
-#define GPU_CC_PLL0						19
-#define GPU_CC_PLL0_OUT_EVEN					20
-#define GPU_CC_PLL0_OUT_MAIN					21
-#define GPU_CC_PLL0_OUT_ODD					22
-#define GPU_CC_PLL0_OUT_TEST					23
-#define GPU_CC_PLL1						24
-#define GPU_CC_PLL1_OUT_EVEN					25
-#define GPU_CC_PLL1_OUT_MAIN					26
-#define GPU_CC_PLL1_OUT_ODD					27
-#define GPU_CC_PLL1_OUT_TEST					28
-#define GPU_CC_PLL_TEST_CLK					29
-#define GPU_CC_RBCPR_AHB_CLK					30
-#define GPU_CC_RBCPR_CLK					31
-#define GPU_CC_RBCPR_CLK_SRC					32
-#define GPU_CC_SLEEP_CLK					33
-#define GPU_CC_SPDM_GX_GFX3D_DIV_CLK				34
+#define GPU_CC_CX_GMU_CLK					5
+#define GPU_CC_CX_QDSS_AT_CLK					6
+#define GPU_CC_CX_QDSS_TRIG_CLK					7
+#define GPU_CC_CX_QDSS_TSCTR_CLK				8
+#define GPU_CC_CX_SNOC_DVM_CLK					9
+#define GPU_CC_CXO_AON_CLK					10
+#define GPU_CC_CXO_CLK						11
+#define GPU_CC_DEBUG_CLK					12
+#define GPU_CC_GX_CXO_CLK					13
+#define GPU_CC_GX_GMU_CLK					14
+#define GPU_CC_GX_QDSS_TSCTR_CLK				15
+#define GPU_CC_GX_VSENSE_CLK					16
+#define GPU_CC_PLL0_OUT_MAIN					17
+#define GPU_CC_PLL0_OUT_ODD					18
+#define GPU_CC_PLL0_OUT_TEST					19
+#define GPU_CC_PLL1						20
+#define GPU_CC_PLL1_OUT_EVEN					21
+#define GPU_CC_PLL1_OUT_MAIN					22
+#define GPU_CC_PLL1_OUT_ODD					23
+#define GPU_CC_PLL1_OUT_TEST					24
+#define GPU_CC_PLL_TEST_CLK					25
+#define GPU_CC_RBCPR_AHB_CLK					26
+#define GPU_CC_RBCPR_CLK					27
+#define GPU_CC_RBCPR_CLK_SRC					28
+#define GPU_CC_SLEEP_CLK					29
+#define GPU_CC_GMU_CLK_SRC					30
+#define GPU_CC_CX_GFX3D_CLK					31
+#define GPU_CC_CX_GFX3D_SLV_CLK					32
 
+/* GPUCC reset clock registers */
 #define GPUCC_GPU_CC_ACD_BCR					0
 #define GPUCC_GPU_CC_CX_BCR					1
 #define GPUCC_GPU_CC_GFX3D_AON_BCR				2
@@ -59,4 +59,9 @@
 #define GPUCC_GPU_CC_SPDM_BCR					6
 #define GPUCC_GPU_CC_XO_BCR					7
 
+/* GFX3D clock registers */
+#define GPU_CC_PLL0						0
+#define GPU_CC_PLL0_OUT_EVEN					1
+#define GPU_CC_GX_GFX3D_CLK_SRC					2
+#define GPU_CC_GX_GFX3D_CLK						3
 #endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 86ac8d4..8135da9 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -42,6 +42,7 @@
 #define	MSM_BUS_FAB_DC_NOC 6150
 #define	MSM_BUS_FAB_MC_VIRT 6151
 #define	MSM_BUS_FAB_MEM_NOC 6152
+#define	MSM_BUS_FAB_IPA_VIRT 6153
 
 #define MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
 #define MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -86,6 +87,10 @@
 #define	MSM_BUS_BCM_CN0 7036
 #define	MSM_BUS_BCM_ACV 7037
 #define	MSM_BUS_BCM_ALC 7038
+#define	MSM_BUS_BCM_QUP0 7039
+
+#define	MSM_BUS_RSC_APPS 8000
+#define	MSM_BUS_RSC_DISP 8001
 
 #define MSM_BUS_BCM_MC0_DISPLAY 27000
 #define MSM_BUS_BCM_SH0_DISPLAY 27001
@@ -238,7 +243,8 @@
 #define	MSM_BUS_MASTER_ANOC_PCIE_SNOC 140
 #define	MSM_BUS_MASTER_PIMEM 141
 #define	MSM_BUS_MASTER_MEM_NOC_SNOC 142
-#define	MSM_BUS_MASTER_MASTER_LAST 143
+#define	MSM_BUS_MASTER_IPA_CORE 143
+#define	MSM_BUS_MASTER_MASTER_LAST 144
 
 #define MSM_BUS_MASTER_LLCC_DISPLAY 20000
 #define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -576,7 +582,8 @@
 #define	MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 774
 #define	MSM_BUS_SLAVE_SNOC_MEM_NOC_SF 775
 #define	MSM_BUS_SLAVE_MEM_NOC_SNOC 776
-#define	MSM_BUS_SLAVE_LAST 777
+#define	MSM_BUS_SLAVE_IPA 777
+#define	MSM_BUS_SLAVE_LAST 778
 
 #define	MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
 #define	MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/dt-bindings/msm/power-on.h b/include/dt-bindings/msm/power-on.h
new file mode 100644
index 0000000..f43841e
--- /dev/null
+++ b/include/dt-bindings/msm/power-on.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 5f2fd61..d5ff4c30 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -12,6 +12,7 @@
 #include <linux/fb.h>
 #include <linux/mutex.h>
 #include <linux/notifier.h>
+#include <linux/thermal.h>
 
 /* Notes on locking:
  *
@@ -110,6 +111,12 @@
 	struct list_head entry;
 
 	struct device dev;
+	/* Backlight cooling device */
+	struct thermal_cooling_device *cdev;
+	/* Thermally limited max brightness */
+	int thermal_brightness_limit;
+	/* User brightness request */
+	int usr_brightness_req;
 
 	/* Multiple framebuffers may share one backlight device */
 	bool fb_bl_on[FB_MAX];
diff --git a/include/linux/batterydata-lib.h b/include/linux/batterydata-lib.h
new file mode 100644
index 0000000..39517f8
--- /dev/null
+++ b/include/linux/batterydata-lib.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BMS_BATTERYDATA_H
+#define __BMS_BATTERYDATA_H
+
+#include <linux/errno.h>
+
+#define FCC_CC_COLS		5
+#define FCC_TEMP_COLS		8
+
+#define PC_CC_ROWS             31
+#define PC_CC_COLS             13
+
+#define PC_TEMP_ROWS		31
+#define PC_TEMP_COLS		8
+
+#define ACC_IBAT_ROWS		4
+#define ACC_TEMP_COLS		3
+
+#define MAX_SINGLE_LUT_COLS	20
+
+#define MAX_BATT_ID_NUM		4
+#define DEGC_SCALE		10
+
+struct single_row_lut {
+	int x[MAX_SINGLE_LUT_COLS];
+	int y[MAX_SINGLE_LUT_COLS];
+	int cols;
+};
+
+/**
+ * struct sf_lut -
+ * @rows:	number of percent charge entries; should be <= PC_CC_ROWS
+ * @cols:	number of charge cycle entries; should be <= PC_CC_COLS
+ * @row_entries:	the charge cycles/temperature at which sf data
+ *			is available in the table.
+ *		The charge cycles must be in increasing order from 0 to rows.
+ * @percent:	the percent charge at which sf data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @sf:		the scaling factor data
+ */
+struct sf_lut {
+	int rows;
+	int cols;
+	int row_entries[PC_CC_COLS];
+	int percent[PC_CC_ROWS];
+	int sf[PC_CC_ROWS][PC_CC_COLS];
+};
+
+/**
+ * struct pc_temp_ocv_lut -
+ * @rows:	number of percent charge entries; should be <= PC_TEMP_ROWS
+ * @cols:	number of temperature entries; should be <= PC_TEMP_COLS
+ * @temp:	the temperatures at which ocv data is available in the table
+ *		The temperatures must be in increasing order from 0 to rows.
+ * @percent:	the percent charge at which ocv data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @ocv:	the open circuit voltage
+ */
+struct pc_temp_ocv_lut {
+	int rows;
+	int cols;
+	int temp[PC_TEMP_COLS];
+	int percent[PC_TEMP_ROWS];
+	int ocv[PC_TEMP_ROWS][PC_TEMP_COLS];
+};
+
+struct ibat_temp_acc_lut {
+	int rows;
+	int cols;
+	int temp[ACC_TEMP_COLS];
+	int ibat[ACC_IBAT_ROWS];
+	int acc[ACC_IBAT_ROWS][ACC_TEMP_COLS];
+};
+
+struct batt_ids {
+	int kohm[MAX_BATT_ID_NUM];
+	int num;
+};
+
+enum battery_type {
+	BATT_UNKNOWN = 0,
+	BATT_PALLADIUM,
+	BATT_DESAY,
+	BATT_OEM,
+	BATT_QRD_4V35_2000MAH,
+	BATT_QRD_4V2_1300MAH,
+};
+
+/**
+ * struct bms_battery_data -
+ * @fcc:		full charge capacity (mAmpHour)
+ * @fcc_temp_lut:	table to get fcc at a given temp
+ * @pc_temp_ocv_lut:	table to get percent charge given batt temp and ocv
+ * @pc_sf_lut:		table to get percent charge scaling factor given cycles
+ *			and percent charge
+ * @rbatt_sf_lut:	table to get battery resistance scaling factor given
+ *			temperature and percent charge
+ * @default_rbatt_mohm:	the default value of battery resistance to use when
+ *			readings from bms are not available.
+ * @delta_rbatt_mohm:	the resistance to be added towards lower soc to
+ *			compensate for battery capacitance.
+ * @rbatt_capacitive_mohm: the resistance to be added to compensate for
+ *				battery capacitance
+ * @flat_ocv_threshold_uv: the voltage where the battery's discharge curve
+ *				starts flattening out.
+ * @max_voltage_uv:	max voltage of the battery
+ * @cutoff_uv:		cutoff voltage of the battery
+ * @iterm_ua:		termination current of the battery when charging
+ *			to 100%
+ * @batt_id_kohm:	the best matched battery id resistor value
+ * @fastchg_current_ma: maximum fast charge current
+ * @fg_cc_cv_threshold_mv: CC to CV threshold voltage
+ */
+
+struct bms_battery_data {
+	unsigned int		fcc;
+	struct single_row_lut	*fcc_temp_lut;
+	struct single_row_lut	*fcc_sf_lut;
+	struct pc_temp_ocv_lut	*pc_temp_ocv_lut;
+	struct ibat_temp_acc_lut *ibat_acc_lut;
+	struct sf_lut		*pc_sf_lut;
+	struct sf_lut		*rbatt_sf_lut;
+	int			default_rbatt_mohm;
+	int			delta_rbatt_mohm;
+	int			rbatt_capacitive_mohm;
+	int			flat_ocv_threshold_uv;
+	int			max_voltage_uv;
+	int			cutoff_uv;
+	int			iterm_ua;
+	int			batt_id_kohm;
+	int			fastchg_current_ma;
+	int			fg_cc_cv_threshold_mv;
+	const char		*battery_type;
+};
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
+
+#if defined(CONFIG_PM8921_BMS) || \
+	defined(CONFIG_PM8921_BMS_MODULE) || \
+	defined(CONFIG_QPNP_BMS) || \
+	defined(CONFIG_QPNP_VM_BMS)
+extern struct bms_battery_data  palladium_1500_data;
+extern struct bms_battery_data  desay_5200_data;
+extern struct bms_battery_data  oem_batt_data;
+extern struct bms_battery_data QRD_4v35_2000mAh_data;
+extern struct bms_battery_data  qrd_4v2_1300mah_data;
+
+int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp);
+int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc);
+int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+				int cycles);
+int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+				int batt_temp_degc, int ocv);
+int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+				int batt_temp_degc, int pc);
+int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+					int batt_temp, int pc);
+int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+					int batt_temp, int ibat);
+int linear_interpolate(int y0, int x0, int y1, int x1, int x);
+#else
+static inline int interpolate_fcc(struct single_row_lut *fcc_temp_lut,
+			int batt_temp)
+{
+	return -EINVAL;
+}
+static inline int interpolate_scalingfactor(struct sf_lut *sf_lut,
+			int row_entry, int pc)
+{
+	return -EINVAL;
+}
+static inline int interpolate_scalingfactor_fcc(
+			struct single_row_lut *fcc_sf_lut, int cycles)
+{
+	return -EINVAL;
+}
+static inline int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+			int batt_temp_degc, int ocv)
+{
+	return -EINVAL;
+}
+static inline int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+			int batt_temp_degc, int pc)
+{
+	return -EINVAL;
+}
+static inline int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+					int batt_temp, int pc)
+{
+	return -EINVAL;
+}
+static inline int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+{
+	return -EINVAL;
+}
+static inline int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+						int batt_temp, int ibat)
+{
+	return -EINVAL;
+}
+#endif
+
+#endif
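For reference, a hedged usage sketch of the lookup helpers declared above (not part of this patch); the profile pointer, the OCV units and the error handling are illustrative assumptions.

#include <linux/batterydata-lib.h>

/* Illustrative only: map battery temperature and OCV to percent charge.
 * The stubs above return -EINVAL when no BMS driver is enabled.
 */
static int example_soc_lookup(struct bms_battery_data *profile,
			      int batt_temp_degc, int ocv)
{
	return interpolate_pc(profile->pc_temp_ocv_lut, batt_temp_degc, ocv);
}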
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 6aaf425..a13b031 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -18,19 +18,12 @@
 
 struct bpf_reg_state {
 	enum bpf_reg_type type;
-	/*
-	 * Used to determine if any memory access using this register will
-	 * result in a bad access.
-	 */
-	s64 min_value;
-	u64 max_value;
 	union {
 		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
 		s64 imm;
 
 		/* valid when type == PTR_TO_PACKET* */
 		struct {
-			u32 id;
 			u16 off;
 			u16 range;
 		};
@@ -40,6 +33,13 @@
 		 */
 		struct bpf_map *map_ptr;
 	};
+	u32 id;
+	/* Used to determine if any memory access using this register will
+	 * result in a bad access. These two fields must be last.
+	 * See states_equal()
+	 */
+	s64 min_value;
+	u64 max_value;
 };
 
 enum bpf_stack_slot_type {
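The relocated range fields are easiest to see with a hedged illustration (not the verifier's actual code) of how a state comparison can skip them by comparing only the leading part of the struct, which is why the comment requires min_value/max_value to stay last.

#include <linux/bpf_verifier.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustration only: compare two register states while ignoring the
 * min_value/max_value range fields that now sit at the end of the struct.
 */
static bool example_regs_equal_except_range(const struct bpf_reg_state *a,
					    const struct bpf_reg_state *b)
{
	return !memcmp(a, b, offsetof(struct bpf_reg_state, min_value));
}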
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 292d6a1..6f3da08 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -121,4 +121,10 @@
 }
 
 #endif	/* CONFIG_GENERIC_BUG */
+
+#ifdef CONFIG_PANIC_ON_DATA_CORRUPTION
+#define PANIC_CORRUPTION 1
+#else
+#define PANIC_CORRUPTION 0
+#endif  /* CONFIG_PANIC_ON_DATA_CORRUPTION */
 #endif	/* _LINUX_BUG_H */
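A hedged sketch of how a subsystem might consume the new PANIC_CORRUPTION flag; the corruption check itself is made up for illustration.

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>

/* Illustration only: escalate a detected corruption to a panic when
 * CONFIG_PANIC_ON_DATA_CORRUPTION is set, otherwise just warn.
 */
static void example_check_node(struct list_head *node)
{
	if (unlikely(node->next->prev != node)) {
		if (PANIC_CORRUPTION)
			panic("example: list corruption at %p\n", node);
		WARN(1, "example: list corruption at %p\n", node);
	}
}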
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a765333..edc5d04 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -556,7 +556,7 @@
  * struct ccp_cmd - CPP operation request
  * @entry: list element (ccp driver use only)
  * @work: work element used for callbacks (ccp driver use only)
- * @ccp: CCP device to be run on (ccp driver use only)
+ * @ccp: CCP device to be run on
  * @ret: operation return code (ccp driver use only)
  * @flags: cmd processing flags
  * @engine: CCP operation to perform
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 31a7f91..8fd5fba 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -36,6 +36,8 @@
 #define CLK_IS_CRITICAL		BIT(11) /* do not gate, ever */
 /* parents need enable during gate/ungate, set rate and re-parent */
 #define CLK_OPS_PARENT_ENABLE	BIT(12)
+				/* unused */
+#define CLK_IS_MEASURE          BIT(14) /* measure clock */
 
 struct clk;
 struct clk_hw;
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c156f50..4fa2623 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,6 +30,11 @@
 
 typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
 			    unsigned long voltage, u32 *power);
+typedef int (*plat_mitig_t)(int cpu, u32 clip_freq);
+
+struct cpu_cooling_ops {
+	plat_mitig_t ceil_limit, floor_limit;
+};
 
 #ifdef CONFIG_CPU_THERMAL
 /**
@@ -43,6 +48,10 @@
 cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
 			       u32 capacitance, get_static_t plat_static_func);
 
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+					struct cpu_cooling_ops *ops);
+
 /**
  * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
  * @np: a valid struct device_node to the cooling device device tree node.
@@ -112,6 +121,13 @@
 	return NULL;
 }
 
+static inline struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+					struct cpu_cooling_ops *ops)
+{
+	return NULL;
+}
+
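A hedged sketch of registering a platform cooling device through the new hook; the callback body, the assumed kHz unit for clip_freq and the CPU mask are illustrative assumptions.

#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>

/* Illustration only: a platform mitigation callback applying a frequency
 * ceiling (assumed to be in kHz) to the given cpu.
 */
static int example_ceil_limit(int cpu, u32 clip_freq)
{
	return 0;
}

static struct cpu_cooling_ops example_cooling_ops = {
	.ceil_limit = example_ceil_limit,
};

static struct thermal_cooling_device *example_register_cooling(void)
{
	return cpufreq_platform_cooling_register(cpu_possible_mask,
						 &example_cooling_ops);
}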
 static inline
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index cc57986..23beb58 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -412,6 +412,7 @@
 
 #define CPUFREQ_TRANSITION_NOTIFIER	(0)
 #define CPUFREQ_POLICY_NOTIFIER		(1)
+#define CPUFREQ_GOVINFO_NOTIFIER	(2)
 
 /* Transition notifiers */
 #define CPUFREQ_PRECHANGE		(0)
@@ -424,6 +425,9 @@
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
 
+/* Govinfo Notifiers */
+#define CPUFREQ_LOAD_CHANGE		(0)
+
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
@@ -432,6 +436,16 @@
 		struct cpufreq_freqs *freqs);
 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed);
+/*
+ * Governor specific info that can be passed to modules that subscribe
+ * to CPUFREQ_GOVINFO_NOTIFIER
+ */
+struct cpufreq_govinfo {
+	unsigned int cpu;
+	unsigned int load;
+	unsigned int sampling_rate_us;
+};
+extern struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 #else /* CONFIG_CPU_FREQ */
 static inline int cpufreq_register_notifier(struct notifier_block *nb,
@@ -584,6 +598,9 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactive)
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
 extern struct cpufreq_governor cpufreq_gov_sched;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_sched)
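A hedged sketch of subscribing to the new governor-info chain and consuming CPUFREQ_LOAD_CHANGE events; the handler body and the registration call in the trailing comment are illustrative assumptions.

#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_govinfo_cb(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct cpufreq_govinfo *info = data;

	if (event == CPUFREQ_LOAD_CHANGE)
		pr_debug("cpu%u load %u%% (sampling %u us)\n",
			 info->cpu, info->load, info->sampling_rate_us);

	return NOTIFY_OK;
}

static struct notifier_block example_govinfo_nb = {
	.notifier_call = example_govinfo_cb,
};

/* registration (assumed path):
 * cpufreq_register_notifier(&example_govinfo_nb, CPUFREQ_GOVINFO_NOTIFIER);
 */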
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b9337de..7f395e3 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -45,6 +45,7 @@
 	CPUHP_POWERPC_MMU_CTX_PREPARE,
 	CPUHP_XEN_PREPARE,
 	CPUHP_XEN_EVTCHN_PREPARE,
+	CPUHP_QCOM_CPUFREQ_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
 	CPUHP_SH_SH3X_PREPARE,
@@ -86,6 +87,7 @@
 	CPUHP_AP_METAG_TIMER_STARTING,
 	CPUHP_AP_QCOM_TIMER_STARTING,
 	CPUHP_AP_QCOM_SLEEP_STARTING,
+	CPUHP_AP_QCOM_CPUFREQ_STARTING,
 	CPUHP_AP_ARMADA_TIMER_STARTING,
 	CPUHP_AP_MARCO_TIMER_STARTING,
 	CPUHP_AP_MIPS_GIC_TIMER_STARTING,
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61d042b..6844929 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -163,6 +163,7 @@
 	__u64			 dreq_isr;
 	__u64			 dreq_gsr;
 	__be32			 dreq_service;
+	spinlock_t		 dreq_lock;
 	struct list_head	 dreq_featneg;
 	__u32			 dreq_timestamp_echo;
 	__u32			 dreq_timestamp_time;
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 7adf6cc..374eb79 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -20,8 +20,6 @@
 #include <linux/devfreq.h>
 #include <linux/thermal.h>
 
-#ifdef CONFIG_DEVFREQ_THERMAL
-
 /**
  * struct devfreq_cooling_power - Devfreq cooling power ops
  * @get_static_power:	Take voltage, in mV, and return the static power
@@ -43,6 +41,8 @@
 	unsigned long dyn_power_coeff;
 };
 
+#ifdef CONFIG_DEVFREQ_THERMAL
+
 struct thermal_cooling_device *
 of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
 				  struct devfreq_cooling_power *dfc_power);
diff --git a/include/linux/esoc_client.h b/include/linux/esoc_client.h
index 77a8b50..7a4913d 100644
--- a/include/linux/esoc_client.h
+++ b/include/linux/esoc_client.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 struct esoc_desc {
 	const char *name;
 	const char *link;
+	const char *link_info;
 	void *priv;
 };
 
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index ff8b11b..f6dfc29 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -79,7 +79,6 @@
 	u8 ci_filename_mode;
 	u8 ci_flags;
 	struct crypto_skcipher *ci_ctfm;
-	struct key *ci_keyring_key;
 	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
 };
 
@@ -256,7 +255,6 @@
 extern int fscrypt_inherit_context(struct inode *, struct inode *,
 					void *, bool);
 /* keyinfo.c */
-extern int get_crypt_info(struct inode *);
 extern int fscrypt_get_encryption_info(struct inode *);
 extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
 
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 192eef2f..d596a07 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1548,31 +1548,23 @@
 get_next_pkt_raw(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *ring_info = &channel->inbound;
-	u32 read_loc = ring_info->priv_read_index;
+	u32 priv_read_loc = ring_info->priv_read_index;
 	void *ring_buffer = hv_get_ring_buffer(ring_info);
-	struct vmpacket_descriptor *cur_desc;
-	u32 packetlen;
 	u32 dsize = ring_info->ring_datasize;
-	u32 delta = read_loc - ring_info->ring_buffer->read_index;
+	/*
+	 * delta is the difference between what is available to read and
+	 * what was already consumed in place. We commit read index after
+	 * the whole batch is processed.
+	 */
+	u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
+		priv_read_loc - ring_info->ring_buffer->read_index :
+		(dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
 	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
 
 	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
 		return NULL;
 
-	if ((read_loc + sizeof(*cur_desc)) > dsize)
-		return NULL;
-
-	cur_desc = ring_buffer + read_loc;
-	packetlen = cur_desc->len8 << 3;
-
-	/*
-	 * If the packet under consideration is wrapping around,
-	 * return failure.
-	 */
-	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
-		return NULL;
-
-	return cur_desc;
+	return ring_buffer + priv_read_loc;
 }
 
 /*
@@ -1584,16 +1576,14 @@
 				struct vmpacket_descriptor *desc)
 {
 	struct hv_ring_buffer_info *ring_info = &channel->inbound;
-	u32 read_loc = ring_info->priv_read_index;
 	u32 packetlen = desc->len8 << 3;
 	u32 dsize = ring_info->ring_datasize;
 
-	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
-		BUG();
 	/*
 	 * Include the packet trailer.
 	 */
 	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+	ring_info->priv_read_index %= dsize;
 }
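A hedged consumer-loop sketch for the revised raw accessors; commit_rd_index() is assumed to be the companion helper that publishes priv_read_index once the batch is done, per the "commit read index after the whole batch" comment above.

#include <linux/hyperv.h>

/* Illustration only: drain a channel in place, then commit the read index. */
static void example_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		/* process (desc->len8 << 3) bytes of packet data here */
		put_pkt_raw(channel, desc);
	}
	commit_rd_index(channel);
}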
 
 /*
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index 23ca415..fa79319 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -62,7 +62,7 @@
 				  const char *name,
 				  struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
 	config_group_init_type_name(&d->group, name, type);
 #endif
 }
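The switch matters for tristate configs: IS_ENABLED(CONFIG_CONFIGFS_FS) is true for both =y and =m, while the old #ifdef only saw the built-in case, because =m defines CONFIG_CONFIGFS_FS_MODULE instead. A minimal illustration:

#include <linux/kconfig.h>

/* With CONFIG_CONFIGFS_FS=m: */
#ifdef CONFIG_CONFIGFS_FS
/* not compiled: only CONFIG_CONFIGFS_FS_MODULE is defined */
#endif

#if IS_ENABLED(CONFIG_CONFIGFS_FS)
/* compiled for both =y and =m */
#endif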
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index a83ac84..0668534 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -98,7 +98,7 @@
 };
 
 /**
- * enum hdr_total_len_or_pad_type - type vof value held by TOTAL_LEN_OR_PAD
+ * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD
  * field in header configuration register.
  * @IPA_HDR_PAD: field is used as padding length
  * @IPA_HDR_TOTAL_LEN: field is used as total length
@@ -433,6 +433,55 @@
 		       unsigned long data);
 
 /**
+ * enum ipa_wdi_meter_evt_type - type of event the client callback is
+ * invoked for in AP+STA mode metering
+ * @IPA_GET_WDI_SAP_STATS: get IPA stats between SAP and STA -
+ *			use ipa_get_wdi_sap_stats structure
+ * @IPA_SET_WIFI_QUOTA: set quota limit on STA -
+ *			use ipa_set_wifi_quota structure
+ */
+enum ipa_wdi_meter_evt_type {
+	IPA_GET_WDI_SAP_STATS,
+	IPA_SET_WIFI_QUOTA,
+};
+
+struct ipa_get_wdi_sap_stats {
+	/* indicate to reset stats after query */
+	uint8_t reset_stats;
+	/* indicate valid stats from wlan-fw */
+	uint8_t stats_valid;
+	/* Tx: SAP->STA */
+	uint64_t ipv4_tx_packets;
+	uint64_t ipv4_tx_bytes;
+	/* Rx: STA->SAP */
+	uint64_t ipv4_rx_packets;
+	uint64_t ipv4_rx_bytes;
+	uint64_t ipv6_tx_packets;
+	uint64_t ipv6_tx_bytes;
+	uint64_t ipv6_rx_packets;
+	uint64_t ipv6_rx_bytes;
+};
+
+/**
+ * struct ipa_set_wifi_quota - structure used for
+ *                                   IPA_SET_WIFI_QUOTA.
+ *
+ * @quota_bytes:    Quota (in bytes) for the STA interface.
+ * @set_quota:       Indicate whether to set the quota (use 1) or
+ *                   unset the quota.
+ *
+ */
+struct ipa_set_wifi_quota {
+	uint64_t quota_bytes;
+	uint8_t  set_quota;
+	/* indicate valid quota set from wlan-fw */
+	uint8_t set_valid;
+};
+
+typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt,
+		       void *data);
+
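A hedged sketch of a WLAN-side callback matching ipa_wdi_meter_notifier_cb (not part of this patch); the per-event handling is illustrative.

#include <linux/ipa.h>

static void example_wdi_meter_cb(enum ipa_wdi_meter_evt_type evt, void *data)
{
	switch (evt) {
	case IPA_GET_WDI_SAP_STATS: {
		struct ipa_get_wdi_sap_stats *stats = data;

		/* fill SAP<->STA packet/byte counters from wlan-fw here */
		stats->stats_valid = 1;
		break;
	}
	case IPA_SET_WIFI_QUOTA: {
		struct ipa_set_wifi_quota *quota = data;

		/* program quota->quota_bytes when quota->set_quota is 1 */
		quota->set_valid = 1;
		break;
	}
	}
}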
+/**
  * struct ipa_connect_params - low-level client connect input parameters. Either
  * client allocates the data and desc FIFO and specifies that in data+desc OR
  * specifies sizes and pipe_mem pref and IPA does the allocation.
@@ -1003,6 +1052,7 @@
  * @ul_smmu: WDI_RX configuration info when WLAN uses SMMU
  * @dl_smmu: WDI_TX configuration info when WLAN uses SMMU
  * @smmu_enabled: true if WLAN uses SMMU
+ * @wdi_notify: WDI metering callback used to get WDI stats and quota info
  */
 struct ipa_wdi_in_params {
 	struct ipa_sys_connect_params sys;
@@ -1013,6 +1063,15 @@
 		struct ipa_wdi_dl_params_smmu dl_smmu;
 	} u;
 	bool smmu_enabled;
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_meter_notifier_cb wdi_notify;
+#endif
+};
+
+enum ipa_upstream_type {
+	IPA_UPSTEAM_MODEM = 1,
+	IPA_UPSTEAM_WLAN,
+	IPA_UPSTEAM_MAX
 };
 
 /**
@@ -1267,6 +1326,9 @@
 int ipa_suspend_wdi_pipe(u32 clnt_hdl);
 int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
 u16 ipa_get_smem_restr_bytes(void);
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+		uint64_t num_bytes);
+
 /*
  * To retrieve doorbell physical address of
  * wlan pipes
@@ -1847,6 +1909,12 @@
 	return -EPERM;
 }
 
+static inline int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+		uint64_t num_bytes)
+{
+	return -EPERM;
+}
+
 static inline int ipa_uc_wdi_get_dbpa(
 	struct ipa_wdi_db_params *out)
 {
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7aebe23..3b94400 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -37,6 +37,7 @@
 	__s32		accept_ra_rtr_pref;
 	__s32		rtr_probe_interval;
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	__s32		accept_ra_rt_info_min_plen;
 	__s32		accept_ra_rt_info_max_plen;
 #endif
 #endif
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d..f38fae2 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -16,12 +16,6 @@
 #include <linux/bitops.h>
 
 /*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
  *   more efficiently than using fls() and fls64()
@@ -85,7 +79,7 @@
 #define ilog2(n)				\
 (						\
 	__builtin_constant_p(n) ? (		\
-		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) < 2 ? 0 :			\
 		(n) & (1ULL << 63) ? 63 :	\
 		(n) & (1ULL << 62) ? 62 :	\
 		(n) & (1ULL << 61) ? 61 :	\
@@ -148,10 +142,7 @@
 		(n) & (1ULL <<  4) ?  4 :	\
 		(n) & (1ULL <<  3) ?  3 :	\
 		(n) & (1ULL <<  2) ?  2 :	\
-		(n) & (1ULL <<  1) ?  1 :	\
-		(n) & (1ULL <<  0) ?  0 :	\
-		____ilog2_NaN()			\
-				   ) :		\
+		1 ) :				\
 	(sizeof(n) <= 4) ?			\
 	__ilog2_u32(n) :			\
 	__ilog2_u64(n)				\
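Net effect for constant arguments (hedged illustration, not part of the patch): ilog2(0) and ilog2(1) now both fold to 0 at compile time, where a constant 0 previously expanded to the now-removed ____ilog2_NaN() reference.

#include <linux/bug.h>
#include <linux/log2.h>

static void example_ilog2_constants(void)
{
	BUILD_BUG_ON(ilog2(64) != 6);
	BUILD_BUG_ON(ilog2(1) != 0);
	BUILD_BUG_ON(ilog2(0) != 0);	/* previously a ____ilog2_NaN() reference */
}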
diff --git a/include/linux/mailbox/qmp.h b/include/linux/mailbox/qmp.h
new file mode 100644
index 0000000..df3565b
--- /dev/null
+++ b/include/linux/mailbox/qmp.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMP_H_
+#define _QMP_H_
+
+#include <linux/types.h>
+
+/**
+ * struct qmp_pkt - Packet structure to be used for TX and RX with QMP
+ * @size:	size of data
+ * @data:	Buffer holding data of this packet
+ */
+struct qmp_pkt {
+	u32 size;
+	void *data;
+};
+
+#endif /* _QMP_H_ */
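A hedged sketch of pushing a qmp_pkt through the generic mailbox client API (not part of this patch); the channel handle and the message string are illustrative assumptions.

#include <linux/mailbox_client.h>
#include <linux/mailbox/qmp.h>

static int example_qmp_send(struct mbox_chan *chan)
{
	static char msg[] = "{class: ddr, res: fixed, val: 1}";
	struct qmp_pkt pkt = {
		.size = sizeof(msg),
		.data = msg,
	};

	return mbox_send_message(chan, &pkt);
}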
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index 8a507d2..c6c8d24 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -334,6 +334,7 @@
 	struct slim_device *slim_slave;
 	struct mutex io_lock;
 	struct mutex xfer_lock;
+	struct mutex reset_lock;
 	u8 version;
 
 	int reset_gpio;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2d191bf..f7b0dab 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2109,7 +2109,7 @@
 void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
-#define VM_MAX_READAHEAD	128	/* kbytes */
+#define VM_MAX_READAHEAD	512	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 73fad83..d265f60 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -119,6 +119,9 @@
 	u8			raw_pwr_cl_ddr_200_360;	/* 253 */
 	u8			raw_bkops_status;	/* 246 */
 	u8			raw_sectors[4];		/* 212 - 4 bytes */
+	u8			pre_eol_info;		/* 267 */
+	u8			device_life_time_est_typ_a;	/* 268 */
+	u8			device_life_time_est_typ_b;	/* 269 */
 
 	unsigned int            feature_support;
 #define MMC_DISCARD_FEATURE	BIT(0)                  /* CMD38 feature */
@@ -270,6 +273,7 @@
 						/* for byte mode */
 #define MMC_QUIRK_NONSTD_SDIO	(1<<2)		/* non-standard SDIO card attached */
 						/* (missing CIA registers) */
+#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3)	/* clock gating the sdio bus will make card fail */
 #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4)		/* SDIO card has nonstd function interfaces */
 #define MMC_QUIRK_DISABLE_CD	(1<<5)		/* disconnect CD/DAT[3] resistor */
 #define MMC_QUIRK_INAND_CMD38	(1<<6)		/* iNAND devices have broken CMD38 */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index fac3b5c..6dd1547 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -85,6 +85,12 @@
 
 struct mmc_host_ops {
 	/*
+	 * 'enable' is called when the host is claimed and 'disable' is called
+	 * when the host is released. 'enable' and 'disable' are deprecated.
+	 */
+	int (*enable)(struct mmc_host *host);
+	int (*disable)(struct mmc_host *host);
+	/*
 	 * It is optional for the host to implement pre_req and post_req in
 	 * order to support double buffering of requests (prepare one
 	 * request while another request is active).
@@ -313,9 +319,22 @@
 #define MMC_CAP2_HS400_ES	(1 << 20)	/* Host supports enhanced strobe */
 #define MMC_CAP2_NO_SD		(1 << 21)	/* Do not send SD commands during initialization */
 #define MMC_CAP2_NO_MMC		(1 << 22)	/* Do not send (e)MMC commands during initialization */
+#define MMC_CAP2_PACKED_WR_CONTROL (1 << 23)	/* Allow write packing control */
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+	int			clk_requests;	/* internal reference counter */
+	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
+	bool			clk_gated;	/* clock gated */
+	struct delayed_work	clk_gate_work; /* delayed clock gate */
+	unsigned int		clk_old;	/* old clock value cache */
+	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+	struct device_attribute clkgate_delay_attr;
+	unsigned long           clkgate_delay;
+#endif
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -523,6 +542,26 @@
 	return host->caps2 & MMC_CAP2_PACKED_WR;
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_hold(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_release(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif
+
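A hedged sketch of the hold/release pairing used with the re-introduced clock-gating support; the register access in the middle is illustrative.

#include <linux/mmc/host.h>

static void example_touch_controller(struct mmc_host *host)
{
	mmc_host_clk_hold(host);	/* keep the card clock ungated */
	/* ... access controller registers that require the clock ... */
	mmc_host_clk_release(host);	/* allow delayed gating again */
}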
 static inline int mmc_card_hs(struct mmc_card *card)
 {
 	return card->host->ios.timing == MMC_TIMING_SD_HS ||
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 0ac4125..68f60b8 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -273,6 +273,9 @@
 #define EXT_CSD_CACHE_SIZE		249	/* RO, 4 bytes */
 #define EXT_CSD_PWR_CL_DDR_200_360	253	/* RO */
 #define EXT_CSD_FIRMWARE_VERSION	254	/* RO, 8 bytes */
+#define EXT_CSD_PRE_EOL_INFO		267	/* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A	268	/* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B	269	/* RO */
 #define EXT_CSD_SUPPORTED_MODE		493	/* RO */
 #define EXT_CSD_TAG_UNIT_SIZE		498	/* RO */
 #define EXT_CSD_DATA_TAG_SUPPORT	499	/* RO */
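These offsets back the new health fields added to struct mmc_ext_csd in mmc/card.h above; a hedged sketch of how decode code could populate them from a raw 512-byte EXT_CSD buffer (the function name is illustrative).

#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

static void example_read_health(struct mmc_card *card, const u8 *ext_csd)
{
	card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
	card->ext_csd.device_life_time_est_typ_a =
		ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
	card->ext_csd.device_life_time_est_typ_b =
		ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
}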
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 541b10e..f5d2f72 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -367,6 +367,7 @@
 enum gsi_xfer_elem_type {
 	GSI_XFER_ELEM_DATA,
 	GSI_XFER_ELEM_IMME_CMD,
+	GSI_XFER_ELEM_NOP,
 };
 
 /**
@@ -409,6 +410,7 @@
  *
  *		    GSI_XFER_ELEM_DATA: for all data transfers
  *		    GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
+ *		    GSI_XFER_ELEM_NOP: for event generation only
  *
  * @xfer_user_data: cookie used in xfer_cb
  *
diff --git a/include/linux/of_batterydata.h b/include/linux/of_batterydata.h
new file mode 100644
index 0000000..5505371
--- /dev/null
+++ b/include/linux/of_batterydata.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/batterydata-lib.h>
+
+#ifdef CONFIG_OF_BATTERYDATA
+/**
+ * of_batterydata_read_data() - Populate battery data from the device tree
+ * @container_node: pointer to the battery-data container device node
+ *		containing the profile nodes.
+ * @batt_data: pointer to an allocated bms_battery_data structure that the
+ *		loaded profile will be written to.
+ * @batt_id_uv: ADC voltage of the battery id line used to differentiate
+ *		between different battery profiles. If there are multiple
+ *		battery data in the device tree, the one with the closest
+ *		battery id resistance will be automatically loaded.
+ *
+ * This routine loads the closest match battery data from device tree based on
+ * the battery id reading. Then, it will try to load all the relevant data from
+ * the device tree battery data profile.
+ *
+ * If any of the lookup table pointers are NULL, this routine will skip trying
+ * to read them.
+ */
+int of_batterydata_read_data(struct device_node *container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv);
+/**
+ * of_batterydata_get_best_profile() - Find matching battery data device node
+ * @batterydata_container_node: pointer to the battery-data container device
+ *		node containing the profile nodes.
+ * @batt_id_kohm: Battery ID in KOhms for which we want to find the profile.
+ * @batt_type: Battery type which we want to force load the profile.
+ *
+ * This routine returns a device_node pointer to the closest match battery data
+ * from device tree based on the battery id reading.
+ */
+struct device_node *of_batterydata_get_best_profile(
+		struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type);
+#else
+static inline int of_batterydata_read_data(struct device_node *container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv)
+{
+	return -ENXIO;
+}
+static inline struct device_node *of_batterydata_get_best_profile(
+		struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type)
+{
+	return ERR_PTR(-ENXIO);
+}
+#endif /* CONFIG_OF_BATTERYDATA */
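A hedged usage sketch of the two helpers above (not part of this patch); the container node name, the id value and the error handling are illustrative assumptions.

#include <linux/of.h>
#include <linux/of_batterydata.h>

static int example_load_profile(struct device_node *parent,
				struct bms_battery_data *batt_data,
				int batt_id_uv)
{
	struct device_node *container;
	int rc;

	container = of_find_node_by_name(parent, "qcom,battery-data");
	if (!container)
		return -ENODEV;

	rc = of_batterydata_read_data(container, batt_data, batt_id_uv);
	of_node_put(container);
	return rc;
}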
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 8462da2..2251428 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -84,6 +84,12 @@
 	ARMPMU_NR_ATTR_GROUPS
 };
 
+enum armpmu_pmu_states {
+	ARM_PMU_STATE_OFF,
+	ARM_PMU_STATE_RUNNING,
+	ARM_PMU_STATE_GOING_DOWN,
+};
+
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
@@ -108,6 +114,8 @@
 	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
+	int		pmu_state;
+	int		percpu_irq;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 531b8b1..3c80583 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -270,6 +270,8 @@
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
+	u32				events_across_hotplug:1,
+					reserved:31;
 
 	/* number of address filters this PMU can do */
 	unsigned int			nr_addr_filters;
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 7945fea..25e7a5f 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -58,5 +58,6 @@
 			u8 major, u16 minor, u16 step);
 const char *ufs_qcom_phy_name(struct phy *phy);
 int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable);
+void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy);
 
 #endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 37fb247..b46d6a8 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -46,6 +46,7 @@
 	POWER_SUPPLY_CHARGE_TYPE_NONE,
 	POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
 	POWER_SUPPLY_CHARGE_TYPE_FAST,
+	POWER_SUPPLY_CHARGE_TYPE_TAPER,
 };
 
 enum {
@@ -58,6 +59,9 @@
 	POWER_SUPPLY_HEALTH_COLD,
 	POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
 	POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+	POWER_SUPPLY_HEALTH_WARM,
+	POWER_SUPPLY_HEALTH_COOL,
+	POWER_SUPPLY_HEALTH_HOT,
 };
 
 enum {
@@ -85,6 +89,29 @@
 	POWER_SUPPLY_SCOPE_DEVICE,
 };
 
+enum {
+	POWER_SUPPLY_DP_DM_UNKNOWN = 0,
+	POWER_SUPPLY_DP_DM_PREPARE = 1,
+	POWER_SUPPLY_DP_DM_UNPREPARE = 2,
+	POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3 = 3,
+	POWER_SUPPLY_DP_DM_DP_PULSE = 4,
+	POWER_SUPPLY_DP_DM_DM_PULSE = 5,
+	POWER_SUPPLY_DP_DM_DP0P6_DMF = 6,
+	POWER_SUPPLY_DP_DM_DP0P6_DM3P3 = 7,
+	POWER_SUPPLY_DP_DM_DPF_DMF = 8,
+	POWER_SUPPLY_DP_DM_DPR_DMR = 9,
+	POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
+	POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
+	POWER_SUPPLY_DP_DM_ICL_UP = 12,
+};
+
+enum {
+	POWER_SUPPLY_PL_NONE,
+	POWER_SUPPLY_PL_USBIN_USBIN,
+	POWER_SUPPLY_PL_USBIN_USBIN_EXT,
+	POWER_SUPPLY_PL_USBMID_USBMID,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -114,6 +141,8 @@
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_EMPTY,
 	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
 	POWER_SUPPLY_PROP_CHARGE_AVG,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
@@ -133,6 +162,7 @@
 	POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
 	POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
 	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TEMP_MAX,
 	POWER_SUPPLY_PROP_TEMP_MIN,
@@ -149,11 +179,51 @@
 	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
 	POWER_SUPPLY_PROP_CALIBRATE,
-	POWER_SUPPLY_PROP_RESISTANCE,
 	/* Local extensions */
 	POWER_SUPPLY_PROP_USB_HC,
 	POWER_SUPPLY_PROP_USB_OTG,
-	POWER_SUPPLY_PROP_CHARGE_ENABLED,
+	POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
+	POWER_SUPPLY_PROP_HI_POWER,
+	POWER_SUPPLY_PROP_LOW_POWER,
+	POWER_SUPPLY_PROP_COOL_TEMP,
+	POWER_SUPPLY_PROP_WARM_TEMP,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
+	POWER_SUPPLY_PROP_RESISTANCE_ID, /* in Ohms */
+	POWER_SUPPLY_PROP_RESISTANCE_NOW,
+	POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+	POWER_SUPPLY_PROP_UPDATE_NOW,
+	POWER_SUPPLY_PROP_ESR_COUNT,
+	POWER_SUPPLY_PROP_BUCK_FREQ,
+	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+	POWER_SUPPLY_PROP_CHARGE_DONE,
+	POWER_SUPPLY_PROP_FLASH_ACTIVE,
+	POWER_SUPPLY_PROP_FLASH_TRIGGER,
+	POWER_SUPPLY_PROP_FORCE_TLIM,
+	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_QNOVO,
+	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+	POWER_SUPPLY_PROP_RERUN_AICL,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+	POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+	POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
 	POWER_SUPPLY_PROP_TYPEC_MODE,
 	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, /* 0: N/C, 1: CC1, 2: CC2 */
 	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
@@ -162,16 +232,26 @@
 	POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
 	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
 	POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
 	POWER_SUPPLY_PROP_PE_START,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
-	POWER_SUPPLY_PROP_BOOST_CURRENT,
-	POWER_SUPPLY_PROP_FORCE_TLIM,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_ICL_REDUCTION,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
 	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
 };
 
 enum power_supply_type {
@@ -183,9 +263,17 @@
 	POWER_SUPPLY_TYPE_USB_DCP,	/* Dedicated Charging Port */
 	POWER_SUPPLY_TYPE_USB_CDP,	/* Charging Downstream Port */
 	POWER_SUPPLY_TYPE_USB_ACA,	/* Accessory Charger Adapters */
-	POWER_SUPPLY_TYPE_USB_TYPE_C,	/* Type C Port */
-	POWER_SUPPLY_TYPE_USB_PD,	/* Power Delivery Port */
-	POWER_SUPPLY_TYPE_USB_PD_DRP,	/* PD Dual Role Port */
+	POWER_SUPPLY_TYPE_USB_HVDCP,	/* High Voltage DCP */
+	POWER_SUPPLY_TYPE_USB_HVDCP_3,	/* Efficient High Voltage DCP */
+	POWER_SUPPLY_TYPE_USB_PD,       /* Power Delivery */
+	POWER_SUPPLY_TYPE_WIRELESS,	/* Wireless Charger */
+	POWER_SUPPLY_TYPE_BMS,		/* Battery Monitor System */
+	POWER_SUPPLY_TYPE_PARALLEL,	/* Parallel Path */
+	POWER_SUPPLY_TYPE_MAIN,		/* Main Path */
+	POWER_SUPPLY_TYPE_WIPOWER,	/* Wipower */
+	POWER_SUPPLY_TYPE_TYPEC,	/* Type-C */
+	POWER_SUPPLY_TYPE_UFP,		/* Type-C UFP */
+	POWER_SUPPLY_TYPE_DFP,		/* Type-C DFP */
 };
 
 /* Indicates USB Type-C CC connection status */
@@ -359,7 +447,7 @@
 #ifdef CONFIG_POWER_SUPPLY
 extern int power_supply_is_system_supplied(void);
 #else
-static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
+static inline int power_supply_is_system_supplied(void) { return -EIO; }
 #endif
 
 extern int power_supply_get_property(struct power_supply *psy,
@@ -411,6 +499,9 @@
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 	case POWER_SUPPLY_PROP_CURRENT_AVG:
 	case POWER_SUPPLY_PROP_CURRENT_BOOT:
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
 		return 1;
 	default:
 		break;
diff --git a/include/linux/qdsp6v2/apr_tal.h b/include/linux/qdsp6v2/apr_tal.h
index 32c977f..bac5e90 100644
--- a/include/linux/qdsp6v2/apr_tal.h
+++ b/include/linux/qdsp6v2/apr_tal.h
@@ -32,7 +32,6 @@
 #if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
 	defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
 #define APR_MAX_BUF			512
-#define APR_NUM_OF_TX_BUF		30
 #else
 #define APR_MAX_BUF			8092
 #endif
diff --git a/include/linux/qdsp6v2/rtac.h b/include/linux/qdsp6v2/rtac.h
index 3e5433b..eeea0eb 100644
--- a/include/linux/qdsp6v2/rtac.h
+++ b/include/linux/qdsp6v2/rtac.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2011, 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2013-2015, 2017, The Linux Foundation. All rights
+ * reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -95,4 +96,5 @@
 bool rtac_make_afe_callback(uint32_t *payload, u32 payload_size);
 void rtac_set_afe_handle(void *handle);
 void get_rtac_adm_data(struct rtac_adm *adm_data);
+void rtac_update_afe_topology(u32 port_id);
 #endif
diff --git a/include/linux/qpnp/qpnp-misc.h b/include/linux/qpnp/qpnp-misc.h
new file mode 100644
index 0000000..7d95bf2
--- /dev/null
+++ b/include/linux/qpnp/qpnp-misc.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_MISC_H
+#define __QPNP_MISC_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_QPNP_MISC
+/**
+ * qpnp_misc_irqs_available - check if IRQs are available
+ *
+ * @consumer_dev: device struct
+ *
+ * This function returns true if the MISC interrupts are available
+ * based on a check in the MISC peripheral revision registers.
+ *
+ * Any consumer of this function needs to reference a MISC device phandle
+ * using the "qcom,misc-ref" property in their device tree node.
+ */
+
+int qpnp_misc_irqs_available(struct device *consumer_dev);
+
+/**
+ * qpnp_misc_read_reg - read register from misc device
+ *
+ * @node: device node pointer
+ * @address: address offset in misc peripheral to be read
+ * @val: data read from register
+ *
+ * This function returns zero if reading the MISC register succeeds.
+ *
+ */
+
+int qpnp_misc_read_reg(struct device_node *node, u16 addr, u8 *val);
+#else
+static inline int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+	return 0;
+}
+static inline int qpnp_misc_read_reg(struct device_node *node, u16 addr,
+					u8 *val)
+{
+	return 0;
+}
+#endif
+#endif
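A hedged sketch of a consumer of the MISC helpers above; the register offset and the probe-defer handling are illustrative assumptions.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/qpnp/qpnp-misc.h>

static int example_misc_probe(struct device *dev)
{
	u8 val;
	int rc;

	rc = qpnp_misc_irqs_available(dev);
	if (rc < 0)
		return rc;
	if (!rc)
		return -EPROBE_DEFER;	/* assume MISC peripheral not ready yet */

	return qpnp_misc_read_reg(dev->of_node, 0x09, &val);
}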
diff --git a/include/linux/qpnp/qpnp-pbs.h b/include/linux/qpnp/qpnp-pbs.h
new file mode 100644
index 0000000..39497ac
--- /dev/null
+++ b/include/linux/qpnp/qpnp-pbs.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_PBS_H
+#define _QPNP_PBS_H
+
+#ifdef CONFIG_QPNP_PBS
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap);
+#else
+static inline int qpnp_pbs_trigger_event(struct device_node *dev_node,
+						 u8 bitmap) {
+	return -ENODEV;
+}
+#endif
+
+#endif
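A hedged sketch of a client triggering a PBS sequence; the "qcom,pbs-client" phandle name and the trigger bitmap are illustrative assumptions.

#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/qpnp/qpnp-pbs.h>

static int example_pbs_kick(struct device_node *node)
{
	struct device_node *pbs_node;
	int rc;

	pbs_node = of_parse_phandle(node, "qcom,pbs-client", 0);
	if (!pbs_node)
		return -ENODEV;

	rc = qpnp_pbs_trigger_event(pbs_node, BIT(0));
	of_node_put(pbs_node);
	return rc;
}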
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index 4023e3a..a0e2283 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -181,6 +181,7 @@
 #define PM660L_SUBTYPE	0x1A
 #define PM660_SUBTYPE	0x1B
 
+/* PMI8998 REV_ID */
 #define PMI8998_V1P0_REV1	0x00
 #define PMI8998_V1P0_REV2	0x00
 #define PMI8998_V1P0_REV3	0x00
@@ -196,6 +197,26 @@
 #define PMI8998_V2P0_REV3	0x00
 #define PMI8998_V2P0_REV4	0x02
 
+/* PM660 REV_ID */
+#define PM660_V1P0_REV1		0x00
+#define PM660_V1P0_REV2		0x00
+#define PM660_V1P0_REV3		0x00
+#define PM660_V1P0_REV4		0x01
+
+#define PM660_V1P1_REV1		0x00
+#define PM660_V1P1_REV2		0x00
+#define PM660_V1P1_REV3		0x01
+#define PM660_V1P1_REV4		0x01
+
+/* PMI8998 FAB_ID */
+#define PMI8998_FAB_ID_SMIC	0x11
+#define PMI8998_FAB_ID_GF	0x30
+
+/* PM660 FAB_ID */
+#define PM660_FAB_ID_GF		0x0
+#define PM660_FAB_ID_TSMC	0x2
+#define PM660_FAB_ID_MX		0x3
+
 /* PM8005 */
 #define PM8005_SUBTYPE		0x18
 
diff --git a/include/linux/regulator/qpnp-labibb-regulator.h b/include/linux/regulator/qpnp-labibb-regulator.h
new file mode 100644
index 0000000..2470695
--- /dev/null
+++ b/include/linux/regulator/qpnp-labibb-regulator.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_LABIBB_REGULATOR_H
+#define _QPNP_LABIBB_REGULATOR_H
+
+enum labibb_notify_event {
+	LAB_VREG_OK = 1,
+};
+
+int qpnp_labibb_notifier_register(struct notifier_block *nb);
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb);
+
+#endif
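A hedged sketch of a display driver listening for the LAB_VREG_OK event; the handler body and the registration call in the trailing comment are illustrative.

#include <linux/notifier.h>
#include <linux/regulator/qpnp-labibb-regulator.h>

static int example_labibb_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	if (event == LAB_VREG_OK) {
		/* LAB output is in regulation; safe to enable the panel */
	}
	return NOTIFY_OK;
}

static struct notifier_block example_labibb_nb = {
	.notifier_call = example_labibb_cb,
};

/* registration: qpnp_labibb_notifier_register(&example_labibb_nb); */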
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45b5f91..867de7d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3885,6 +3885,7 @@
 #define SCHED_CPUFREQ_RT	(1U << 0)
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
new file mode 100644
index 0000000..60cc768
--- /dev/null
+++ b/include/linux/sde_rsc.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_RSC_H_
+#define _SDE_RSC_H_
+
+#include <linux/kernel.h>
+
+/* primary display rsc index */
+#define SDE_RSC_INDEX		0
+
+#define MAX_RSC_CLIENT_NAME_LEN 128
+
+/**
+ * event will be triggered before sde core power collapse,
+ * mdss gdsc is still on
+ */
+#define SDE_RSC_EVENT_PRE_CORE_PC 0x1
+/**
+ * event will be triggered after sde core collapse complete,
+ * mdss gdsc is off now
+ */
+#define SDE_RSC_EVENT_POST_CORE_PC 0x2
+/**
+ * event will be triggered before restoring the sde core from power collapse,
+ * mdss gdsc is still off
+ */
+#define SDE_RSC_EVENT_PRE_CORE_RESTORE 0x4
+/**
+ * event will be triggered after restoring the sde core from power collapse,
+ * mdss gdsc is on now
+ */
+#define SDE_RSC_EVENT_POST_CORE_RESTORE 0x8
+/**
+ * event attached with solver state enabled
+ * all clients in clk_state or cmd_state
+ */
+#define SDE_RSC_EVENT_SOLVER_ENABLED 0x10
+/**
+ * event attached with solver state disabled
+ * one of the client requested for vid state
+ */
+#define SDE_RSC_EVENT_SOLVER_DISABLED 0x20
+
+/**
+ * sde_rsc_state: sde rsc state information
+ * SDE_RSC_IDLE_STATE: A client requests for idle state when there is no
+ *                    pixel or cmd transfer expected. An idle vote from
+ *                    all clients leads to power collapse state.
+ * SDE_RSC_CLK_STATE:  A client requests for clk state when it wants to
+ *                    only avoid mode-2 entry/exit. For ex: V4L2 driver,
+ *                    sde power handle, etc.
+ * SDE_RSC_CMD_STATE:  A client requests for cmd state when it wants to
+ *                    enable the solver mode.
+ * SDE_RSC_VID_STATE:  A client requests for vid state when it wants to
+ *                    avoid solver enable because the client is fetching
+ *                    data continuously.
+ */
+enum sde_rsc_state {
+	SDE_RSC_IDLE_STATE,
+	SDE_RSC_CLK_STATE,
+	SDE_RSC_CMD_STATE,
+	SDE_RSC_VID_STATE,
+};
+
+/**
+ * struct sde_rsc_client: stores the rsc client for sde driver
+ * @name:	name of the client
+ * @current_state:   current client state
+ * @crtc_id:		crtc_id associated with this rsc client.
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @list:	list to attach client master list
+ */
+struct sde_rsc_client {
+	char name[MAX_RSC_CLIENT_NAME_LEN];
+	short current_state;
+	int crtc_id;
+	u32 rsc_index;
+	struct list_head list;
+};
+
+/**
+ * struct sde_rsc_event: local event registration entry structure
+ * @cb_func:	Pointer to desired callback function
+ * @usr:	User pointer to pass to callback on event trigger
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @event_type:	refer comments in event_register
+ * @list:	list to attach event master list
+ */
+struct sde_rsc_event {
+	void (*cb_func)(uint32_t event_type, void *usr);
+	void *usr;
+	u32 rsc_index;
+	uint32_t event_type;
+	struct list_head list;
+};
+
+/**
+ * struct sde_rsc_cmd_config: provides panel configuration to rsc
+ * when client is command mode. It is not required to set it during
+ * video mode.
+ *
+ * @fps:	panel te interval
+ * @vtotal:	current vertical total (height + vbp + vfp)
+ * @jitter:	panel can set the jitter to wake up rsc/solver early
+ *              This value causes mdp core to exit certain mode
+ *              early. Default is 10% jitter
+ * @prefill_lines:	max prefill lines based on panel
+ */
+struct sde_rsc_cmd_config {
+	u32 fps;
+	u32 vtotal;
+	u32 jitter;
+	u32 prefill_lines;
+};
+
+#ifdef CONFIG_DRM_SDE_RSC
+/**
+ * sde_rsc_client_create() - create the client for sde rsc.
+ * Different displays like DSI, HDMI, DP, WB, etc should call this
+ * api to register their vote for rpmh. They still need to vote for
+ * power handle to get the clocks.
+ *
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * @name:	 Caller needs to provide some valid string to identify
+ *               the client. "primary", "dp", "hdmi" are suggested name.
+ *               the client. "primary", "dp", "hdmi" are suggested names.
+ *               or not. Primary client votes will be redirected to
+ *               display rsc.
+ * @config:	 fps, vtotal, porches, etc configuration for command mode
+ *               panel
+ *
+ * Return: client node pointer.
+ */
+struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *name,
+		bool is_primary_display);
+
+/**
+ * sde_rsc_client_destroy() - Destroy the sde rsc client.
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: none
+ */
+void sde_rsc_client_destroy(struct sde_rsc_client *client);
+
+/**
+ * sde_rsc_client_state_update() - rsc client state update
+ * Video mode, cmd mode and clk state are supported as modes. A client need to
+ * set this property during panel time. A switching client can set the
+ * property to change the state
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @state:	 Client state - video/cmd
+ * @config:	 fps, vtotal, porches, etc configuration for command mode
+ *               panel
+ * @crtc_id:	 current client's crtc id
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_state_update(struct sde_rsc_client *client,
+	enum sde_rsc_state state,
+	struct sde_rsc_cmd_config *config, int crtc_id);
+
+/**
+ * sde_rsc_client_vote() - ab/ib vote from rsc client
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @ab:		 aggregated bandwidth vote from client.
+ * @ib:		 instant bandwidth vote from client.
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+	u64 ab_vote, u64 ib_vote);
+
+/**
+ * sde_rsc_register_event - register a callback function for an event
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * @event_type:  event type to register; client sets 0x3 if it wants
+ *               to register for CORE_PC and CORE_RESTORE - both events.
+ * @cb_func:     Pointer to desired callback function
+ * @usr:         User pointer to pass to callback on event trigger
+ * Returns: sde_rsc_event pointer on success
+ */
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr);
+
+/**
+ * sde_rsc_unregister_event - unregister callback for an event
+ * @event: event pointer returned by sde_rsc_register_event()
+ */
+void sde_rsc_unregister_event(struct sde_rsc_event *event);
+
+#else
+
+static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index,
+		char *name, bool is_primary_display)
+{
+	return NULL;
+}
+
+static inline void sde_rsc_client_destroy(struct sde_rsc_client *client)
+{
+}
+
+static inline int sde_rsc_client_state_update(struct sde_rsc_client *client,
+	enum sde_rsc_state state,
+	struct sde_rsc_cmd_config *config, int crtc_id)
+{
+	return 0;
+}
+
+static inline int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+	u64 ab_vote, u64 ib_vote)
+{
+	return 0;
+}
+
+static inline struct sde_rsc_event *sde_rsc_register_event(int rsc_index,
+		uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
+{
+	return NULL;
+}
+
+static inline void sde_rsc_unregister_event(struct sde_rsc_event *event)
+{
+}
+
+#endif /* CONFIG_DRM_SDE_RSC */
+
+#endif /* _SDE_RSC_H_ */
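
A minimal usage sketch of the client API above, assuming the state enumerators (SDE_RSC_CMD_STATE, SDE_RSC_IDLE_STATE) from enum sde_rsc_state earlier in this header; the "example_" names, bandwidth numbers and crtc handling are illustrative only:

/*
 * Hypothetical DSI panel driver snippet showing the intended call order
 * for the RSC client API. Error handling is trimmed to the essentials.
 */
#include <linux/errno.h>
#include "sde_rsc.h"	/* header introduced by this patch */

static struct sde_rsc_client *rsc_client;

static int example_panel_enable(struct sde_rsc_cmd_config *cfg, int crtc_id)
{
	int rc;

	/* SDE_RSC_INDEX (0) is the only valid RSC instance today. */
	rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, "primary", true);
	if (!rsc_client)
		return -ENOMEM;

	/* Vote for command-mode state; cfg carries fps/vtotal/jitter. */
	rc = sde_rsc_client_state_update(rsc_client, SDE_RSC_CMD_STATE,
					 cfg, crtc_id);
	if (rc)
		return rc;

	/* ab/ib bandwidth values below are placeholders. */
	return sde_rsc_client_vote(rsc_client, 1000000ULL, 2000000ULL);
}

static void example_panel_disable(int crtc_id)
{
	sde_rsc_client_state_update(rsc_client, SDE_RSC_IDLE_STATE,
				    NULL, crtc_id);
	sde_rsc_client_destroy(rsc_client);
}
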
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8d0210e..8491bdc 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -142,6 +142,8 @@
 	int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+	int (*set_min_state)(struct thermal_cooling_device *, unsigned long);
+	int (*get_min_state)(struct thermal_cooling_device *, unsigned long *);
 	int (*get_requested_power)(struct thermal_cooling_device *,
 				   struct thermal_zone_device *, u32 *);
 	int (*state2power)(struct thermal_cooling_device *,
@@ -161,6 +163,8 @@
 	struct mutex lock; /* protect thermal_instances list */
 	struct list_head thermal_instances;
 	struct list_head node;
+	unsigned long sysfs_cur_state_req;
+	unsigned long sysfs_min_state_req;
 };
 
 struct thermal_attr {
@@ -260,6 +264,7 @@
 	void (*unbind_from_tz)(struct thermal_zone_device *tz);
 	int (*throttle)(struct thermal_zone_device *tz, int trip);
 	struct list_head	governor_list;
+	int min_state_throttle;
 };
 
 /* Structure that holds binding parameters for a zone */
@@ -348,6 +353,12 @@
 	 * 		Used by thermal zone drivers (default 0).
 	 */
 	int offset;
+
+	/*
+	 * @tracks_low:	Indicates that the thermal zone params are for
+	 *		temperatures falling below the thresholds.
+	 */
+	bool tracks_low;
 };
 
 struct thermal_genl_event {
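
The new set_min_state/get_min_state callbacks let a cooling device expose a floor in addition to the usual ceiling. A hedged sketch of a driver wiring them up (the "example_" structure and fields are hypothetical; only the two new ops are shown):

#include <linux/thermal.h>

struct example_cdev {
	unsigned long cur_state;
	unsigned long min_state;
	unsigned long max_state;
};

static int example_set_min_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct example_cdev *ec = cdev->devdata;

	if (state > ec->max_state)
		return -EINVAL;

	ec->min_state = state;
	/* Re-apply the current request so the new floor takes effect. */
	if (ec->cur_state < ec->min_state)
		ec->cur_state = ec->min_state;

	return 0;
}

static int example_get_min_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct example_cdev *ec = cdev->devdata;

	*state = ec->min_state;
	return 0;
}

static const struct thermal_cooling_device_ops example_ops = {
	/* get_max_state/get_cur_state/set_cur_state omitted for brevity */
	.set_min_state = example_set_min_state,
	.get_min_state = example_get_min_state,
};
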
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10..1f39661 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -354,6 +354,7 @@
  */
 struct usb_bus {
 	struct device *controller;	/* host/master side hardware */
+	struct device *sysdev;		/* as seen from firmware or bus */
 	int busnum;			/* Bus number (in order of reg) */
 	const char *bus_name;		/* stable id (PCI slot_name etc) */
 	u8 uses_dma;			/* Does the host controller use DMA? */
@@ -396,6 +397,15 @@
 	struct mon_bus *mon_bus;	/* non-null when associated */
 	int monitored;			/* non-zero when monitored */
 #endif
+	unsigned skip_resume:1;		/* All USB devices are brought into full
+					 * power state after system resume. It
+					 * is desirable for some buses to keep
+					 * their devices in suspend state even
+					 * after system resume. The devices
+					 * are resumed later when a remote
+					 * wakeup is detected or an interface
+					 * driver starts I/O.
+					 */
 };
 
 struct usb_dev_state;
@@ -734,6 +744,16 @@
 
 /* for drivers using iso endpoints */
 extern int usb_get_current_frame_number(struct usb_device *usb_dev);
+extern int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned int intr_num);
+extern int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned int intr_num);
+
+extern dma_addr_t usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+	unsigned int intr_num);
+extern dma_addr_t usb_get_dcba_dma_addr(struct usb_device *dev);
+extern dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep);
 
 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
 extern int usb_alloc_streams(struct usb_interface *interface,
@@ -1885,8 +1905,11 @@
 #define USB_DEVICE_REMOVE	0x0002
 #define USB_BUS_ADD		0x0003
 #define USB_BUS_REMOVE		0x0004
+#define USB_BUS_DIED		0x0005
 extern void usb_register_notify(struct notifier_block *nb);
 extern void usb_unregister_notify(struct notifier_block *nb);
+extern void usb_register_atomic_notify(struct notifier_block *nb);
+extern void usb_unregister_atomic_notify(struct notifier_block *nb);
 
 /* debugfs stuff */
 extern struct dentry *usb_debug_root;
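
A short sketch of a consumer of the new atomic notifier chain and the USB_BUS_DIED action; the module boilerplate and the recovery action are illustrative:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/usb.h>

static int example_usb_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* May be called from atomic context; keep the handler short. */
	if (action == USB_BUS_DIED)
		pr_err("example: USB bus died, scheduling recovery\n");

	return NOTIFY_OK;
}

static struct notifier_block example_usb_nb = {
	.notifier_call = example_usb_event,
};

static int __init example_init(void)
{
	usb_register_atomic_notify(&example_usb_nb);
	return 0;
}

static void __exit example_exit(void)
{
	usb_unregister_atomic_notify(&example_usb_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");
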
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc137..5c0b3fa 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -398,6 +398,15 @@
 	/* Call for power on/off the port if necessary */
 	int	(*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
 
+	int (*sec_event_ring_setup)(struct usb_hcd *hcd, unsigned int intr_num);
+	int (*sec_event_ring_cleanup)(struct usb_hcd *hcd,
+			unsigned int intr_num);
+	dma_addr_t (*get_sec_event_ring_dma_addr)(struct usb_hcd *hcd,
+			unsigned int intr_num);
+	dma_addr_t (*get_xfer_ring_dma_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev, struct usb_host_endpoint *ep);
+	dma_addr_t (*get_dcba_dma_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev);
 };
 
 static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -436,7 +445,19 @@
 		struct usb_host_interface *old_alt,
 		struct usb_host_interface *new_alt);
 extern int usb_hcd_get_frame_number(struct usb_device *udev);
+extern int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned int intr_num);
+extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned int intr_num);
+extern dma_addr_t usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+		unsigned int intr_num);
+extern dma_addr_t usb_hcd_get_dcba_dma_addr(struct usb_device *udev);
+extern dma_addr_t usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+	struct usb_host_endpoint *ep);
 
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+		struct device *sysdev, struct device *dev, const char *bus_name,
+		struct usb_hcd *primary_hcd);
 extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
 		struct device *dev, const char *bus_name);
 extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
@@ -485,7 +506,7 @@
 extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
 extern void usb_wakeup_notification(struct usb_device *hdev,
 		unsigned int portnum);
-
+extern void usb_flush_hub_wq(void);
 extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
 extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
 
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 1d0043d..de2a722 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -50,4 +50,10 @@
 /* device can't handle Link Power Management */
 #define USB_QUIRK_NO_LPM			BIT(10)
 
+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL	BIT(11)
+
 #endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index d66d44c..262fa64 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -105,7 +105,8 @@
 int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
 int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
 int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
-int msm_vidc_release_buffers(void *instance, int buffer_type);
+int msm_vidc_release_buffer(void *instance, int buffer_type,
+		unsigned int buffer_index);
 int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b);
 int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b);
 int msm_vidc_streamon(void *instance, enum v4l2_buf_type i);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 677a047..6d27dae 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4245,6 +4245,32 @@
 					struct ieee80211_regdomain *rd);
 
 /**
+ * regulatory_hint_user - hint to the wireless core a regulatory domain
+ * which the driver has received from an application
+ * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
+ *	should be in. If @rd is set this should be NULL. Note that if you
+ *	set this to NULL you should still set rd->alpha2 to some accepted
+ *	alpha2.
+ * @user_reg_hint_type: the type of user regulatory hint.
+ *
+ * Wireless drivers can use this function to hint to the wireless core
+ * the current regulatory domain as specified by trusted applications,
+ * it is the driver's responsibility to establish which applications it
+ * trusts.
+ *
+ * The wiphy should be registered to cfg80211 prior to this call.
+ * For cfg80211 drivers this means you must first use wiphy_register(),
+ * for mac80211 drivers you must first use ieee80211_register_hw().
+ *
+ * Drivers should check the return value; it's possible you can get
+ * an -ENOMEM or an -EINVAL.
+ *
+ * Return: 0 on success. -ENOMEM, -EINVAL.
+ */
+int regulatory_hint_user(const char *alpha2,
+			 enum nl80211_user_reg_hint_type user_reg_hint_type);
+
+/**
  * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
  * @wiphy: the wireless device we want to process the regulatory domain on
  * @regd: the custom regulatory domain to use for this wiphy
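
A hedged example of a driver forwarding a country code received from a trusted application; the surrounding driver plumbing and the "example_" name are assumptions:

#include <linux/printk.h>
#include <net/cfg80211.h>

static int example_set_country(const char *alpha2)
{
	int ret;

	/* The wiphy must already be registered before hinting. */
	ret = regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
	if (ret)
		pr_warn("example: regulatory hint %c%c failed: %d\n",
			alpha2[0], alpha2[1], ret);

	return ret;
}
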
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 4d1c46a..c7b1dc7 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@
 	struct iscsi_task	*task;		/* xmit task in progress */
 
 	/* xmit */
+	spinlock_t		taskqueuelock;  /* protects the next three lists */
 	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
 	struct list_head	cmdqueue;	/* data-path cmd queue */
 	struct list_head	requeue;	/* tasks needing another run */
diff --git a/include/soc/qcom/devfreq_devbw.h b/include/soc/qcom/devfreq_devbw.h
new file mode 100644
index 0000000..7edb2ab
--- /dev/null
+++ b/include/soc/qcom/devfreq_devbw.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DEVFREQ_DEVBW_H
+#define _DEVFREQ_DEVBW_H
+
+#include <linux/devfreq.h>
+
+#ifdef CONFIG_MSM_DEVFREQ_DEVBW
+int devfreq_add_devbw(struct device *dev);
+int devfreq_remove_devbw(struct device *dev);
+int devfreq_suspend_devbw(struct device *dev);
+int devfreq_resume_devbw(struct device *dev);
+#else
+static inline int devfreq_add_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_remove_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_suspend_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_resume_devbw(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#endif /* _DEVFREQ_DEVBW_H */
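
A minimal consumer sketch: add the bandwidth device at probe and gate it across suspend/resume (the platform driver context is illustrative):

#include <linux/platform_device.h>
#include <soc/qcom/devfreq_devbw.h>

static int example_probe(struct platform_device *pdev)
{
	return devfreq_add_devbw(&pdev->dev);
}

static int example_remove(struct platform_device *pdev)
{
	return devfreq_remove_devbw(&pdev->dev);
}

static int example_suspend(struct device *dev)
{
	return devfreq_suspend_devbw(dev);
}

static int example_resume(struct device *dev)
{
	return devfreq_resume_devbw(dev);
}
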
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 6b567d7..7ef984a 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -17,6 +17,21 @@
 #define ICNSS_MAX_IRQ_REGISTRATIONS    12
 #define ICNSS_MAX_TIMESTAMP_LEN        32
 
+enum icnss_uevent {
+	ICNSS_UEVENT_FW_READY,
+	ICNSS_UEVENT_FW_CRASHED,
+	ICNSS_UEVENT_FW_DOWN,
+};
+
+struct icnss_uevent_fw_down_data {
+	bool crashed;
+};
+
+struct icnss_uevent_data {
+	enum icnss_uevent uevent;
+	void *data;
+};
+
 struct icnss_driver_ops {
 	char *name;
 	int (*probe)(struct device *dev);
@@ -28,6 +43,7 @@
 	int (*pm_resume)(struct device *dev);
 	int (*suspend_noirq)(struct device *dev);
 	int (*resume_noirq)(struct device *dev);
+	int (*uevent)(struct device *dev, struct icnss_uevent_data *uevent);
 };
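
Sketch of a WLAN driver consuming the new uevent hook; the names and the reactions to each event are illustrative:

#include <linux/printk.h>
#include <soc/qcom/icnss.h>

static int example_uevent(struct device *dev,
			  struct icnss_uevent_data *uevent)
{
	struct icnss_uevent_fw_down_data *fw_down;

	switch (uevent->uevent) {
	case ICNSS_UEVENT_FW_READY:
		/* firmware is up; resume normal operation */
		break;
	case ICNSS_UEVENT_FW_DOWN:
		fw_down = uevent->data;
		pr_err("example: fw down, crashed=%d\n", fw_down->crashed);
		break;
	case ICNSS_UEVENT_FW_CRASHED:
		/* schedule driver recovery */
		break;
	}

	return 0;
}

static struct icnss_driver_ops example_ops = {
	.name = "example_wlan",
	.uevent = example_uevent,
	/* probe/remove and PM callbacks omitted */
};
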
 
 
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 812ea65..7a09cb1 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -42,6 +42,8 @@
 #define ADM_MATRIX_ID_AUDIO_TX              1
 
 #define ADM_MATRIX_ID_COMPRESSED_AUDIO_RX   2
+
+#define ADM_MATRIX_ID_LISTEN_TX             4
 /* Enumeration for an audio Tx matrix ID.*/
 #define ADM_MATRIX_ID_AUDIOX              1
 
@@ -96,6 +98,16 @@
  */
 #define ADM_CMD_DEVICE_OPEN_V5                          0x00010326
 
+/* This command allows a client to open a COPP/Voice Proc the
+ *	same way as ADM_CMD_DEVICE_OPEN_V5 but supports multiple
+ *	endpoint2 channels.
+ *
+ *	@return
+ *	#ADM_CMDRSP_DEVICE_OPEN_V6 with the resulting status and
+ *	COPP ID.
+ */
+#define ADM_CMD_DEVICE_OPEN_V6                      0x00010356
+
 /* Definition for a low latency stream session. */
 #define ADM_LOW_LATENCY_DEVICE_SESSION			0x2000
 
@@ -251,6 +263,129 @@
  */
 } __packed;
 
+/*  ADM device open command payload of the
+ *  #ADM_CMD_DEVICE_OPEN_V6 command.
+ */
+struct adm_cmd_device_open_v6 {
+	struct apr_hdr		hdr;
+	u16                  flags;
+/* Reserved for future use. Clients must set this field
+ * to zero.
+ */
+
+	u16                  mode_of_operation;
+/* Specifies whether the COPP must be opened on the Tx or Rx
+ * path. Use the ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_* macros for
+ * supported values and interpretation.
+ * Supported values:
+ * - 0x1 -- Rx path COPP
+ * - 0x2 -- Tx path live COPP
+ * - 0x3 -- Tx path nonlive COPP
+ * Live connections cause sample discarding in the Tx device
+ * matrix if the destination output ports do not pull them
+ * fast enough. Nonlive connections queue the samples
+ * indefinitely.
+ */
+
+	u16                  endpoint_id_1;
+/* Logical and physical endpoint ID of the audio path.
+ * If the ID is a voice processor Tx block, it receives near-end
+ * samples. Supported values: any pseudoport, AFE Rx port,
+ * or AFE Tx port. For a list of valid IDs, refer to
+ * @xhyperref{Q4,[Q4]}.
+ * Q4 = Hexagon Multimedia: AFE Interface Specification
+ */
+
+	u16                  endpoint_id_2;
+/* Logical and physical endpoint ID 2 for a voice processor
+ * Tx block.
+ * This is not applicable to audio COPP.
+ * Supported values:
+ * - AFE Rx port
+ * - 0xFFFF -- Endpoint 2 is unavailable and the voice
+ * processor Tx
+ * block ignores this endpoint
+ * When the voice processor Tx block is created on the audio
+ * record path,
+ * it can receive far-end samples from an AFE Rx port if the
+ * voice call
+ * is active. The ID of the AFE port is provided in this
+ * field.
+ * For a list of valid IDs, refer @xhyperref{Q4,[Q4]}.
+ */
+
+	u32                  topology_id;
+/* Audio COPP topology ID; 32-bit GUID. */
+
+	u16                  dev_num_channel;
+/* Number of channels the audio COPP sends to/receives from
+ * the endpoint.
+ * Supported values: 1 to 8.
+ * The value is ignored for the voice processor Tx block,
+ * where channel
+ * configuration is derived from the topology ID.
+ */
+
+	u16                  bit_width;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from the
+ * endpoint. The value is ignored for the voice processing
+ * Tx block,
+ * where the PCM width is 16 bits.
+ */
+
+	u32                  sample_rate;
+/* Sampling rate at which the audio COPP/voice processor
+ * Tx block
+ * interfaces with the endpoint.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+	u8                   dev_channel_mapping[8];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to the endpoint. Channel[i] mapping describes channel
+ * i inside the buffer, where 0 < i < dev_num_channel.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+
+	u16                  dev_num_channel_eid2;
+/* Number of channels the voice processor block sends
+ * to/receives from the endpoint2.
+ * Supported values: 1 to 8.
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+	u16                  bit_width_eid2;
+/* Bit width (in bits) that the voice processor sends
+ * to/receives from the endpoint2.
+ * Supported values: 16 and 24.
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+	u32                  sample_rate_eid2;
+/* Sampling rate at which the voice processor Tx block
+ * interfaces with the endpoint2.
+ * Supported values for Tx voice processor: >0 and <=384 kHz
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+	u8                   dev_channel_mapping_eid2[8];
+/* Array of channel mapping of buffers that the voice processor
+ * sends to the endpoint. Channel[i] mapping describes channel
+ * i inside the buffer, where 0 < i < dev_num_channel.
+ * This value is relevant only for the Tx voice processor.
+ * The values are ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+} __packed;
+
 /*
  *	This command allows the client to close a COPP and disconnect
  *	the device session.
@@ -369,6 +504,15 @@
 	/* Reserved. This field must be set to zero.*/
 } __packed;
 
+/* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V6 command. */
+#define ADM_CMDRSP_DEVICE_OPEN_V6                      0x00010357
+
+/*  Payload of the #ADM_CMDRSP_DEVICE_OPEN_V6 message,
+ *	which returns the
+ *	status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V6 command,
+ *	is exactly the same as the ADM_CMDRSP_DEVICE_OPEN_V5 payload.
+ */
+
 /* This command allows a query of one COPP parameter. */
 #define ADM_CMD_GET_PP_PARAMS_V5                                0x0001032A
 
@@ -1204,6 +1348,8 @@
  * #AFE_MODULE_SIDETONE_IIR_FILTER module.
  */
 #define AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG	0x00010204
+#define MAX_SIDETONE_IIR_DATA_SIZE 224
+#define MAX_NO_IIR_FILTER_STAGE    10
 
 struct afe_sidetone_iir_filter_config_params {
 	u16                  num_biquad_stages;
@@ -1215,6 +1361,7 @@
 /* Pregain for the compensating filter response.
  * Supported values: Any number in Q13 format
  */
+	uint8_t   iir_config[MAX_SIDETONE_IIR_DATA_SIZE];
 } __packed;
 
 #define AFE_MODULE_LOOPBACK	0x00010205
@@ -1365,6 +1512,55 @@
 
 } __packed;
 
+struct afe_loopback_sidetone_gain {
+	u16                  rx_port_id;
+	u16                  gain;
+} __packed;
+
+struct loopback_cfg_data {
+	u32                  loopback_cfg_minor_version;
+/* Minor version used for tracking the version of the RMC module
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_LOOPBACK_CONFIG
+ */
+	u16                  dst_port_id;
+	/* Destination Port Id. */
+	u16                  routing_mode;
+/* Specifies data path type from src to dest port.
+ * Supported values:
+ * #LB_MODE_DEFAULT
+ * #LB_MODE_SIDETONE
+ * #LB_MODE_EC_REF_VOICE_AUDIO
+ * #LB_MODE_EC_REF_VOICE_A
+ * #LB_MODE_EC_REF_VOICE
+ */
+
+	u16                  enable;
+/* Specifies whether to enable (1) or
+ * disable (0) an AFE loopback.
+ */
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.
+ */
+} __packed;
+
+struct afe_st_loopback_cfg_v1 {
+	struct apr_hdr                    hdr;
+	struct afe_port_cmd_set_param_v2  param;
+	struct afe_port_param_data_v2     gain_pdata;
+	struct afe_loopback_sidetone_gain gain_data;
+	struct afe_port_param_data_v2     cfg_pdata;
+	struct loopback_cfg_data          cfg_data;
+} __packed;
+
+struct afe_loopback_iir_cfg_v2 {
+	struct apr_hdr                          hdr;
+	struct afe_port_cmd_set_param_v2        param;
+	struct afe_port_param_data_v2           st_iir_enable_pdata;
+	struct afe_mod_enable_param             st_iir_mode_enable_data;
+	struct afe_port_param_data_v2           st_iir_filter_config_pdata;
+	struct afe_sidetone_iir_filter_config_params st_iir_filter_config_data;
+} __packed;
 #define AFE_MODULE_SPEAKER_PROTECTION	0x00010209
 #define AFE_PARAM_ID_SPKR_PROT_CONFIG	0x0001020a
 #define AFE_API_VERSION_SPKR_PROT_CONFIG	0x1
@@ -1619,11 +1815,14 @@
 #define AFE_PORT_SAMPLE_RATE_16K          16000
 #define AFE_PORT_SAMPLE_RATE_48K          48000
 #define AFE_PORT_SAMPLE_RATE_96K          96000
+#define AFE_PORT_SAMPLE_RATE_176P4K       176400
 #define AFE_PORT_SAMPLE_RATE_192K         192000
+#define AFE_PORT_SAMPLE_RATE_352P8K       352800
 #define AFE_LINEAR_PCM_DATA				0x0
 #define AFE_NON_LINEAR_DATA				0x1
 #define AFE_LINEAR_PCM_DATA_PACKED_60958 0x2
 #define AFE_NON_LINEAR_DATA_PACKED_60958 0x3
+#define AFE_GENERIC_COMPRESSED           0x8
 
 /* This param id is used to configure I2S interface */
 #define AFE_PARAM_ID_I2S_CONFIG	0x0001020D
@@ -2265,6 +2464,13 @@
  */
 #define AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS    0x000102A5
 
+
+/* ID of the parameter used to set the endianness value for the
+ * USB audio device. It should be used with
+ * AFE_MODULE_AUDIO_DEV_INTERFACE
+ */
+#define AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT 0x000102AA
+
 /* Minor version used for tracking USB audio  configuration */
 #define AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG 0x1
 
@@ -2280,6 +2486,15 @@
 	u32                  dev_token;
 } __packed;
 
+struct afe_param_id_usb_audio_dev_lpcm_fmt {
+/* Minor version used for tracking USB audio device parameter.
+ * Supported values: AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG
+ */
+	u32                  cfg_minor_version;
+/* Endianness of actual end USB audio device */
+	u32                  endian;
+} __packed;
+
 /* ID of the parameter used by AFE_PARAM_ID_USB_AUDIO_CONFIG to configure
  * USB audio interface. It should be used with AFE_MODULE_AUDIO_DEV_INTERFACE
  */
@@ -2324,13 +2539,18 @@
 	u16                  reserved;
 /* device token of actual end USB aduio device */
 	u32                  dev_token;
+/* endianness of this interface */
+	u32                   endian;
 } __packed;
 
 struct afe_usb_audio_dev_param_command {
 	struct apr_hdr hdr;
 	struct afe_port_cmd_set_param_v2 param;
 	struct afe_port_param_data_v2    pdata;
-	struct afe_param_id_usb_audio_dev_params usb_dev;
+	union {
+		struct afe_param_id_usb_audio_dev_params usb_dev;
+		struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt;
+	};
 } __packed;
 
 /* This param id is used to configure Real Time Proxy interface. */
@@ -2528,7 +2748,9 @@
 	 * - #AFE_PORT_SAMPLE_RATE_16K
 	 * - #AFE_PORT_SAMPLE_RATE_24K
 	 * - #AFE_PORT_SAMPLE_RATE_32K
-	 * - #AFE_PORT_SAMPLE_RATE_48K @tablebulletend
+	 * - #AFE_PORT_SAMPLE_RATE_48K
+	 * - #AFE_PORT_SAMPLE_RATE_176P4K
+	 * - #AFE_PORT_SAMPLE_RATE_352P8K @tablebulletend
 	 */
 
 	u32	bit_width;
@@ -2537,10 +2759,11 @@
 	 */
 
 	u16	data_format;
-	/* < Data format: linear and compressed
+	/* < Data format: linear, compressed, generic compressed
 	 * @values
 	 * - #AFE_LINEAR_PCM_DATA
-	 * - #AFE_NON_LINEAR_DATA @tablebulletend
+	 * - #AFE_NON_LINEAR_DATA
+	 * - #AFE_GENERIC_COMPRESSED
 	 */
 
 	u16	sync_mode;
@@ -3419,7 +3642,7 @@
 #define DEFAULT_COPP_TOPOLOGY				0x00010314
 #define DEFAULT_POPP_TOPOLOGY				0x00010BE4
 #define COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY         0x0001076B
-#define COMPRESS_PASSTHROUGH_NONE_TOPOLOGY      0x00010774
+#define COMPRESSED_PASSTHROUGH_NONE_TOPOLOGY            0x00010774
 #define VPM_TX_SM_ECNS_COPP_TOPOLOGY			0x00010F71
 #define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY			0x00010F72
 #define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY		0x00010F75
@@ -3725,6 +3948,8 @@
 
 #define ASM_MEDIA_FMT_EVRCWB_FS 0x00010BF0
 
+#define ASM_MEDIA_FMT_GENERIC_COMPRESSED  0x00013212
+
 #define ASM_MAX_EQ_BANDS 12
 
 #define ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2 0x00010D98
@@ -3734,6 +3959,40 @@
 	/* Media format block size in bytes.*/
 }  __packed;
 
+struct asm_generic_compressed_fmt_blk_t {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+
+	/*
+	 * Channel mapping array of bitstream output.
+	 * Channel[i] mapping describes channel i inside the buffer, where
+	 * i < num_channels. All valid used channels must be
+	 * present at the beginning of the array.
+	 */
+	uint8_t channel_mapping[8];
+
+	/*
+	 * Number of channels of the incoming bitstream.
+	 * Supported values: 1,2,3,4,5,6,7,8
+	 */
+	uint16_t num_channels;
+
+	/*
+	 * Nominal bits per sample value of the incoming bitstream.
+	 * Supported values: 16, 32
+	 */
+	uint16_t bits_per_sample;
+
+	/*
+	 * Nominal sampling rate of the incoming bitstream.
+	 * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+	 *                   44100, 48000, 88200, 96000, 176400, 192000,
+	 *                   352800, 384000
+	 */
+	uint32_t sampling_rate;
+
+} __packed;
+
 struct asm_multi_channel_pcm_fmt_blk_v2 {
 	struct apr_hdr hdr;
 	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
@@ -4128,6 +4387,9 @@
 /* Enumeration for the raw AAC format. */
 #define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW    3
 
+/* Enumeration for the AAC LATM format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_LATM   4
+
 #define ASM_MEDIA_FMT_AAC_AOT_LC             2
 #define ASM_MEDIA_FMT_AAC_AOT_SBR            5
 #define ASM_MEDIA_FMT_AAC_AOT_PS             29
@@ -4783,8 +5045,8 @@
 
 } __packed;
 
-#define ASM_MEDIA_FMT_AC3			0x00010DEE
-#define ASM_MEDIA_FMT_EAC3			0x00010DEF
+#define ASM_MEDIA_FMT_AC3                    0x00010DEE
+#define ASM_MEDIA_FMT_EAC3                   0x00010DEF
 #define ASM_MEDIA_FMT_DTS                    0x00010D88
 #define ASM_MEDIA_FMT_MP2                    0x00010DE9
 #define ASM_MEDIA_FMT_FLAC                   0x00010C16
@@ -4793,7 +5055,6 @@
 #define ASM_MEDIA_FMT_APE                    0x00012F32
 #define ASM_MEDIA_FMT_DSD                    0x00012F3E
 
-
 /* Media format ID for adaptive transform acoustic coding. This
  * ID is used by the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED command
  * only.
@@ -5816,6 +6077,138 @@
 /* Reserved for future use. This field must be set to zero. */
 } __packed;
 
+
+#define ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK    0x00010DBA
+
+/* Bitmask for the stream's Performance mode. */
+#define ASM_BIT_MASK_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK \
+	(0x70000000UL)
+
+/* Bit shift for the stream's Performance mode. */
+#define ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK    28
+
+/* Bitmask for the decoder converter enable flag. */
+#define ASM_BIT_MASK_DECODER_CONVERTER_FLAG    (0x00000078UL)
+
+/* Shift value for the decoder converter enable flag. */
+#define ASM_SHIFT_DECODER_CONVERTER_FLAG                              3
+
+/* Converter mode is None (Default). */
+#define ASM_CONVERTER_MODE_NONE                                       0
+
+/* Converter mode is DDP-to-DD. */
+#define ASM_DDP_DD_CONVERTER_MODE                                     1
+
+/*  Identifies a special converter mode where source and sink formats
+ *  are the same but postprocessing must applied. Therefore, Decode
+ *  @rarrow Re-encode is necessary.
+ */
+#define ASM_POST_PROCESS_CONVERTER_MODE                               2
+
+
+struct asm_stream_cmd_open_transcode_loopback_t {
+	struct apr_hdr         hdr;
+	u32                    mode_flags;
+/* Mode Flags specifies the performance mode in which this stream
+ * is to be opened.
+ * Supported values{for bits 30 to 28}(stream_perf_mode flag)
+ *
+ * #ASM_LEGACY_STREAM_SESSION -- This mode ensures backward
+ *       compatibility to the original behavior
+ *       of ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK
+ *
+ * #ASM_LOW_LATENCY_STREAM_SESSION -- Opens a loopback session by using
+ *  shortened buffers in low latency POPP
+ *  - Recommendation: Do not enable high latency algorithms. They might
+ *    negate the benefits of opening a low latency stream, and they
+ *    might also suffer quality degradation from unexpected jitter.
+ *  - This Low Latency mode is supported only for PCM In and PCM Out
+ *    loopbacks. An error is returned if Low Latency mode is opened for
+ *    other transcode loopback modes.
+ *  - To configure this subfield, use
+ *     ASM_BIT_MASK_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK and
+ *     ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK.
+ *
+ * Supported values{for bits 6 to 3} (decoder-converter compatibility)
+ * #ASM_CONVERTER_MODE_NONE (0x0) -- Default
+ * #ASM_DDP_DD_CONVERTER_MODE (0x1)
+ * #ASM_POST_PROCESS_CONVERTER_MODE (0x2)
+ * 0x3-0xF -- Reserved for future use
+ * - Use #ASM_BIT_MASK_DECODER_CONVERTER_FLAG and
+ *        ASM_SHIFT_DECODER_CONVERTER_FLAG to set this bit
+ * All other bits are reserved; clients must set them to 0.
+ */
+
+	u32                    src_format_id;
+/* Specifies the media format of the input audio stream.
+ *
+ * Supported values
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3
+ * - #ASM_MEDIA_FMT_DTS
+ * - #ASM_MEDIA_FMT_EAC3_DEC
+ * - #ASM_MEDIA_FMT_EAC3
+ * - #ASM_MEDIA_FMT_AC3_DEC
+ * - #ASM_MEDIA_FMT_AC3
+ */
+	u32                    sink_format_id;
+/* Specifies the media format of the output stream.
+ *
+ * Supported values
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3
+ * - #ASM_MEDIA_FMT_DTS (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_EAC3_DEC (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_EAC3 (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_AC3_DEC (not supported in Low Latency mode)
+ * - #ASM_MEDIA_FMT_AC3 (not supported in Low Latency mode)
+ */
+
+	u32                    audproc_topo_id;
+/* Postprocessing topology ID, which specifies the topology (order of
+ *        processing) of postprocessing algorithms.
+ *
+ * Supported values
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_PEAKMETER
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_MCH_PEAK_VOL
+ *    - #ASM_STREAM_POSTPROC_TOPO_ID_NONE
+ *  Topologies can be added through #ASM_CMD_ADD_TOPOLOGIES.
+ *  This field is ignored for the Converter mode, in which no
+ *  postprocessing is performed.
+ */
+
+	u16                    src_endpoint_type;
+/* Specifies the source endpoint that provides the input samples.
+ *
+ * Supported values
+ *  - 0 -- Tx device matrix or stream router (gateway to the hardware
+ *    ports)
+ *  - All other values are reserved
+ *  Clients must set this field to 0. Otherwise, an error is returned.
+ */
+
+	u16                    sink_endpoint_type;
+/*  Specifies the sink endpoint type.
+ *
+ *  Supported values
+ *  - 0 -- Rx device matrix or stream router (gateway to the hardware
+ *    ports)
+ *  - All other values are reserved
+ *   Clients must set this field to 0. Otherwise, an error is returned.
+ */
+
+	u16                    bits_per_sample;
+/*   Number of bits per sample processed by the ASM modules.
+ *   Supported values 16, 24
+ */
+
+	u16                    reserved;
+/*   This field must be set to 0.
+ */
+} __packed;
+
+
 #define ASM_STREAM_CMD_CLOSE             0x00010BCD
 #define ASM_STREAM_CMD_FLUSH             0x00010BCE
 
@@ -6771,6 +7164,12 @@
 	/*< Clients must set this field to zero. */
 } __packed;
 
+struct adm_set_mic_gain_params {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+	struct admx_mic_gain mic_gain_data;
+} __packed;
+
 /* end_addtogroup audio_pp_param_ids */
 
 /* @ingroup audio_pp_module_ids
@@ -8022,11 +8421,10 @@
 /*	Band cut equalizer effect.*/
 #define ASM_PARAM_EQ_BAND_CUT       6
 
-/* Voice get & set params */
-#define VOICE_CMD_SET_PARAM				0x0001133D
-#define VOICE_CMD_GET_PARAM				0x0001133E
-#define VOICE_EVT_GET_PARAM_ACK				0x00011008
-
+/* Get & set params */
+#define VSS_ICOMMON_CMD_SET_PARAM_V2	0x0001133D
+#define VSS_ICOMMON_CMD_GET_PARAM_V2	0x0001133E
+#define VSS_ICOMMON_RSP_GET_PARAM	0x00011008
 
 /* ID of the Bass Boost module.
  * This module supports the following parameter IDs:
@@ -8663,6 +9061,31 @@
 	struct asm_stream_cmd_get_pp_params_v2 param;
 } __packed;
 
+/* Opcode to set BT address and license for aptx decoder */
+#define APTX_DECODER_BT_ADDRESS 0x00013201
+#define APTX_CLASSIC_DEC_LICENSE_ID 0x00013202
+
+struct aptx_dec_bt_addr_cfg {
+	uint32_t lap;
+	uint32_t uap;
+	uint32_t nap;
+} __packed;
+
+struct aptx_dec_bt_dev_addr {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param encdec;
+	struct aptx_dec_bt_addr_cfg bt_addr_cfg;
+} __packed;
+
+struct asm_aptx_dec_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u32     sample_rate;
+/* Number of samples per second.
+ * Supported values: 44100 and 48000 Hz
+ */
+} __packed;
+
 /* LSM Specific */
 #define VW_FEAT_DIM					(39)
 
@@ -8690,6 +9113,7 @@
 #define LSM_SESSION_EVENT_DETECTION_STATUS_V2		(0x00012B01)
 #define LSM_DATA_EVENT_READ_DONE			(0x00012B02)
 #define LSM_DATA_EVENT_STATUS				(0x00012B03)
+#define LSM_SESSION_EVENT_DETECTION_STATUS_V3		(0x00012B04)
 
 #define LSM_MODULE_ID_VOICE_WAKEUP			(0x00012C00)
 #define LSM_PARAM_ID_ENDPOINT_DETECT_THRESHOLD		(0x00012C01)
@@ -8702,6 +9126,12 @@
 #define LSM_PARAM_ID_LAB_ENABLE				(0x00012C09)
 #define LSM_PARAM_ID_LAB_CONFIG				(0x00012C0A)
 #define LSM_MODULE_ID_FRAMEWORK				(0x00012C0E)
+#define LSM_PARAM_ID_SWMAD_CFG				(0x00012C18)
+#define LSM_PARAM_ID_SWMAD_MODEL			(0x00012C19)
+#define LSM_PARAM_ID_SWMAD_ENABLE			(0x00012C1A)
+#define LSM_PARAM_ID_POLLING_ENABLE			(0x00012C1B)
+#define LSM_PARAM_ID_MEDIA_FMT				(0x00012C1E)
+#define LSM_PARAM_ID_FWK_MODE_CONFIG			(0x00012C27)
 
 /* HW MAD specific */
 #define AFE_MODULE_HW_MAD				(0x00010230)
@@ -9617,6 +10047,108 @@
 	union afe_port_group_config data;
 } __packed;
 
+/* ID of the parameter used by #AFE_MODULE_AUDIO_DEV_INTERFACE to specify
+ * the timing statistics of the corresponding device interface.
+ * Clients can periodically query the device timing statistics to help adjust
+ * the PLL based on the drift value. The get param command must be sent to the
+ * AFE port ID corresponding to the device interface.
+ *
+ * This parameter ID supports the following get param commands:
+ * #AFE_PORT_CMD_GET_PARAM_V2 and
+ * #AFE_PORT_CMD_GET_PARAM_V3.
+ */
+#define AFE_PARAM_ID_DEV_TIMING_STATS           0x000102AD
+
+/* Version information used to handle future additions to AFE device
+ * interface timing statistics (for backward compatibility).
+ */
+#define AFE_API_VERSION_DEV_TIMING_STATS        0x1
+
+/* Enumeration for specifying a sink(Rx) device */
+#define AFE_SINK_DEVICE                         0x0
+
+/* Enumeration for specifying a source(Tx) device */
+#define AFE_SOURCE_DEVICE                       0x1
+
+/* Enumeration for specifying the drift reference is of type AV Timer */
+#define AFE_REF_TIMER_TYPE_AVTIMER              0x0
+
+/* Message payload structure for the
+ * AFE_PARAM_ID_DEV_TIMING_STATS parameter.
+ */
+struct afe_param_id_dev_timing_stats {
+	/* Minor version used to track the version of device interface timing
+	 * statistics. Currently, the supported version is 1.
+	 * @values #AFE_API_VERSION_DEV_TIMING_STATS
+	 */
+	u32       minor_version;
+
+	/* Indicates the device interface direction as either
+	 * source (Tx) or sink (Rx).
+	 * @values
+	 * #AFE_SINK_DEVICE
+	 * #AFE_SOURCE_DEVICE
+	 */
+	u16        device_direction;
+
+	/* Reference timer for drift accumulation and time stamp information.
+	 * @values
+	 * #AFE_REF_TIMER_TYPE_AVTIMER @tablebulletend
+	 */
+	u16        reference_timer;
+
+	/*
+	 * Flag to indicate if resync is required on the client side for
+	 * drift correction. Flag is set to TRUE for the first get_param
+	 * response after device interface starts. This flag value can be
+	 * used by client to identify if device interface restart has
+	 * happened and if any re-sync is required at their end for drift
+	 * correction.
+	 * @values
+	 * 0: FALSE (Resync not required)
+	 * 1: TRUE (Resync required) @tablebulletend
+	 */
+	u32        resync_flag;
+
+	/* Accumulated drift value in microseconds. This value is updated
+	 * every 100 ms.
+	 * A positive drift value indicates the AV timer is running faster than
+	 * the device; a negative value indicates it is running slower.
+	 * @values Any valid int32 number
+	 */
+	s32         acc_drift_value;
+
+	/* Lower 32 bits of the 64-bit absolute timestamp of reference
+	 * timer in microseconds.
+	 *
+	 * This timestamp corresponds to the time when the drift values
+	 * are accumulated, every 100 ms.
+	 * @values Any valid uint32 number
+	 */
+	u32        ref_timer_abs_ts_lsw;
+
+	/* Upper 32 bits of the 64-bit absolute timestamp of reference
+	 * timer in microseconds.
+	 * This timestamp corresponds to the time when the drift values
+	 * are accumulated, every 100 ms.
+	 * @values Any valid uint32 number
+	 */
+	u32        ref_timer_abs_ts_msw;
+} __packed;
+
+struct afe_av_dev_drift_get_param {
+	struct apr_hdr hdr;
+	struct afe_port_cmd_get_param_v2 get_param;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_id_dev_timing_stats timing_stats;
+} __packed;
+
+struct afe_av_dev_drift_get_param_resp {
+	uint32_t status;
+	struct afe_port_param_data_v2 pdata;
+	struct afe_param_id_dev_timing_stats timing_stats;
+} __packed;
+
 /* Command for Matrix or Stream Router */
 #define ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2    0x00010DCE
 /* Module for AVSYNC */
@@ -9702,12 +10234,108 @@
 	 */
 };
 
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC which allows the
+ * audio client to choose the rendering decision that the audio DSP should use.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_MODE_CMD  0x00012F0D
+
+/* Indicates that rendering decision will be based on default rate
+ * (session clock based rendering, device driven).
+ * 1. The default session clock based rendering is inherently driven
+ *    by the timing of the device.
+ * 2. After the initial decision is made (first buffer after a run
+ *    command), subsequent data rendering decisions are made with
+ *    respect to the rate at which the device is rendering, thus deriving
+ *    its timing from the device.
+ * 3. While this decision making is simple, it has some inherent limitations
+ *    (mentioned in the next section).
+ * 4. If this API is not set, the session clock based rendering will be assumed
+ *    and this will ensure that the DSP is backward compatible.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT 0
+
+/* Indicates that rendering decision will be based on local clock rate.
+ * 1. In the DSP loopback/client loopback use cases (frame based
+ *    inputs), the incoming data into audio DSP is time-stamped at the
+ *    local clock rate (STC).
+ * 2. This TS rate may match the incoming data rate or may be different
+ *    from the incoming data rate.
+ * 3. Regardless, the data will be time-stamped with local STC and
+ *    therefore, the client is recommended to set this mode for these
+ *    use cases. This method is inherently more robust to sequencing
+ *    (AFE Start/Stop) and device switches, among other benefits.
+ * 4. This API will inform the DSP to compare every incoming buffer TS
+ *    against local STC.
+ * 5. DSP will continue to honor render windows APIs, as before.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC 1
+
+/* Structure for rendering decision parameter */
+struct asm_session_mtmx_strtr_param_render_mode_t {
+	/* Specifies the type of rendering decision the audio DSP should use.
+	 *
+	 * @values
+	 * - #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT
+	 * - #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC
+	 */
+	u32                  flags;
+} __packed;
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC which allows the
+ * audio client to specify the clock recovery mechanism that the audio DSP
+ * should use.
+ */
+
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD 0x00012F0E
+
+/* Indicates that default clock recovery will be used (no clock recovery).
+ * If the client wishes that no clock recovery be done, the client can
+ * choose this. This means that no attempt will be made by the DSP to try and
+ * match the rates of the input and output audio.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE 0
+
+/* Indicates that independent clock recovery needs to be used.
+ * 1. In the DSP loopback/client loopback use cases (frame based inputs),
+ *    the client should choose the independent clock recovery option.
+ * 2. This basically de-couples the audio and video from knowing each other's
+ *    clock sources and lets the audio DSP independently rate match the input
+ *    and output rates.
+ * 3. After drift detection, the drift correction is achieved by either pulling
+ *    the PLLs (if applicable) or by stream to device rate matching
+ *    (for PCM use cases) by comparing drift with respect to STC.
+ * 4. For passthrough use cases, since the PLL pulling is the only option,
+ *    a best effort will be made.
+ *    If PLL pulling is not possible / available, the rendering will be
+ *    done without rate matching.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO 1
+
+/* Payload of the #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC parameter.
+ */
+struct asm_session_mtmx_strtr_param_clk_rec_t {
+	/* Specifies the type of clock recovery that the audio DSP should
+	 * use for rate matching.
+	 */
+
+	/* @values
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE
+	 * #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO
+	 */
+	u32                  flags;
+} __packed;
+
+union asm_session_mtmx_strtr_param_config {
+	struct asm_session_mtmx_strtr_param_window_v2_t window_param;
+	struct asm_session_mtmx_strtr_param_render_mode_t render_param;
+	struct asm_session_mtmx_strtr_param_clk_rec_t clk_rec_param;
+} __packed;
+
 struct asm_mtmx_strtr_params {
 	struct apr_hdr  hdr;
 	struct asm_session_cmd_set_mtmx_strstr_params_v2 param;
 	struct asm_stream_param_data_v2 data;
-	u32 window_lsw;
-	u32 window_msw;
+	union asm_session_mtmx_strtr_param_config config;
 } __packed;
 
 #define ASM_SESSION_CMD_GET_MTMX_STRTR_PARAMS_V2 0x00010DCF
@@ -9827,6 +10455,8 @@
 	COMPRESSED_PASSTHROUGH,
 	COMPRESSED_PASSTHROUGH_CONVERT,
 	COMPRESSED_PASSTHROUGH_DSD,
+	LISTEN,
+	COMPRESSED_PASSTHROUGH_GEN,
 };
 
 #define AUDPROC_MODULE_ID_COMPRESSED_MUTE                0x00010770
@@ -9898,4 +10528,21 @@
 #define AUDPROC_PARAM_ID_AUDIOSPHERE_DESIGN_MULTICHANNEL_INPUT   0x0001091D
 
 #define AUDPROC_PARAM_ID_AUDIOSPHERE_OPERATING_INPUT_MEDIA_INFO  0x0001091E
+
+#define AUDPROC_MODULE_ID_VOICE_TX_SECNS   0x10027059
+#define AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH 0x10014444
+
+struct admx_sec_primary_mic_ch {
+	uint16_t version;
+	uint16_t reserved;
+	uint16_t sec_primary_mic_ch;
+	uint16_t reserved1;
+} __packed;
+
+
+struct adm_set_sec_primary_ch_params {
+	struct adm_cmd_set_pp_params_v5 params;
+	struct adm_param_data_v5 data;
+	struct admx_sec_primary_mic_ch sec_primary_mic_ch_data;
+} __packed;
 #endif /*_APR_AUDIO_V2_H_ */
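
As a worked example of the bitmask/shift macros documented for ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK, the sketch below packs a performance mode and a DDP-to-DD converter selection into mode_flags. The perf-mode value normally comes from macros such as ASM_LEGACY_STREAM_SESSION defined elsewhere in this header; the helper name is illustrative:

#include <linux/types.h>
#include <sound/apr_audio-v2.h>

static u32 example_transcode_mode_flags(u32 stream_perf_mode)
{
	u32 mode_flags = 0;

	/* bits 30..28: performance mode of the loopback session */
	mode_flags |= (stream_perf_mode <<
		ASM_SHIFT_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK) &
		ASM_BIT_MASK_STREAM_PERF_MODE_FLAG_IN_OPEN_TRANSCODE_LOOPBACK;

	/* bits 6..3: decoder converter mode (DDP -> DD chosen here) */
	mode_flags |= (ASM_DDP_DD_CONVERTER_MODE <<
		ASM_SHIFT_DECODER_CONVERTER_FLAG) &
		ASM_BIT_MASK_DECODER_CONVERTER_FLAG;

	return mode_flags;
}
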
diff --git a/include/sound/cpe_core.h b/include/sound/cpe_core.h
index f4af562..411c2ff 100644
--- a/include/sound/cpe_core.h
+++ b/include/sound/cpe_core.h
@@ -162,7 +162,7 @@
 	int (*lsm_set_one_param)(void *core_handle,
 			struct cpe_lsm_session *session,
 			struct lsm_params_info *p_info,
-			void *data, enum LSM_PARAM_TYPE param_type);
+			void *data, uint32_t param_type);
 	void (*lsm_get_snd_model_offset)
 		(void *core_handle, struct cpe_lsm_session *,
 		 size_t *offset);
diff --git a/include/sound/jack.h b/include/sound/jack.h
index c66e4e9..722a20e 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -66,12 +66,12 @@
 				  SND_JACK_MICROPHONE2,
 
 	/* Kept separate from switches to facilitate implementation */
-	SND_JACK_BTN_0		= 0x4000,
-	SND_JACK_BTN_1		= 0x2000,
-	SND_JACK_BTN_2		= 0x1000,
-	SND_JACK_BTN_3		= 0x0800,
-	SND_JACK_BTN_4		= 0x0400,
-	SND_JACK_BTN_5		= 0x0200,
+	SND_JACK_BTN_0		= 0x8000,
+	SND_JACK_BTN_1		= 0x4000,
+	SND_JACK_BTN_2		= 0x2000,
+	SND_JACK_BTN_3		= 0x1000,
+	SND_JACK_BTN_4		= 0x0800,
+	SND_JACK_BTN_5		= 0x0400,
 };
 
 struct snd_jack {
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index c9a429d..42d048f 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -58,9 +58,9 @@
 struct route_payload {
 	unsigned int copp_idx[MAX_COPPS_PER_PORT];
 	unsigned int port_id[MAX_COPPS_PER_PORT];
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
+	int app_type[MAX_COPPS_PER_PORT];
+	int acdb_dev_id[MAX_COPPS_PER_PORT];
+	int sample_rate[MAX_COPPS_PER_PORT];
 	unsigned short num_copps;
 	unsigned int session_id;
 };
@@ -96,12 +96,18 @@
 int adm_close(int port, int topology, int perf_mode);
 
 int adm_matrix_map(int path, struct route_payload payload_map,
-		   int perf_mode);
+		   int perf_mode, uint32_t passthr_mode);
 
 int adm_connect_afe_port(int mode, int session_id, int port_id);
 
 void adm_ec_ref_rx_id(int  port_id);
 
+void adm_num_ec_ref_rx_chans(int num_chans);
+
+void adm_ec_ref_rx_bit_width(int bit_width);
+
+void adm_ec_ref_rx_sampling_rate(int sampling_rate);
+
 int adm_get_lowlatency_copp_id(int port_id);
 
 int adm_set_multi_ch_map(char *channel_map, int path);
@@ -130,6 +136,11 @@
 int adm_set_softvolume(int port_id, int copp_idx,
 		       struct audproc_softvolume_params *softvol_param);
 
+int adm_set_mic_gain(int port_id, int copp_idx, int volume);
+
+int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx,
+				int primary_mic_ch);
+
 int adm_param_enable(int port_id, int copp_idx, int module_id,  int enable);
 
 int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 367e75d..8361175 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -42,6 +42,8 @@
 #define AFE_CLK_VERSION_V1    1
 #define AFE_CLK_VERSION_V2    2
 
+typedef int (*routing_cb)(int port);
+
 enum {
 	/* IDX 0->4 */
 	IDX_PRIMARY_I2S_RX,
@@ -265,7 +267,7 @@
 int afe_open(u16 port_id, union afe_port_config *afe_config, int rate);
 int afe_close(int port_id);
 int afe_loopback(u16 enable, u16 rx_port, u16 tx_port);
-int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain);
+int afe_sidetone_enable(u16 tx_port_id, u16 rx_port_id, bool enable);
 int afe_loopback_gain(u16 port_id, u16 volume);
 int afe_validate_port(u16 port_id);
 int afe_get_port_index(u16 port_id);
@@ -362,5 +364,8 @@
 	struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header_cfg,
 	u16 port_id);
 int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
-		u32 rate);
+		       u32 rate, u16 num_groups);
+void afe_set_routing_callback(routing_cb cb);
+int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
+		u16 port);
 #endif /* __Q6AFE_V2_H__ */
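
A hedged polling sketch for the new drift query: it reads the AFE timing statistics for a port (struct defined in apr_audio-v2.h above) and reacts to the resync flag; the port id and polling policy are illustrative:

#include <linux/printk.h>
#include <sound/apr_audio-v2.h>
#include <sound/q6afe-v2.h>

static void example_poll_drift(u16 port_id)
{
	struct afe_param_id_dev_timing_stats stats;

	if (afe_get_av_dev_drift(&stats, port_id))
		return;

	if (stats.resync_flag) {
		/* device interface restarted; restart drift tracking */
	}

	/*
	 * Positive acc_drift_value: the AV timer runs faster than the
	 * device; negative: slower. The value is in microseconds.
	 */
	pr_debug("drift=%d us at ts=0x%x%08x\n", stats.acc_drift_value,
		 stats.ref_timer_abs_ts_msw, stats.ref_timer_abs_ts_lsw);
}
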
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 7321481..6bc93f5 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -53,6 +53,8 @@
 #define FORMAT_G711_MLAW_FS 0x001b
 #define FORMAT_DTS          0x001c
 #define FORMAT_DSD          0x001d
+#define FORMAT_APTX         0x001e
+#define FORMAT_GEN_COMPR    0x001f
 
 #define ENCDEC_SBCBITRATE   0x0001
 #define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -270,7 +272,7 @@
 		       uint16_t bits_per_sample);
 
 int q6asm_open_read_v4(struct audio_client *ac, uint32_t format,
-		       uint16_t bits_per_sample);
+		       uint16_t bits_per_sample, bool ts_mode);
 
 int q6asm_open_write(struct audio_client *ac, uint32_t format
 		/*, uint16_t bits_per_sample*/);
@@ -499,6 +501,11 @@
 			uint32_t rate, uint32_t channels,
 			bool use_default_chmap, char *channel_map,
 			uint16_t bits_per_sample);
+int q6asm_media_format_block_gen_compr(
+			struct audio_client *ac,
+			uint32_t rate, uint32_t channels,
+			bool use_default_chmap, char *channel_map,
+			uint16_t bits_per_sample);
 
 int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
 					     uint32_t rate, uint32_t channels,
@@ -552,6 +559,9 @@
 int q6asm_media_format_block_dsd(struct audio_client *ac,
 			struct asm_dsd_cfg *cfg, int stream_id);
 
+int q6asm_stream_media_format_block_aptx_dec(struct audio_client *ac,
+						uint32_t sr, int stream_id);
+
 int q6asm_ds1_set_endp_params(struct audio_client *ac,
 				int param_id, int param_value);
 
@@ -574,6 +584,10 @@
 int q6asm_dts_eagle_get(struct audio_client *ac, int param_id, uint32_t size,
 			void *data, struct param_outband *po, int m_id);
 
+/* Send aptx decoder BT address */
+int q6asm_set_aptx_dec_bt_addr(struct audio_client *ac,
+				struct aptx_dec_bt_addr_cfg *cfg);
+
 /* Set SoftPause Params */
 int q6asm_set_softpause(struct audio_client *ac,
 			struct asm_softpause_params *param);
@@ -627,6 +641,14 @@
 		struct asm_session_mtmx_strtr_param_window_v2_t *window_param,
 		uint32_t param_id);
 
+/* Configure DSP render mode */
+int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac,
+		uint32_t render_mode);
+
+/* Configure DSP clock recovery mode */
+int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
+		uint32_t clk_rec_mode);
+
 /* Retrieve the current DSP path delay */
 int q6asm_get_path_delay(struct audio_client *ac);
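
Sketch of a timestamped loopback client selecting local-STC rendering and automatic clock recovery through the two new helpers; the audio_client "ac" is assumed to be already opened, and the choice of modes is illustrative:

#include <sound/apr_audio-v2.h>
#include <sound/q6asm-v2.h>

static int example_configure_avsync(struct audio_client *ac)
{
	int rc;

	/* compare every incoming buffer timestamp against local STC */
	rc = q6asm_send_mtmx_strtr_render_mode(ac,
			ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC);
	if (rc)
		return rc;

	/* let the DSP independently rate-match input and output */
	return q6asm_send_mtmx_strtr_clk_rec_mode(ac,
			ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO);
}
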
 
diff --git a/include/sound/q6lsm.h b/include/sound/q6lsm.h
index 22a62da..26106a8 100644
--- a/include/sound/q6lsm.h
+++ b/include/sound/q6lsm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,10 @@
 
 #define MAX_NUM_CONFIDENCE 20
 
+#define ADM_LSM_PORT_ID 0xADCB
+
+#define LSM_MAX_NUM_CHANNELS 8
+
 typedef void (*lsm_app_cb)(uint32_t opcode, uint32_t token,
 		       uint32_t *payload, void *priv);
 
@@ -49,11 +53,12 @@
 	uint32_t mem_map_handle;
 };
 
-struct lsm_lab_hw_params {
+struct lsm_hw_params {
 	u16 sample_rate;
 	u16 sample_size;
 	u32 buf_sz;
 	u32 period_count;
+	u16 num_chs;
 };
 
 struct lsm_client {
@@ -79,8 +84,12 @@
 	bool		lab_enable;
 	bool		lab_started;
 	struct lsm_lab_buffer *lab_buffer;
-	struct lsm_lab_hw_params hw_params;
+	struct lsm_hw_params hw_params;
 	bool		use_topology;
+	int		session_state;
+	bool		poll_enable;
+	int		perf_mode;
+	uint32_t	event_mode;
 };
 
 struct lsm_stream_cmd_open_tx {
@@ -134,6 +143,27 @@
 	uint16_t	reserved;
 } __packed;
 
+struct lsm_param_poll_enable {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	/* indicates to voice wakeup that HW MAD/SW polling is enabled or not */
+	uint32_t	polling_enable;
+} __packed;
+
+struct lsm_param_fwk_mode_cfg {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	uint32_t	mode;
+} __packed;
+
+struct lsm_param_media_fmt {
+	struct lsm_param_payload_common common;
+	uint32_t	minor_version;
+	uint32_t	sample_rate;
+	uint16_t	num_channels;
+	uint16_t	bit_width;
+	uint8_t		channel_mapping[LSM_MAX_NUM_CHANNELS];
+} __packed;
 
 /*
  * This param cannot be sent in this format.
@@ -163,11 +193,22 @@
 	struct lsm_param_min_confidence_levels	conf_payload;
 } __packed;
 
-struct lsm_cmd_set_opmode_connectport {
+struct lsm_cmd_set_params_opmode {
 	struct apr_hdr  msg_hdr;
 	struct lsm_set_params_hdr params_hdr;
-	struct lsm_param_connect_to_port	connect_to_port;
-	struct lsm_param_op_mode		op_mode;
+	struct lsm_param_op_mode op_mode;
+} __packed;
+
+struct lsm_cmd_set_connectport {
+	struct apr_hdr msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_connect_to_port connect_to_port;
+} __packed;
+
+struct lsm_cmd_poll_enable {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_poll_enable poll_enable;
 } __packed;
 
 struct lsm_param_epd_thres {
@@ -250,6 +291,19 @@
 	uint32_t flags;
 } __packed;
 
+struct lsm_cmd_set_fwk_mode_cfg {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_fwk_mode_cfg fwk_mode_cfg;
+} __packed;
+
+struct lsm_cmd_set_media_fmt {
+	struct apr_hdr  msg_hdr;
+	struct lsm_set_params_hdr params_hdr;
+	struct lsm_param_media_fmt media_fmt;
+} __packed;
+
+
 struct lsm_client *q6lsm_client_alloc(lsm_app_cb cb, void *priv);
 void q6lsm_client_free(struct lsm_client *client);
 int q6lsm_open(struct lsm_client *client, uint16_t app_id);
@@ -274,8 +328,11 @@
 int q6lsm_lab_buffer_alloc(struct lsm_client *client, bool alloc);
 int q6lsm_set_one_param(struct lsm_client *client,
 			struct lsm_params_info *p_info, void *data,
-			enum LSM_PARAM_TYPE param_type);
+			uint32_t param_type);
 void q6lsm_sm_set_param_data(struct lsm_client *client,
 		struct lsm_params_info *p_info,
 		size_t *offset);
+int q6lsm_set_port_connected(struct lsm_client *client);
+int q6lsm_set_fwk_mode_cfg(struct lsm_client *client, uint32_t event_mode);
+int q6lsm_set_media_fmt_params(struct lsm_client *client);
 #endif /* __Q6LSM_H__ */
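
A hypothetical listen-session configuration sequence using the new q6lsm helpers; the call order and the comments are a sketch, not a mandated sequence:

#include <sound/q6lsm.h>

static int example_lsm_configure(struct lsm_client *client,
				 uint32_t event_mode)
{
	int rc;

	/* send LSM_PARAM_ID_MEDIA_FMT based on the client's hw_params */
	rc = q6lsm_set_media_fmt_params(client);
	if (rc)
		return rc;

	/* select the detection event payload version (v2 vs v3) */
	rc = q6lsm_set_fwk_mode_cfg(client, event_mode);
	if (rc)
		return rc;

	/* connect the session to its port */
	return q6lsm_set_port_connected(client);
}
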
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index d7cd961..693fceb 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -7,7 +7,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -60,6 +60,44 @@
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
index faecc0b..61992e6 100644
--- a/include/trace/events/cpufreq_interactive.h
+++ b/include/trace/events/cpufreq_interactive.h
@@ -8,102 +8,138 @@
 
 DECLARE_EVENT_CLASS(set,
 	TP_PROTO(u32 cpu_id, unsigned long targfreq,
-		 unsigned long actualfreq),
+	         unsigned long actualfreq),
 	TP_ARGS(cpu_id, targfreq, actualfreq),
 
 	TP_STRUCT__entry(
-		__field(u32, cpu_id)
-		__field(unsigned long, targfreq)
-		__field(unsigned long, actualfreq)
-	),
+	    __field(          u32, cpu_id    )
+	    __field(unsigned long, targfreq   )
+	    __field(unsigned long, actualfreq )
+	   ),
 
 	TP_fast_assign(
-		__entry->cpu_id = (u32)cpu_id;
-		__entry->targfreq = targfreq;
-		__entry->actualfreq = actualfreq;
+	    __entry->cpu_id = (u32) cpu_id;
+	    __entry->targfreq = targfreq;
+	    __entry->actualfreq = actualfreq;
 	),
 
 	TP_printk("cpu=%u targ=%lu actual=%lu",
-		__entry->cpu_id, __entry->targfreq,
-		__entry->actualfreq)
+	      __entry->cpu_id, __entry->targfreq,
+	      __entry->actualfreq)
 );
 
 DEFINE_EVENT(set, cpufreq_interactive_setspeed,
 	TP_PROTO(u32 cpu_id, unsigned long targfreq,
-		 unsigned long actualfreq),
+	     unsigned long actualfreq),
 	TP_ARGS(cpu_id, targfreq, actualfreq)
 );
 
 DECLARE_EVENT_CLASS(loadeval,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+		    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
 
-	TP_STRUCT__entry(
-		__field(unsigned long, cpu_id)
-		__field(unsigned long, load)
-		__field(unsigned long, curtarg)
-		__field(unsigned long, curactual)
-		__field(unsigned long, newtarg)
-	),
+	    TP_STRUCT__entry(
+		    __field(unsigned long, cpu_id    )
+		    __field(unsigned long, load      )
+		    __field(unsigned long, curtarg   )
+		    __field(unsigned long, curactual )
+		    __field(unsigned long, newtarg   )
+	    ),
 
-	TP_fast_assign(
-		__entry->cpu_id = cpu_id;
-		__entry->load = load;
-		__entry->curtarg = curtarg;
-		__entry->curactual = curactual;
-		__entry->newtarg = newtarg;
-	),
+	    TP_fast_assign(
+		    __entry->cpu_id = cpu_id;
+		    __entry->load = load;
+		    __entry->curtarg = curtarg;
+		    __entry->curactual = curactual;
+		    __entry->newtarg = newtarg;
+	    ),
 
-	TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
-		  __entry->cpu_id, __entry->load, __entry->curtarg,
-		  __entry->curactual, __entry->newtarg)
+	    TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+		      __entry->cpu_id, __entry->load, __entry->curtarg,
+		      __entry->curactual, __entry->newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_target,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_already,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 TRACE_EVENT(cpufreq_interactive_boost,
-	TP_PROTO(const char *s),
-	TP_ARGS(s),
-	TP_STRUCT__entry(
-		__string(s, s)
-	),
-	TP_fast_assign(
-		__assign_str(s, s);
-	),
-	TP_printk("%s", __get_str(s))
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
 );
 
 TRACE_EVENT(cpufreq_interactive_unboost,
-	TP_PROTO(const char *s),
-	TP_ARGS(s),
-	TP_STRUCT__entry(
-		__string(s, s)
-	),
-	TP_fast_assign(
-		__assign_str(s, s);
-	),
-	TP_printk("%s", __get_str(s))
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_load_change,
+	    TP_PROTO(unsigned long cpu_id),
+	    TP_ARGS(cpu_id),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	    ),
+	    TP_printk("re-evaluate for cpu=%lu", __entry->cpu_id)
+);
+
+TRACE_EVENT(cpufreq_interactive_cpuload,
+	    TP_PROTO(unsigned long cpu_id, unsigned int load,
+		     unsigned int new_task_pct, unsigned int prev,
+		     unsigned int predicted),
+	    TP_ARGS(cpu_id, load, new_task_pct, prev, predicted),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+		__field(unsigned int, load)
+		__field(unsigned int, new_task_pct)
+		__field(unsigned int, prev)
+		__field(unsigned int, predicted)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+		__entry->load = load;
+		__entry->new_task_pct = new_task_pct;
+		__entry->prev = prev;
+		__entry->predicted = predicted;
+	    ),
+	    TP_printk("cpu=%lu load=%u new_task_pct=%u prev=%u predicted=%u",
+		      __entry->cpu_id, __entry->load, __entry->new_task_pct,
+		      __entry->prev, __entry->predicted)
 );
 
 #endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
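Besides the indentation churn, this hunk adds two events that expose per-CPU load data from the interactive governor. A hedged sketch of how the governor might emit them (the function and variable names here are illustrative, not the governor's actual code):

#include <trace/events/cpufreq_interactive.h>

static void interactive_load_trace_sketch(unsigned long cpu, unsigned int load,
					  unsigned int new_task_pct,
					  unsigned int prev,
					  unsigned int predicted)
{
	/* a load change forces the governor to re-evaluate this CPU */
	trace_cpufreq_interactive_load_change(cpu);

	/* per-CPU load breakdown sampled during that evaluation */
	trace_cpufreq_interactive_cpuload(cpu, load, new_task_pct, prev,
					  predicted);
}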
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 539b25a..0ee910d 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -96,6 +96,27 @@
 		__entry->dst_nid,
 		__entry->nr_pages)
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode	= mode;
+		__entry->reason	= reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
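The new mm_migrate_pages_start event pairs with the existing mm_migrate_pages event. A one-line sketch of the intended call site, assuming it is emitted once before the migration loop (the actual mm/migrate.c hunk is not shown here):

#include <linux/migrate.h>
#include <trace/events/migrate.h>

static void migrate_trace_sketch(enum migrate_mode mode, int reason)
{
	/* new: emitted before the page list is walked */
	trace_mm_migrate_pages_start(mode, reason);
	/* the existing mm_migrate_pages event still reports the totals */
}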
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index ec6f815..e792405 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -179,6 +179,48 @@
 	TP_ARGS(capacity, cpu_id)
 );
 
+TRACE_EVENT(cpu_frequency_switch_start,
+
+	TP_PROTO(unsigned int start_freq, unsigned int end_freq,
+		 unsigned int cpu_id),
+
+	TP_ARGS(start_freq, end_freq, cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		start_freq	)
+		__field(	u32,		end_freq	)
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->start_freq = start_freq;
+		__entry->end_freq = end_freq;
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("start=%lu end=%lu cpu_id=%lu",
+		  (unsigned long)__entry->start_freq,
+		  (unsigned long)__entry->end_freq,
+		  (unsigned long)__entry->cpu_id)
+);
+
+TRACE_EVENT(cpu_frequency_switch_end,
+
+	TP_PROTO(unsigned int cpu_id),
+
+	TP_ARGS(cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
+);
+
 TRACE_EVENT(device_pm_callback_start,
 
 	TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -554,6 +596,177 @@
 
 	TP_ARGS(name, type, new_value)
 );
+
+TRACE_EVENT(bw_hwmon_meas,
+
+	TP_PROTO(const char *name, unsigned long mbps,
+		 unsigned long us, int wake),
+
+	TP_ARGS(name, mbps, us, wake),
+
+	TP_STRUCT__entry(
+		__string(	name,			name	)
+		__field(	unsigned long,		mbps	)
+		__field(	unsigned long,		us	)
+		__field(	int,			wake	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->mbps = mbps;
+		__entry->us = us;
+		__entry->wake = wake;
+	),
+
+	TP_printk("dev: %s, mbps = %lu, us = %lu, wake = %d",
+		__get_str(name),
+		__entry->mbps,
+		__entry->us,
+		__entry->wake)
+);
+
+TRACE_EVENT(bw_hwmon_update,
+
+	TP_PROTO(const char *name, unsigned long mbps, unsigned long freq,
+		 unsigned long up_thres, unsigned long down_thres),
+
+	TP_ARGS(name, mbps, freq, up_thres, down_thres),
+
+	TP_STRUCT__entry(
+		__string(	name,			name		)
+		__field(	unsigned long,		mbps		)
+		__field(	unsigned long,		freq		)
+		__field(	unsigned long,		up_thres	)
+		__field(	unsigned long,		down_thres	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->mbps = mbps;
+		__entry->freq = freq;
+		__entry->up_thres = up_thres;
+		__entry->down_thres = down_thres;
+	),
+
+	TP_printk("dev: %s, mbps = %lu, freq = %lu, up = %lu, down = %lu",
+		__get_str(name),
+		__entry->mbps,
+		__entry->freq,
+		__entry->up_thres,
+		__entry->down_thres)
+);
+
+TRACE_EVENT(cache_hwmon_meas,
+	TP_PROTO(const char *name, unsigned long high_mrps,
+		 unsigned long med_mrps, unsigned long low_mrps,
+		 unsigned int busy_percent, unsigned int us),
+	TP_ARGS(name, high_mrps, med_mrps, low_mrps, busy_percent, us),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned long, high_mrps)
+		__field(unsigned long, med_mrps)
+		__field(unsigned long, low_mrps)
+		__field(unsigned long, total_mrps)
+		__field(unsigned int, busy_percent)
+		__field(unsigned int, us)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->high_mrps = high_mrps;
+		__entry->med_mrps = med_mrps;
+		__entry->low_mrps = low_mrps;
+		__entry->total_mrps = high_mrps + med_mrps + low_mrps;
+		__entry->busy_percent = busy_percent;
+		__entry->us = us;
+	),
+	TP_printk("dev=%s H=%lu M=%lu L=%lu T=%lu busy_pct=%u period=%u",
+		  __get_str(name), __entry->high_mrps, __entry->med_mrps,
+		  __entry->low_mrps, __entry->total_mrps,
+		  __entry->busy_percent, __entry->us)
+);
+
+TRACE_EVENT(cache_hwmon_update,
+	TP_PROTO(const char *name, unsigned long freq_mhz),
+	TP_ARGS(name, freq_mhz),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned long, freq)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->freq = freq_mhz;
+	),
+	TP_printk("dev=%s freq=%lu", __get_str(name), __entry->freq)
+);
+
+TRACE_EVENT(memlat_dev_meas,
+
+	TP_PROTO(const char *name, unsigned int dev_id, unsigned long inst,
+		 unsigned long mem, unsigned long freq, unsigned int ratio),
+
+	TP_ARGS(name, dev_id, inst, mem, freq, ratio),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned int, dev_id)
+		__field(unsigned long, inst)
+		__field(unsigned long, mem)
+		__field(unsigned long, freq)
+		__field(unsigned int, ratio)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->dev_id = dev_id;
+		__entry->inst = inst;
+		__entry->mem = mem;
+		__entry->freq = freq;
+		__entry->ratio = ratio;
+	),
+
+	TP_printk("dev: %s, id=%u, inst=%lu, mem=%lu, freq=%lu, ratio=%u",
+		__get_str(name),
+		__entry->dev_id,
+		__entry->inst,
+		__entry->mem,
+		__entry->freq,
+		__entry->ratio)
+);
+
+TRACE_EVENT(memlat_dev_update,
+
+	TP_PROTO(const char *name, unsigned int dev_id, unsigned long inst,
+		 unsigned long mem, unsigned long freq, unsigned long vote),
+
+	TP_ARGS(name, dev_id, inst, mem, freq, vote),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned int, dev_id)
+		__field(unsigned long, inst)
+		__field(unsigned long, mem)
+		__field(unsigned long, freq)
+		__field(unsigned long, vote)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->dev_id = dev_id;
+		__entry->inst = inst;
+		__entry->mem = mem;
+		__entry->freq = freq;
+		__entry->vote = vote;
+	),
+
+	TP_printk("dev: %s, id=%u, inst=%lu, mem=%lu, freq=%lu, vote=%lu",
+		__get_str(name),
+		__entry->dev_id,
+		__entry->inst,
+		__entry->mem,
+		__entry->freq,
+		__entry->vote)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
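The cpu_frequency_switch_start/end pair is meant to bracket a frequency transition; the bw_hwmon, cache_hwmon and memlat events follow the same emit-from-the-governor pattern on the devfreq side. A minimal sketch of the transition bracketing (the surrounding function is illustrative; the real call sites live in the cpufreq drivers):

#include <linux/types.h>
#include <trace/events/power.h>

static void cpufreq_transition_trace_sketch(unsigned int cpu,
					    unsigned int cur_khz,
					    unsigned int new_khz)
{
	trace_cpu_frequency_switch_start(cur_khz, new_khz, cpu);
	/* ... program the clock source / PLL for the new rate ... */
	trace_cpu_frequency_switch_end(cpu);
}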
diff --git a/include/trace/events/rpmh.h b/include/trace/events/rpmh.h
index 62e7216..919877d 100644
--- a/include/trace/events/rpmh.h
+++ b/include/trace/events/rpmh.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,86 +20,89 @@
 
 DECLARE_EVENT_CLASS(rpmh_ack_recvd,
 
-	TP_PROTO(int m, u32 addr, int errno),
+	TP_PROTO(const char *s, int m, u32 addr, int errno),
 
-	TP_ARGS(m, addr, errno),
+	TP_ARGS(s, m, addr, errno),
 
 	TP_STRUCT__entry(
+		__field(const char *, name)
 		__field(int, m)
 		__field(u32, addr)
 		__field(int, errno)
 	),
 
 	TP_fast_assign(
+		__entry->name = s;
 		__entry->m = m;
 		__entry->addr = addr;
 		__entry->errno = errno;
 	),
 
-	TP_printk("ack: tcs-m:%d addr: 0x%08x errno: %d",
-			__entry->m, __entry->addr, __entry->errno)
+	TP_printk("%s: ack: tcs-m:%d addr: 0x%08x errno: %d",
+			__entry->name, __entry->m, __entry->addr, __entry->errno)
 );
 
 DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify_irq,
-	TP_PROTO(int m, u32 addr, int err),
-	TP_ARGS(m, addr, err)
+	TP_PROTO(const char *s, int m, u32 addr, int err),
+	TP_ARGS(s, m, addr, err)
 );
 
 DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify,
-	TP_PROTO(int m, u32 addr, int err),
-	TP_ARGS(m, addr, err)
+	TP_PROTO(const char *s, int m, u32 addr, int err),
+	TP_ARGS(s, m, addr, err)
 );
 
 TRACE_EVENT(rpmh_send_msg,
 
-	TP_PROTO(void *b, int m, int n, u32 h, u32 a, u32 v, bool c),
+	TP_PROTO(const char *s, int m, int n, u32 h, u32 a, u32 v, bool c, bool t),
 
-	TP_ARGS(b, m, n, h, a, v, c),
+	TP_ARGS(s, m, n, h, a, v, c, t),
 
 	TP_STRUCT__entry(
-		__field(void *, base)
+		__field(const char*, name)
 		__field(int, m)
 		__field(int, n)
 		__field(u32, hdr)
 		__field(u32, addr)
 		__field(u32, data)
 		__field(bool, complete)
+		__field(bool, trigger)
 	),
 
 	TP_fast_assign(
-		__entry->base = b;
+		__entry->name = s;
 		__entry->m = m;
 		__entry->n = n;
 		__entry->hdr = h;
 		__entry->addr = a;
 		__entry->data = v;
 		__entry->complete = c;
+		__entry->trigger = t;
 	),
 
-	TP_printk("msg: base: 0x%p  tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d",
-			__entry->base + (672 * __entry->m) + (20 * __entry->n),
-			__entry->m, __entry->n, __entry->hdr,
-			__entry->addr, __entry->data, __entry->complete)
+	TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d trigger: %d",
+			__entry->name, __entry->m, __entry->n, __entry->hdr,
+			__entry->addr, __entry->data, __entry->complete, __entry->trigger)
 );
 
 TRACE_EVENT(rpmh_control_msg,
 
-	TP_PROTO(void *r, u32 v),
+	TP_PROTO(const char *s, u32 v),
 
-	TP_ARGS(r, v),
+	TP_ARGS(s, v),
 
 	TP_STRUCT__entry(
-		__field(void *, reg)
+		__field(const char *, name)
 		__field(u32, data)
 	),
 
 	TP_fast_assign(
-		__entry->reg = r;
+		__entry->name = s;
 		__entry->data = v;
 	),
 
-	TP_printk("ctrl-msg: reg: 0x%p data: 0x%08x",
-			__entry->reg, __entry->data)
+	TP_printk("%s: ctrl-msg: data: 0x%08x",
+			__entry->name, __entry->data)
 );
 
 #endif /* _TRACE_RPMH_H */
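The rpmh events now take a controller name string instead of a raw base pointer, and rpmh_send_msg gains a trigger flag. A sketch of a caller using the widened signature (argument names are illustrative; the real caller is the RPMH/TCS driver):

#include <linux/types.h>
#include <trace/events/rpmh.h>

static void rpmh_tcs_trace_sketch(const char *drv_name, int tcs_id, int cmd_id,
				  u32 msgid, u32 addr, u32 data,
				  bool complete, bool trigger)
{
	/* the controller name replaces the old base-pointer in the output */
	trace_rpmh_send_msg(drv_name, tcs_id, cmd_id, msgid, addr, data,
			    complete, trigger);
}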
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 1fa3215..7846ec8 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -257,6 +257,14 @@
  */
 #define DRM_FORMAT_MOD_QCOM_TIGHT	fourcc_mod_code(QCOM, 0x4)
 
+/*
+ * QTI Tile Format
+ *
+ * Refers to a tile variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILE	fourcc_mod_code(QCOM, 0x8)
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 94d7fcb..fda50e9 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -244,6 +244,63 @@
 	__u32 retained;       /* out, whether backing store still exists */
 };
 
+/* HDR WRGB x and y index */
+#define DISPLAY_PRIMARIES_WX 0
+#define DISPLAY_PRIMARIES_WY 1
+#define DISPLAY_PRIMARIES_RX 2
+#define DISPLAY_PRIMARIES_RY 3
+#define DISPLAY_PRIMARIES_GX 4
+#define DISPLAY_PRIMARIES_GY 5
+#define DISPLAY_PRIMARIES_BX 6
+#define DISPLAY_PRIMARIES_BY 7
+#define DISPLAY_PRIMARIES_MAX 8
+
+struct drm_panel_hdr_properties {
+	__u32 hdr_enabled;
+
+	/* WRGB X and y values arrayed in format */
+	/* [WX, WY, RX, RY, GX, GY, BX, BY] */
+	__u32 display_primaries[DISPLAY_PRIMARIES_MAX];
+
+	/* peak brightness supported by panel */
+	__u32 peak_brightness;
+	/* Blackness level supported by panel */
+	__u32 blackness_level;
+};
+
+/**
+ * struct drm_msm_event_req - Payload to event enable/disable ioctls.
+ * @object_id: DRM object id. e.g.: for crtc pass crtc id.
+ * @object_type: DRM object type. e.g.: for crtc set it to DRM_MODE_OBJECT_CRTC.
+ * @event: Event for which notification is being enabled/disabled.
+ *         e.g.: for Histogram set - DRM_EVENT_HISTOGRAM.
+ * @client_context: Opaque pointer that will be returned during event response
+ *                  notification.
+ * @index: Object index (e.g. crtc index), optional for user-space to set.
+ *         Driver will override value based on object_id and object_type.
+ */
+struct drm_msm_event_req {
+	__u32 object_id;
+	__u32 object_type;
+	__u32 event;
+	__u64 client_context;
+	__u32 index;
+};
+
+/**
+ * struct drm_msm_event_resp - payload returned when read is called for
+ *                            custom notifications.
+ * @base: Event type and length of complete notification payload.
+ * @info: Contains information about the DRM object that raised this event.
+ * @data: Custom payload that driver returns for event type.
+ *        size of data = base.length - (sizeof(base) + sizeof(info))
+ */
+struct drm_msm_event_resp {
+	struct drm_event base;
+	struct drm_msm_event_req info;
+	__u8 data[];
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM              0x01
@@ -255,8 +312,14 @@
 #define DRM_MSM_GEM_SUBMIT             0x06
 #define DRM_MSM_WAIT_FENCE             0x07
 #define DRM_MSM_GEM_MADVISE            0x08
-#define DRM_SDE_WB_CONFIG              0x08
-#define DRM_MSM_NUM_IOCTLS             0x09
+
+#define DRM_SDE_WB_CONFIG              0x40
+#define DRM_MSM_REGISTER_EVENT         0x41
+#define DRM_MSM_DEREGISTER_EVENT       0x42
+
+/* sde custom events */
+#define DRM_EVENT_HISTOGRAM 0x80000000
+#define DRM_EVENT_AD_BACKLIGHT 0x80000001
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -268,6 +331,10 @@
 #define DRM_IOCTL_MSM_GEM_MADVISE      DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
 #define DRM_IOCTL_SDE_WB_CONFIG \
 	DRM_IOW((DRM_COMMAND_BASE + DRM_SDE_WB_CONFIG), struct sde_drm_wb_cfg)
+#define DRM_IOCTL_MSM_REGISTER_EVENT   DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
 
 #if defined(__cplusplus)
 }
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index 943940e..e809c03 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -134,4 +134,151 @@
 	__u32 c1[PGC_TBL_LEN];
 	__u32 c2[PGC_TBL_LEN];
 };
+
+#define AD4_LUT_GRP0_SIZE 33
+#define AD4_LUT_GRP1_SIZE 32
+/*
+ * struct drm_msm_ad4_init - ad4 init structure set by user-space client.
+ *                           Init param values can change based on tuning,
+ *                           hence they are passed by user-space clients.
+ */
+struct drm_msm_ad4_init {
+	__u32 init_param_001[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_002[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_003[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_004[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_005[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_006[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_007[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_008[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_009;
+	__u32 init_param_010;
+	__u32 init_param_011;
+	__u32 init_param_012;
+	__u32 init_param_013;
+	__u32 init_param_014;
+	__u32 init_param_015;
+	__u32 init_param_016;
+	__u32 init_param_017;
+	__u32 init_param_018;
+	__u32 init_param_019;
+	__u32 init_param_020;
+	__u32 init_param_021;
+	__u32 init_param_022;
+	__u32 init_param_023;
+	__u32 init_param_024;
+	__u32 init_param_025;
+	__u32 init_param_026;
+	__u32 init_param_027;
+	__u32 init_param_028;
+	__u32 init_param_029;
+	__u32 init_param_030;
+	__u32 init_param_031;
+	__u32 init_param_032;
+	__u32 init_param_033;
+	__u32 init_param_034;
+	__u32 init_param_035;
+	__u32 init_param_036;
+	__u32 init_param_037;
+	__u32 init_param_038;
+	__u32 init_param_039;
+	__u32 init_param_040;
+	__u32 init_param_041;
+	__u32 init_param_042;
+	__u32 init_param_043;
+	__u32 init_param_044;
+	__u32 init_param_045;
+	__u32 init_param_046;
+	__u32 init_param_047;
+	__u32 init_param_048;
+	__u32 init_param_049;
+	__u32 init_param_050;
+	__u32 init_param_051;
+	__u32 init_param_052;
+	__u32 init_param_053;
+	__u32 init_param_054;
+	__u32 init_param_055;
+	__u32 init_param_056;
+	__u32 init_param_057;
+	__u32 init_param_058;
+	__u32 init_param_059;
+	__u32 init_param_060;
+	__u32 init_param_061;
+	__u32 init_param_062;
+	__u32 init_param_063;
+	__u32 init_param_064;
+	__u32 init_param_065;
+	__u32 init_param_066;
+	__u32 init_param_067;
+	__u32 init_param_068;
+	__u32 init_param_069;
+	__u32 init_param_070;
+	__u32 init_param_071;
+	__u32 init_param_072;
+	__u32 init_param_073;
+	__u32 init_param_074;
+	__u32 init_param_075;
+};
+
+/*
+ * struct drm_msm_ad4_cfg - ad4 config structure set by user-space client.
+ *                           Config param values can vary based on tuning,
+ *                           hence they are passed by user-space clients.
+ */
+struct drm_msm_ad4_cfg {
+	__u32 cfg_param_001;
+	__u32 cfg_param_002;
+	__u32 cfg_param_003;
+	__u32 cfg_param_004;
+	__u32 cfg_param_005;
+	__u32 cfg_param_006;
+	__u32 cfg_param_007;
+	__u32 cfg_param_008;
+	__u32 cfg_param_009;
+	__u32 cfg_param_010;
+	__u32 cfg_param_011;
+	__u32 cfg_param_012;
+	__u32 cfg_param_013;
+	__u32 cfg_param_014;
+	__u32 cfg_param_015;
+	__u32 cfg_param_016;
+	__u32 cfg_param_017;
+	__u32 cfg_param_018;
+	__u32 cfg_param_019;
+	__u32 cfg_param_020;
+	__u32 cfg_param_021;
+	__u32 cfg_param_022;
+	__u32 cfg_param_023;
+	__u32 cfg_param_024;
+	__u32 cfg_param_025;
+	__u32 cfg_param_026;
+	__u32 cfg_param_027;
+	__u32 cfg_param_028;
+	__u32 cfg_param_029;
+	__u32 cfg_param_030;
+	__u32 cfg_param_031;
+	__u32 cfg_param_032;
+	__u32 cfg_param_033;
+	__u32 cfg_param_034;
+	__u32 cfg_param_035;
+	__u32 cfg_param_036;
+	__u32 cfg_param_037;
+	__u32 cfg_param_038;
+	__u32 cfg_param_039;
+	__u32 cfg_param_040;
+	__u32 cfg_param_041;
+	__u32 cfg_param_042;
+	__u32 cfg_param_043;
+	__u32 cfg_param_044;
+	__u32 cfg_param_045;
+	__u32 cfg_param_046;
+	__u32 cfg_param_047;
+	__u32 cfg_param_048;
+	__u32 cfg_param_049;
+	__u32 cfg_param_050;
+	__u32 cfg_param_051;
+	__u32 cfg_param_052;
+	__u32 cfg_param_053;
+};
+
 #endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 888762e..33ba430 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -265,6 +265,7 @@
 header-y += map_to_7segment.h
 header-y += matroxfb.h
 header-y += mdio.h
+header-y += mdss_rotator.h
 header-y += media.h
 header-y += media-bus-format.h
 header-y += mei.h
@@ -306,6 +307,8 @@
 header-y += msm_ion.h
 header-y += msm_ipc.h
 header-y += msm_kgsl.h
+header-y += msm_mdp.h
+header-y += msm_mdp_ext.h
 header-y += msm_rmnet.h
 header-y += mtio.h
 header-y += nbd.h
@@ -425,6 +428,7 @@
 header-y += snmp.h
 header-y += sock_diag.h
 header-y += socket.h
+header-y += sockev.h
 header-y += sockios.h
 header-y += sonet.h
 header-y += sonypi.h
@@ -517,3 +521,4 @@
 header-y += rmnet_ipa_fd_ioctl.h
 header-y += msm_dsps.h
 header-y += msm-core-interface.h
+header-y += msm_rotator.h
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 51f891f..7668b57 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -132,6 +132,7 @@
 
 /* struct binder_fd_array_object - object describing an array of fds in a buffer
  * @hdr:		common header structure
+ * @pad:		padding to ensure correct alignment
  * @num_fds:		number of file descriptors in the buffer
  * @parent:		index in offset array to buffer holding the fd array
  * @parent_offset:	start offset of fd array in the buffer
@@ -152,6 +153,7 @@
  */
 struct binder_fd_array_object {
 	struct binder_object_header	hdr;
+	__u32				pad;
 	binder_size_t			num_fds;
 	binder_size_t			parent;
 	binder_size_t			parent_offset;
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 1b17e1c..4201c95 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -5,11 +5,11 @@
 
 #define ESOC_CODE		0xCC
 
-#define ESOC_CMD_EXE		_IOW(ESOC_CODE, 1, __u32)
-#define ESOC_WAIT_FOR_REQ	_IOR(ESOC_CODE, 2, __u32)
-#define ESOC_NOTIFY		_IOW(ESOC_CODE, 3, __u32)
-#define ESOC_GET_STATUS		_IOR(ESOC_CODE, 4, __u32)
-#define ESOC_WAIT_FOR_CRASH	_IOR(ESOC_CODE, 6, __u32)
+#define ESOC_CMD_EXE		_IOW(ESOC_CODE, 1, unsigned int)
+#define ESOC_WAIT_FOR_REQ	_IOR(ESOC_CODE, 2, unsigned int)
+#define ESOC_NOTIFY		_IOW(ESOC_CODE, 3, unsigned int)
+#define ESOC_GET_STATUS		_IOR(ESOC_CODE, 4, unsigned int)
+#define ESOC_WAIT_FOR_CRASH	_IOR(ESOC_CODE, 6, unsigned int)
 #define ESOC_REG_REQ_ENG	_IO(ESOC_CODE, 7)
 #define ESOC_REG_CMD_ENG	_IO(ESOC_CODE, 8)
 
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 1049c78..c462f1d 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -179,6 +179,12 @@
 	DEVCONF_DROP_UNSOLICITED_NA,
 	DEVCONF_KEEP_ADDR_ON_DOWN,
 	DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
+	DEVCONF_SEG6_ENABLED,
+	DEVCONF_SEG6_REQUIRE_HMAC,
+	DEVCONF_ENHANCED_DAD,
+	DEVCONF_ADDR_GEN_MODE,
+	DEVCONF_DISABLE_POLICY,
+	DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/mdss_rotator.h b/include/uapi/linux/mdss_rotator.h
new file mode 100644
index 0000000..167e1426
--- /dev/null
+++ b/include/uapi/linux/mdss_rotator.h
@@ -0,0 +1,144 @@
+#ifndef _UAPI_MDSS_ROTATOR_H_
+#define _UAPI_MDSS_ROTATOR_H_
+
+#include <linux/msm_mdp_ext.h>
+
+#define MDSS_ROTATOR_IOCTL_MAGIC 'w'
+
+/* open a rotation session */
+#define MDSS_ROTATION_OPEN \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 1, struct mdp_rotation_config *)
+
+/* change the rotation session configuration */
+#define MDSS_ROTATION_CONFIG \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 2, struct mdp_rotation_config *)
+
+/* queue the rotation request */
+#define MDSS_ROTATION_REQUEST \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 3, struct mdp_rotation_request *)
+
+/* close a rotation session with the specified rotation session ID */
+#define MDSS_ROTATION_CLOSE	_IOW(MDSS_ROTATOR_IOCTL_MAGIC, 4, unsigned int)
+
+/*
+ * Rotation request flag
+ */
+/* no rotation, i.e. color space conversion only */
+#define MDP_ROTATION_NOP	0x01
+
+/* left/right flip */
+#define MDP_ROTATION_FLIP_LR	0x02
+
+/* up/down flip */
+#define MDP_ROTATION_FLIP_UD	0x04
+
+/* rotate 90 degrees */
+#define MDP_ROTATION_90		0x08
+
+/* rotate 180 degrees */
+#define MDP_ROTATION_180	(MDP_ROTATION_FLIP_LR | MDP_ROTATION_FLIP_UD)
+
+/* rotate 270 degrees */
+#define MDP_ROTATION_270	(MDP_ROTATION_90 | MDP_ROTATION_180)
+
+/* format is interlaced */
+#define MDP_ROTATION_DEINTERLACE 0x10
+
+/* enable bwc */
+#define MDP_ROTATION_BWC_EN	0x40
+
+/* secure data */
+#define MDP_ROTATION_SECURE	0x80
+
+/*
+ * Rotation commit flag
+ */
+/* Flag indicating that the rotation request should be validated */
+#define MDSS_ROTATION_REQUEST_VALIDATE	0x01
+
+#define MDP_ROTATION_REQUEST_VERSION_1_0	0x00010000
+
+/*
+ * A client can ask the driver to allocate any available hardware
+ * resource by passing this particular hw resource id.
+ */
+#define MDSS_ROTATION_HW_ANY	0xFFFFFFFF
+
+/*
+ * Configuration Structures
+ */
+struct mdp_rotation_buf_info {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	struct mult_factor comp_ratio;
+};
+
+struct mdp_rotation_config {
+	uint32_t	version;
+	uint32_t	session_id;
+	struct mdp_rotation_buf_info	input;
+	struct mdp_rotation_buf_info	output;
+	uint32_t	frame_rate;
+	uint32_t	flags;
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_item {
+	/* rotation request flag */
+	uint32_t	flags;
+
+	/* Source crop rectangle */
+	struct mdp_rect	src_rect;
+
+	/* Destination rectangle */
+	struct mdp_rect	dst_rect;
+
+	/* Input buffer for the request */
+	struct mdp_layer_buffer	input;
+
+	/* The output buffer for the request */
+	struct mdp_layer_buffer	output;
+
+	/*
+	 * DMA pipe selection for this request by client:
+	 * 0: DMA pipe 0
+	 * 1: DMA pipe 1
+	 * or MDSS_ROTATION_HW_ANY if the client wants the
+	 * driver to allocate whichever is available
+	 */
+	uint32_t	pipe_idx;
+
+	/*
+	 * Write-back block selection for this request by client:
+	 * 0: Write-back block 0
+	 * 1: Write-back block 1
+	 * or MDSS_ROTATION_HW_ANY if the client wants the
+	 * driver to allocate whichever is available
+	 */
+	uint32_t	wb_idx;
+
+	/* Which session ID is this request scheduled on */
+	uint32_t	session_id;
+
+	/* 32-bit values reserved for future use */
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_request {
+	/* 32-bit version of the request structure */
+	uint32_t	version;
+
+	uint32_t	flags;
+
+	/* Number of rotation request items in the list */
+	uint32_t	count;
+
+	/* Pointer to a list of rotation request items */
+	struct mdp_rotation_item __user	*list;
+
+	/* 32-bit values reserved for future use */
+	uint32_t	reserved[6];
+};
+
+#endif /*_UAPI_MDSS_ROTATOR_H_*/
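A userspace sketch of the session flow this new UAPI implies: open, configure, submit a request list, close. It is not taken from the patch; the /dev/mdss_rotator node name, the ioctl argument conventions and the field values are assumptions or placeholders, and the source/destination buffers are omitted.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mdss_rotator.h>

static int rotator_session_sketch(void)
{
	struct mdp_rotation_config cfg;
	struct mdp_rotation_item item;
	struct mdp_rotation_request req;
	int ret, fd;

	fd = open("/dev/mdss_rotator", O_RDWR);	/* device node name assumed */
	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	memset(&item, 0, sizeof(item));
	memset(&req, 0, sizeof(req));

	cfg.version = MDP_ROTATION_REQUEST_VERSION_1_0;	/* placeholder */
	cfg.flags = MDP_ROTATION_90;
	ret = ioctl(fd, MDSS_ROTATION_OPEN, &cfg);	/* fills cfg.session_id */

	item.flags = MDP_ROTATION_90;
	item.session_id = cfg.session_id;
	item.pipe_idx = MDSS_ROTATION_HW_ANY;
	item.wb_idx = MDSS_ROTATION_HW_ANY;
	/* src_rect/dst_rect and the input/output buffers are omitted here */

	req.version = MDP_ROTATION_REQUEST_VERSION_1_0;
	req.count = 1;
	req.list = &item;
	if (!ret)
		ret = ioctl(fd, MDSS_ROTATION_REQUEST, &req);

	ioctl(fd, MDSS_ROTATION_CLOSE, &cfg.session_id); /* arg form assumed */
	close(fd);
	return ret;
}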
diff --git a/include/uapi/linux/msm_audio_calibration.h b/include/uapi/linux/msm_audio_calibration.h
index 11af32e..5a0b860 100644
--- a/include/uapi/linux/msm_audio_calibration.h
+++ b/include/uapi/linux/msm_audio_calibration.h
@@ -98,12 +98,15 @@
 	ULP_LSM_TOPOLOGY_ID_CAL_TYPE,
 	AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE,
 	AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE,
+	AFE_SIDETONE_IIR_CAL_TYPE,
 	MAX_CAL_TYPES,
 };
 
 #define AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE AFE_FB_SPKR_PROT_TH_VI_CAL_TYPE
 #define AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE AFE_FB_SPKR_PROT_EX_VI_CAL_TYPE
 
+#define AFE_SIDETONE_IIR_CAL_TYPE AFE_SIDETONE_IIR_CAL_TYPE
+
 enum {
 	VERSION_0_0,
 };
@@ -346,6 +349,19 @@
 	int32_t		pid;
 };
 
+#define MAX_SIDETONE_IIR_DATA_SIZE   224
+#define MAX_NO_IIR_FILTER_STAGE      10
+
+struct audio_cal_info_sidetone_iir {
+	uint16_t	iir_enable;
+	uint16_t	num_biquad_stages;
+	uint16_t	pregain;
+	int32_t	        tx_acdb_id;
+	int32_t	        rx_acdb_id;
+	int32_t	        mid;
+	int32_t	        pid;
+	uint8_t	        iir_config[MAX_SIDETONE_IIR_DATA_SIZE];
+};
 struct audio_cal_info_lsm_top {
 	int32_t		topology;
 	int32_t		acdb_id;
@@ -580,6 +596,17 @@
 	struct audio_cal_type_sidetone		cal_type;
 };
 
+struct audio_cal_type_sidetone_iir {
+	struct audio_cal_type_header	   cal_hdr;
+	struct audio_cal_data		   cal_data;
+	struct audio_cal_info_sidetone_iir cal_info;
+};
+
+struct audio_cal_sidetone_iir {
+	struct audio_cal_header		   hdr;
+	struct audio_cal_type_sidetone_iir cal_type;
+};
+
 struct audio_cal_type_lsm_top {
 	struct audio_cal_type_header	cal_hdr;
 	struct audio_cal_data		cal_data;
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 941a816..c190446 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -327,6 +327,7 @@
 #define KGSL_PROP_DEVICE_QDSS_STM	0x19
 #define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
 #define KGSL_PROP_UBWC_MODE		0x1B
+#define KGSL_PROP_DEVICE_QTIMER		0x20
 
 struct kgsl_shadowprop {
 	unsigned long gpuaddr;
@@ -339,6 +340,11 @@
 	uint64_t size;
 };
 
+struct kgsl_qtimer_prop {
+	uint64_t gpuaddr;
+	uint64_t size;
+};
+
 struct kgsl_version {
 	unsigned int drv_major;
 	unsigned int drv_minor;
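A userspace sketch of querying the new KGSL_PROP_DEVICE_QTIMER property through the getproperty ioctl already present in this header. The /dev/kgsl-3d0 node name is assumed; error handling is trimmed.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

static int read_qtimer_prop_sketch(void)
{
	struct kgsl_qtimer_prop qtimer = {0};
	struct kgsl_device_getproperty prop = {
		.type = KGSL_PROP_DEVICE_QTIMER,
		.value = &qtimer,
		.sizebytes = sizeof(qtimer),
	};
	int fd = open("/dev/kgsl-3d0", O_RDWR);	/* node name assumed */
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop);
	close(fd);
	return ret;	/* qtimer.gpuaddr/size hold the mapping on success */
}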
diff --git a/include/uapi/linux/msm_mdp.h b/include/uapi/linux/msm_mdp.h
new file mode 100644
index 0000000..73f4938
--- /dev/null
+++ b/include/uapi/linux/msm_mdp.h
@@ -0,0 +1,1461 @@
+#ifndef _UAPI_MSM_MDP_H_
+#define _UAPI_MSM_MDP_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#endif
+#include <linux/fb.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP          _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int)
+#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int)
+#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor)
+#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap)
+#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram_data)
+/* new ioctls for set/get ccs matrix */
+#define MSMFB_GET_CCS_MATRIX  _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs)
+#define MSMFB_SET_CCS_MATRIX  _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs)
+#define MSMFB_OVERLAY_SET       _IOWR(MSMFB_IOCTL_MAGIC, 135, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_UNSET     _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
+
+#define MSMFB_OVERLAY_PLAY      _IOW(MSMFB_IOCTL_MAGIC, 137, \
+						struct msmfb_overlay_data)
+#define MSMFB_OVERLAY_QUEUE	MSMFB_OVERLAY_PLAY
+
+#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
+					struct mdp_page_protection)
+#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
+					struct mdp_page_protection)
+#define MSMFB_OVERLAY_GET      _IOR(MSMFB_IOCTL_MAGIC, 140, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_PLAY_ENABLE     _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int)
+#define MSMFB_OVERLAY_BLT       _IOWR(MSMFB_IOCTL_MAGIC, 142, \
+						struct msmfb_overlay_blt)
+#define MSMFB_OVERLAY_BLT_OFFSET     _IOW(MSMFB_IOCTL_MAGIC, 143, unsigned int)
+#define MSMFB_HISTOGRAM_START	_IOR(MSMFB_IOCTL_MAGIC, 144, \
+						struct mdp_histogram_start_req)
+#define MSMFB_HISTOGRAM_STOP	_IOR(MSMFB_IOCTL_MAGIC, 145, unsigned int)
+#define MSMFB_NOTIFY_UPDATE	_IOWR(MSMFB_IOCTL_MAGIC, 146, unsigned int)
+
+#define MSMFB_OVERLAY_3D       _IOWR(MSMFB_IOCTL_MAGIC, 147, \
+						struct msmfb_overlay_3d)
+
+#define MSMFB_MIXER_INFO       _IOWR(MSMFB_IOCTL_MAGIC, 148, \
+						struct msmfb_mixer_info_req)
+#define MSMFB_OVERLAY_PLAY_WAIT _IOWR(MSMFB_IOCTL_MAGIC, 149, \
+						struct msmfb_overlay_data)
+#define MSMFB_WRITEBACK_INIT _IO(MSMFB_IOCTL_MAGIC, 150)
+#define MSMFB_WRITEBACK_START _IO(MSMFB_IOCTL_MAGIC, 151)
+#define MSMFB_WRITEBACK_STOP _IO(MSMFB_IOCTL_MAGIC, 152)
+#define MSMFB_WRITEBACK_QUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 153, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_DEQUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 154, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
+#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
+#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+#define MSMFB_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
+#define MSMFB_BUFFER_SYNC  _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync)
+#define MSMFB_OVERLAY_COMMIT      _IO(MSMFB_IOCTL_MAGIC, 163)
+#define MSMFB_DISPLAY_COMMIT      _IOW(MSMFB_IOCTL_MAGIC, 164, \
+						struct mdp_display_commit)
+#define MSMFB_METADATA_SET  _IOW(MSMFB_IOCTL_MAGIC, 165, struct msmfb_metadata)
+#define MSMFB_METADATA_GET  _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata)
+#define MSMFB_WRITEBACK_SET_MIRRORING_HINT _IOW(MSMFB_IOCTL_MAGIC, 167, \
+						unsigned int)
+#define MSMFB_ASYNC_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 168, unsigned int)
+#define MSMFB_OVERLAY_PREPARE		_IOWR(MSMFB_IOCTL_MAGIC, 169, \
+						struct mdp_overlay_list)
+#define MSMFB_LPM_ENABLE	_IOWR(MSMFB_IOCTL_MAGIC, 170, unsigned int)
+#define MSMFB_MDP_PP_GET_FEATURE_VERSION _IOWR(MSMFB_IOCTL_MAGIC, 171, \
+					      struct mdp_pp_feature_version)
+
+#define FB_TYPE_3D_PANEL 0x10101010
+#define MDP_IMGTYPE2_START 0x10000
+#define MSMFB_DRIVER_VERSION	0xF9E8D701
+/* Maximum number of formats supported by MDP*/
+#define MDP_IMGTYPE_END 0x100
+
+/* HW Revisions for different MDSS targets */
+#define MDSS_GET_MAJOR(rev)		((rev) >> 28)
+#define MDSS_GET_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define MDSS_GET_STEP(rev)		((rev) & 0xFFFF)
+#define MDSS_GET_MAJOR_MINOR(rev)	((rev) >> 16)
+
+#define IS_MDSS_MAJOR_MINOR_SAME(rev1, rev2)	\
+	(MDSS_GET_MAJOR_MINOR((rev1)) == MDSS_GET_MAJOR_MINOR((rev2)))
+
+#define MDSS_MDP_REV(major, minor, step)	\
+	((((major) & 0x000F) << 28) |		\
+	 (((minor) & 0x0FFF) << 16) |		\
+	 ((step)   & 0xFFFF))
+
+#define MDSS_MDP_HW_REV_100	MDSS_MDP_REV(1, 0, 0) /* 8974 v1.0 */
+#define MDSS_MDP_HW_REV_101	MDSS_MDP_REV(1, 1, 0) /* 8x26 v1.0 */
+#define MDSS_MDP_HW_REV_101_1	MDSS_MDP_REV(1, 1, 1) /* 8x26 v2.0, 8926 v1.0 */
+#define MDSS_MDP_HW_REV_101_2	MDSS_MDP_REV(1, 1, 2) /* 8926 v2.0 */
+#define MDSS_MDP_HW_REV_102	MDSS_MDP_REV(1, 2, 0) /* 8974 v2.0 */
+#define MDSS_MDP_HW_REV_102_1	MDSS_MDP_REV(1, 2, 1) /* 8974 v3.0 (Pro) */
+#define MDSS_MDP_HW_REV_103	MDSS_MDP_REV(1, 3, 0) /* 8084 v1.0 */
+#define MDSS_MDP_HW_REV_103_1	MDSS_MDP_REV(1, 3, 1) /* 8084 v1.1 */
+#define MDSS_MDP_HW_REV_105	MDSS_MDP_REV(1, 5, 0) /* 8994 v1.0 */
+#define MDSS_MDP_HW_REV_106	MDSS_MDP_REV(1, 6, 0) /* 8916 v1.0 */
+#define MDSS_MDP_HW_REV_107	MDSS_MDP_REV(1, 7, 0) /* 8996 v1 */
+#define MDSS_MDP_HW_REV_107_1	MDSS_MDP_REV(1, 7, 1) /* 8996 v2 */
+#define MDSS_MDP_HW_REV_107_2	MDSS_MDP_REV(1, 7, 2) /* 8996 v3 */
+#define MDSS_MDP_HW_REV_108	MDSS_MDP_REV(1, 8, 0) /* 8939 v1.0 */
+#define MDSS_MDP_HW_REV_109	MDSS_MDP_REV(1, 9, 0) /* 8994 v2.0 */
+#define MDSS_MDP_HW_REV_110	MDSS_MDP_REV(1, 10, 0) /* 8992 v1.0 */
+#define MDSS_MDP_HW_REV_200	MDSS_MDP_REV(2, 0, 0) /* 8092 v1.0 */
+#define MDSS_MDP_HW_REV_112	MDSS_MDP_REV(1, 12, 0) /* 8952 v1.0 */
+#define MDSS_MDP_HW_REV_114	MDSS_MDP_REV(1, 14, 0) /* 8937 v1.0 */
+#define MDSS_MDP_HW_REV_115	MDSS_MDP_REV(1, 15, 0) /* msmgold */
+#define MDSS_MDP_HW_REV_116	MDSS_MDP_REV(1, 16, 0) /* msmtitanium */
+#define MDSS_MDP_HW_REV_300	MDSS_MDP_REV(3, 0, 0)  /* msmcobalt */
+#define MDSS_MDP_HW_REV_301	MDSS_MDP_REV(3, 0, 1)  /* msmcobalt v1.0 */
+
+enum {
+	NOTIFY_UPDATE_INIT,
+	NOTIFY_UPDATE_DEINIT,
+	NOTIFY_UPDATE_START,
+	NOTIFY_UPDATE_STOP,
+	NOTIFY_UPDATE_POWER_OFF,
+};
+
+enum {
+	NOTIFY_TYPE_NO_UPDATE,
+	NOTIFY_TYPE_SUSPEND,
+	NOTIFY_TYPE_UPDATE,
+	NOTIFY_TYPE_BL_UPDATE,
+	NOTIFY_TYPE_BL_AD_ATTEN_UPDATE,
+};
+
+enum {
+	MDP_RGB_565,      /* RGB 565 planar */
+	MDP_XRGB_8888,    /* RGB 888 padded */
+	MDP_Y_CBCR_H2V2,  /* Y and CbCr, pseudo planar w/ Cb in MSB */
+	MDP_Y_CBCR_H2V2_ADRENO,
+	MDP_ARGB_8888,    /* ARGB 888 */
+	MDP_RGB_888,      /* RGB 888 planar */
+	MDP_Y_CRCB_H2V2,  /* Y and CrCb, pseudo planar w/ Cr in MSB */
+	MDP_YCRYCB_H2V1,  /* YCrYCb interleave */
+	MDP_CBYCRY_H2V1,  /* CbYCrY interleave */
+	MDP_Y_CRCB_H2V1,  /* Y and CrCb, pseudo planar w/ Cr in MSB */
+	MDP_Y_CBCR_H2V1,   /* Y and CbCr, pseudo planar w/ Cb in MSB */
+	MDP_Y_CRCB_H1V2,
+	MDP_Y_CBCR_H1V2,
+	MDP_RGBA_8888,    /* ARGB 888 */
+	MDP_BGRA_8888,	  /* ABGR 888 */
+	MDP_RGBX_8888,	  /* RGBX 888 */
+	MDP_Y_CRCB_H2V2_TILE,  /* Y and CrCb, pseudo planar tile */
+	MDP_Y_CBCR_H2V2_TILE,  /* Y and CbCr, pseudo planar tile */
+	MDP_Y_CR_CB_H2V2,  /* Y, Cr and Cb, planar */
+	MDP_Y_CR_CB_GH2V2,  /* Y, Cr and Cb, planar aligned to Android YV12 */
+	MDP_Y_CB_CR_H2V2,  /* Y, Cb and Cr, planar */
+	MDP_Y_CRCB_H1V1,  /* Y and CrCb, pseudo planar w/ Cr in MSB */
+	MDP_Y_CBCR_H1V1,  /* Y and CbCr, pseudo planar w/ Cb in MSB */
+	MDP_YCRCB_H1V1,   /* YCrCb interleave */
+	MDP_YCBCR_H1V1,   /* YCbCr interleave */
+	MDP_BGR_565,      /* BGR 565 planar */
+	MDP_BGR_888,      /* BGR 888 */
+	MDP_Y_CBCR_H2V2_VENUS,
+	MDP_BGRX_8888,   /* BGRX 8888 */
+	MDP_RGBA_8888_TILE,	  /* RGBA 8888 in tile format */
+	MDP_ARGB_8888_TILE,	  /* ARGB 8888 in tile format */
+	MDP_ABGR_8888_TILE,	  /* ABGR 8888 in tile format */
+	MDP_BGRA_8888_TILE,	  /* BGRA 8888 in tile format */
+	MDP_RGBX_8888_TILE,	  /* RGBX 8888 in tile format */
+	MDP_XRGB_8888_TILE,	  /* XRGB 8888 in tile format */
+	MDP_XBGR_8888_TILE,	  /* XBGR 8888 in tile format */
+	MDP_BGRX_8888_TILE,	  /* BGRX 8888 in tile format */
+	MDP_YCBYCR_H2V1,  /* YCbYCr interleave */
+	MDP_RGB_565_TILE,	  /* RGB 565 in tile format */
+	MDP_BGR_565_TILE,	  /* BGR 565 in tile format */
+	MDP_ARGB_1555,	/*ARGB 1555*/
+	MDP_RGBA_5551,	/*RGBA 5551*/
+	MDP_ARGB_4444,	/*ARGB 4444*/
+	MDP_RGBA_4444,	/*RGBA 4444*/
+	MDP_RGB_565_UBWC,
+	MDP_RGBA_8888_UBWC,
+	MDP_Y_CBCR_H2V2_UBWC,
+	MDP_RGBX_8888_UBWC,
+	MDP_Y_CRCB_H2V2_VENUS,
+	MDP_IMGTYPE_LIMIT,
+	MDP_RGB_BORDERFILL,	/* border fill pipe */
+	MDP_XRGB_1555,
+	MDP_RGBX_5551,
+	MDP_XRGB_4444,
+	MDP_RGBX_4444,
+	MDP_ABGR_1555,
+	MDP_BGRA_5551,
+	MDP_XBGR_1555,
+	MDP_BGRX_5551,
+	MDP_ABGR_4444,
+	MDP_BGRA_4444,
+	MDP_XBGR_4444,
+	MDP_BGRX_4444,
+	MDP_ABGR_8888,
+	MDP_XBGR_8888,
+	MDP_RGBA_1010102,
+	MDP_ARGB_2101010,
+	MDP_RGBX_1010102,
+	MDP_XRGB_2101010,
+	MDP_BGRA_1010102,
+	MDP_ABGR_2101010,
+	MDP_BGRX_1010102,
+	MDP_XBGR_2101010,
+	MDP_RGBA_1010102_UBWC,
+	MDP_RGBX_1010102_UBWC,
+	MDP_Y_CBCR_H2V2_P010,
+	MDP_Y_CBCR_H2V2_TP10_UBWC,
+	MDP_CRYCBY_H2V1,  /* CrYCbY interleave */
+	MDP_IMGTYPE_LIMIT1 = MDP_IMGTYPE_END,
+	MDP_FB_FORMAT = MDP_IMGTYPE2_START,    /* framebuffer format */
+	MDP_IMGTYPE_LIMIT2 /* No valid image type after this enum */
+};
+
+#define MDP_CRYCBY_H2V1 MDP_CRYCBY_H2V1
+
+enum {
+	PMEM_IMG,
+	FB_IMG,
+};
+
+enum {
+	HSIC_HUE = 0,
+	HSIC_SAT,
+	HSIC_INT,
+	HSIC_CON,
+	NUM_HSIC_PARAM,
+};
+
+enum mdss_mdp_max_bw_mode {
+	MDSS_MAX_BW_LIMIT_DEFAULT = 0x1,
+	MDSS_MAX_BW_LIMIT_CAMERA = 0x2,
+	MDSS_MAX_BW_LIMIT_HFLIP = 0x4,
+	MDSS_MAX_BW_LIMIT_VFLIP = 0x8,
+};
+
+#define MDSS_MDP_ROT_ONLY		0x80
+#define MDSS_MDP_RIGHT_MIXER		0x100
+#define MDSS_MDP_DUAL_PIPE		0x200
+
+/* mdp_blit_req flag values */
+#define MDP_ROT_NOP 0
+#define MDP_FLIP_LR 0x1
+#define MDP_FLIP_UD 0x2
+#define MDP_ROT_90 0x4
+#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER 0x8
+#define MDP_BLUR 0x10
+#define MDP_BLEND_FG_PREMULT 0x20000
+#define MDP_IS_FG 0x40000
+#define MDP_SOLID_FILL 0x00000020
+#define MDP_VPU_PIPE 0x00000040
+#define MDP_DEINTERLACE 0x80000000
+#define MDP_SHARPENING  0x40000000
+#define MDP_NO_DMA_BARRIER_START	0x20000000
+#define MDP_NO_DMA_BARRIER_END		0x10000000
+#define MDP_NO_BLIT			0x08000000
+#define MDP_BLIT_WITH_DMA_BARRIERS	0x000
+#define MDP_BLIT_WITH_NO_DMA_BARRIERS    \
+	(MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END)
+#define MDP_BLIT_SRC_GEM                0x04000000
+#define MDP_BLIT_DST_GEM                0x02000000
+#define MDP_BLIT_NON_CACHED		0x01000000
+#define MDP_OV_PIPE_SHARE		0x00800000
+#define MDP_DEINTERLACE_ODD		0x00400000
+#define MDP_OV_PLAY_NOWAIT		0x00200000
+#define MDP_SOURCE_ROTATED_90		0x00100000
+#define MDP_OVERLAY_PP_CFG_EN		0x00080000
+#define MDP_BACKEND_COMPOSITION		0x00040000
+#define MDP_BORDERFILL_SUPPORTED	0x00010000
+#define MDP_SECURE_OVERLAY_SESSION      0x00008000
+#define MDP_SECURE_DISPLAY_OVERLAY_SESSION	0x00002000
+#define MDP_OV_PIPE_FORCE_DMA		0x00004000
+#define MDP_MEMORY_ID_TYPE_FB		0x00001000
+#define MDP_BWC_EN			0x00000400
+#define MDP_DECIMATION_EN		0x00000800
+#define MDP_SMP_FORCE_ALLOC		0x00200000
+#define MDP_TRANSP_NOP 0xffffffff
+#define MDP_ALPHA_NOP 0xff
+
+#define MDP_FB_PAGE_PROTECTION_NONCACHED         (0)
+#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE      (1)
+#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE    (3)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE  (4)
+/* Sentinel: Don't use! */
+#define MDP_FB_PAGE_PROTECTION_INVALID           (5)
+/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. */
+#define MDP_NUM_FB_PAGE_PROTECTION_VALUES        (5)
+
+#define MDP_DEEP_COLOR_YUV444    0x1
+#define MDP_DEEP_COLOR_RGB30B    0x2
+#define MDP_DEEP_COLOR_RGB36B    0x4
+#define MDP_DEEP_COLOR_RGB48B    0x8
+
+struct mdp_rect {
+	uint32_t x;
+	uint32_t y;
+	uint32_t w;
+	uint32_t h;
+};
+
+struct mdp_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	uint32_t offset;
+	int memory_id;		/* the file descriptor */
+	uint32_t priv;
+};
+
+struct mult_factor {
+	uint32_t numer;
+	uint32_t denom;
+};
+
+/*
+ * {3x3} + {3} ccs matrix
+ */
+
+#define MDP_CCS_RGB2YUV	0
+#define MDP_CCS_YUV2RGB	1
+
+#define MDP_CCS_SIZE	9
+#define MDP_BV_SIZE	3
+
+struct mdp_ccs {
+	int direction;			/* MDP_CCS_RGB2YUV or YUV2RGB */
+	uint16_t ccs[MDP_CCS_SIZE];	/* 3x3 color coefficients */
+	uint16_t bv[MDP_BV_SIZE];	/* 1x3 bias vector */
+};
+
+struct mdp_csc {
+	int id;
+	uint32_t csc_mv[9];
+	uint32_t csc_pre_bv[3];
+	uint32_t csc_post_bv[3];
+	uint32_t csc_pre_lv[6];
+	uint32_t csc_post_lv[6];
+};
+
+/* The version of the mdp_blit_req structure so that
+ * user applications can selectively decide which functionality
+ * to include
+ */
+
+#define MDP_BLIT_REQ_VERSION 3
+
+struct color {
+	uint32_t r;
+	uint32_t g;
+	uint32_t b;
+	uint32_t alpha;
+};
+
+struct mdp_blit_req {
+	struct mdp_img src;
+	struct mdp_img dst;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	struct color const_color;
+	uint32_t alpha;
+	uint32_t transp_mask;
+	uint32_t flags;
+	int sharpening_strength;  /* -127 <--> 127, default 64 */
+	uint8_t color_space;
+	uint32_t fps;
+};
+
+struct mdp_blit_req_list {
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MSMFB_DATA_VERSION 2
+
+struct msmfb_data {
+	uint32_t offset;
+	int memory_id;
+	int id;
+	uint32_t flags;
+	uint32_t priv;
+	uint32_t iova;
+};
+
+#define MSMFB_NEW_REQUEST -1
+
+struct msmfb_overlay_data {
+	uint32_t id;
+	struct msmfb_data data;
+	uint32_t version_key;
+	struct msmfb_data plane1_data;
+	struct msmfb_data plane2_data;
+	struct msmfb_data dst_data;
+};
+
+struct msmfb_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+};
+
+#define MSMFB_WRITEBACK_DEQUEUE_BLOCKING 0x1
+struct msmfb_writeback_data {
+	struct msmfb_data buf_info;
+	struct msmfb_img img;
+};
+
+#define MDP_PP_OPS_ENABLE 0x1
+#define MDP_PP_OPS_READ 0x2
+#define MDP_PP_OPS_WRITE 0x4
+#define MDP_PP_OPS_DISABLE 0x8
+#define MDP_PP_IGC_FLAG_ROM0	0x10
+#define MDP_PP_IGC_FLAG_ROM1	0x20
+
+
+#define MDSS_PP_DSPP_CFG	0x000
+#define MDSS_PP_SSPP_CFG	0x100
+#define MDSS_PP_LM_CFG	0x200
+#define MDSS_PP_WB_CFG	0x300
+
+#define MDSS_PP_ARG_MASK	0x3C00
+#define MDSS_PP_ARG_NUM		4
+#define MDSS_PP_ARG_SHIFT	10
+#define MDSS_PP_LOCATION_MASK	0x0300
+#define MDSS_PP_LOGICAL_MASK	0x00FF
+
+#define MDSS_PP_ADD_ARG(var, arg) ((var) | (0x1 << (MDSS_PP_ARG_SHIFT + (arg))))
+#define PP_ARG(x, var) ((var) & (0x1 << (MDSS_PP_ARG_SHIFT + (x))))
+#define PP_LOCAT(var) ((var) & MDSS_PP_LOCATION_MASK)
+#define PP_BLOCK(var) ((var) & MDSS_PP_LOGICAL_MASK)
+
+
+struct mdp_qseed_cfg {
+	uint32_t table_num;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_sharp_cfg {
+	uint32_t flags;
+	uint32_t strength;
+	uint32_t edge_thr;
+	uint32_t smooth_thr;
+	uint32_t noise_thr;
+};
+
+struct mdp_qseed_cfg_data {
+	uint32_t block;
+	struct mdp_qseed_cfg qseed_data;
+};
+
+#define MDP_OVERLAY_PP_CSC_CFG         0x1
+#define MDP_OVERLAY_PP_QSEED_CFG       0x2
+#define MDP_OVERLAY_PP_PA_CFG          0x4
+#define MDP_OVERLAY_PP_IGC_CFG         0x8
+#define MDP_OVERLAY_PP_SHARP_CFG       0x10
+#define MDP_OVERLAY_PP_HIST_CFG        0x20
+#define MDP_OVERLAY_PP_HIST_LUT_CFG    0x40
+#define MDP_OVERLAY_PP_PA_V2_CFG       0x80
+#define MDP_OVERLAY_PP_PCC_CFG	       0x100
+
+#define MDP_CSC_FLAG_ENABLE	0x1
+#define MDP_CSC_FLAG_YUV_IN	0x2
+#define MDP_CSC_FLAG_YUV_OUT	0x4
+
+#define MDP_CSC_MATRIX_COEFF_SIZE	9
+#define MDP_CSC_CLAMP_SIZE		6
+#define MDP_CSC_BIAS_SIZE		3
+
+struct mdp_csc_cfg {
+	/* flags to enable CSC and toggle RGB/YUV input/output */
+	uint32_t flags;
+	uint32_t csc_mv[MDP_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[MDP_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[MDP_CSC_CLAMP_SIZE];
+};
+
+struct mdp_csc_cfg_data {
+	uint32_t block;
+	struct mdp_csc_cfg csc_data;
+};
+
+struct mdp_pa_cfg {
+	uint32_t flags;
+	uint32_t hue_adj;
+	uint32_t sat_adj;
+	uint32_t val_adj;
+	uint32_t cont_adj;
+};
+
+struct mdp_pa_mem_col_cfg {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+#define MDP_SIX_ZONE_LUT_SIZE		384
+
+/* PA Write/Read extension flags */
+#define MDP_PP_PA_HUE_ENABLE		0x10
+#define MDP_PP_PA_SAT_ENABLE		0x20
+#define MDP_PP_PA_VAL_ENABLE		0x40
+#define MDP_PP_PA_CONT_ENABLE		0x80
+#define MDP_PP_PA_SIX_ZONE_ENABLE	0x100
+#define MDP_PP_PA_SKIN_ENABLE		0x200
+#define MDP_PP_PA_SKY_ENABLE		0x400
+#define MDP_PP_PA_FOL_ENABLE		0x800
+
+/* PA masks */
+/* Masks used in PA v1_7 only */
+#define MDP_PP_PA_MEM_PROT_HUE_EN	0x1
+#define MDP_PP_PA_MEM_PROT_SAT_EN	0x2
+#define MDP_PP_PA_MEM_PROT_VAL_EN	0x4
+#define MDP_PP_PA_MEM_PROT_CONT_EN	0x8
+#define MDP_PP_PA_MEM_PROT_SIX_EN	0x10
+#define MDP_PP_PA_MEM_PROT_BLEND_EN	0x20
+/* Masks used in all PAv2 versions */
+#define MDP_PP_PA_HUE_MASK		0x1000
+#define MDP_PP_PA_SAT_MASK		0x2000
+#define MDP_PP_PA_VAL_MASK		0x4000
+#define MDP_PP_PA_CONT_MASK		0x8000
+#define MDP_PP_PA_SIX_ZONE_HUE_MASK	0x10000
+#define MDP_PP_PA_SIX_ZONE_SAT_MASK	0x20000
+#define MDP_PP_PA_SIX_ZONE_VAL_MASK	0x40000
+#define MDP_PP_PA_MEM_COL_SKIN_MASK	0x80000
+#define MDP_PP_PA_MEM_COL_SKY_MASK	0x100000
+#define MDP_PP_PA_MEM_COL_FOL_MASK	0x200000
+#define MDP_PP_PA_MEM_PROTECT_EN	0x400000
+#define MDP_PP_PA_SAT_ZERO_EXP_EN	0x800000
+
+/* Flags for setting PA saturation and value hold */
+#define MDP_PP_PA_LEFT_HOLD		0x1
+#define MDP_PP_PA_RIGHT_HOLD		0x2
+
+struct mdp_pa_v2_data {
+	/* Mask bits for PA features */
+	uint32_t flags;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_cfg skin_cfg;
+	struct mdp_pa_mem_col_cfg sky_cfg;
+	struct mdp_pa_mem_col_cfg fol_cfg;
+	uint32_t six_zone_len;
+	uint32_t six_zone_thresh;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+struct mdp_pa_mem_col_data_v1_7 {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t color_adjust_p2;
+	uint32_t blend_gain;
+	uint8_t sat_hold;
+	uint8_t val_hold;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+struct mdp_pa_data_v1_7 {
+	uint32_t mode;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_data_v1_7 skin_cfg;
+	struct mdp_pa_mem_col_data_v1_7 sky_cfg;
+	struct mdp_pa_mem_col_data_v1_7 fol_cfg;
+	uint32_t six_zone_thresh;
+	uint32_t six_zone_adj_p0;
+	uint32_t six_zone_adj_p1;
+	uint8_t six_zone_sat_hold;
+	uint8_t six_zone_val_hold;
+	uint32_t six_zone_len;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+
+struct mdp_pa_v2_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	struct mdp_pa_v2_data pa_v2_data;
+	void *cfg_payload;
+};
+
+
+enum {
+	mdp_igc_rec601 = 1,
+	mdp_igc_rec709,
+	mdp_igc_srgb,
+	mdp_igc_custom,
+	mdp_igc_rec_max,
+};
+
+struct mdp_igc_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t len, ops;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+	void *cfg_payload;
+};
+
+struct mdp_igc_lut_data_v1_7 {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+};
+
+struct mdp_igc_lut_data_payload {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint64_t __user c0_c1_data;
+	uint64_t __user c2_data;
+	uint32_t strength;
+};
+
+struct mdp_histogram_cfg {
+	uint32_t ops;
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+struct mdp_hist_lut_data_v1_7 {
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_hist_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t hist_lut_first;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+	void *cfg_payload;
+};
+
+struct mdp_pcc_coeff {
+	uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1;
+};
+
+struct mdp_pcc_coeff_v1_7 {
+	uint32_t c, r, g, b, rg, gb, rb, rgb;
+};
+
+struct mdp_pcc_data_v1_7 {
+	struct mdp_pcc_coeff_v1_7 r, g, b;
+};
+
+struct mdp_pcc_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t ops;
+	struct mdp_pcc_coeff r, g, b;
+	void *cfg_payload;
+};
+
+enum {
+	mdp_lut_igc,
+	mdp_lut_pgc,
+	mdp_lut_hist,
+	mdp_lut_rgb,
+	mdp_lut_max,
+};
+struct mdp_overlay_pp_params {
+	uint32_t config_ops;
+	struct mdp_csc_cfg csc_cfg;
+	struct mdp_qseed_cfg qseed_cfg[2];
+	struct mdp_pa_cfg pa_cfg;
+	struct mdp_pa_v2_data pa_v2_cfg;
+	struct mdp_igc_lut_data igc_cfg;
+	struct mdp_sharp_cfg sharp_cfg;
+	struct mdp_histogram_cfg hist_cfg;
+	struct mdp_hist_lut_data hist_lut_cfg;
+	/* PAv2 cfg data for PA 2.x versions */
+	struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+	struct mdp_pcc_cfg_data pcc_cfg_data;
+};
+
+/**
+ * enum mdss_mdp_blend_op - Different blend operations set by userspace
+ *
+ * @BLEND_OP_NOT_DEFINED:    No blend operation defined for the layer.
+ * @BLEND_OP_OPAQUE:         Apply a constant blend operation. The layer
+ *                           would appear opaque in case fg plane alpha is
+ *                           0xff.
+ * @BLEND_OP_PREMULTIPLIED:  Apply source over blend rule. Layer already has
+ *                           alpha pre-multiplication done. If fg plane alpha
+ *                           is less than 0xff, apply modulation as well. This
+ *                           operation is intended on layers having alpha
+ *                           channel.
+ * @BLEND_OP_COVERAGE:       Apply source over blend rule. Layer is not alpha
+ *                           pre-multiplied. Apply pre-multiplication. If fg
+ *                           plane alpha is less than 0xff, apply modulation as
+ *                           well.
+ * @BLEND_OP_MAX:            Used to track maximum blend operation possible by
+ *                           mdp.
+ */
+enum mdss_mdp_blend_op {
+	BLEND_OP_NOT_DEFINED = 0,
+	BLEND_OP_OPAQUE,
+	BLEND_OP_PREMULTIPLIED,
+	BLEND_OP_COVERAGE,
+	BLEND_OP_MAX,
+};
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define MAX_PLANES	4
+struct mdp_scale_data {
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[MAX_PLANES];
+	int phase_step_x[MAX_PLANES];
+	int init_phase_y[MAX_PLANES];
+	int phase_step_y[MAX_PLANES];
+
+	int num_ext_pxls_left[MAX_PLANES];
+	int num_ext_pxls_right[MAX_PLANES];
+	int num_ext_pxls_top[MAX_PLANES];
+	int num_ext_pxls_btm[MAX_PLANES];
+
+	int left_ftch[MAX_PLANES];
+	int left_rpt[MAX_PLANES];
+	int right_ftch[MAX_PLANES];
+	int right_rpt[MAX_PLANES];
+
+	int top_rpt[MAX_PLANES];
+	int btm_rpt[MAX_PLANES];
+	int top_ftch[MAX_PLANES];
+	int btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+};
+
+/**
+ * enum mdp_overlay_pipe_type - Different pipe type set by userspace
+ *
+ * @PIPE_TYPE_AUTO:    Not specified, pipe will be selected according to flags.
+ * @PIPE_TYPE_VIG:     VIG pipe.
+ * @PIPE_TYPE_RGB:     RGB pipe.
+ * @PIPE_TYPE_DMA:     DMA pipe.
+ * @PIPE_TYPE_CURSOR:  CURSOR pipe.
+ * @PIPE_TYPE_MAX:     Used to track maximum number of pipe type.
+ */
+enum mdp_overlay_pipe_type {
+	PIPE_TYPE_AUTO = 0,
+	PIPE_TYPE_VIG,
+	PIPE_TYPE_RGB,
+	PIPE_TYPE_DMA,
+	PIPE_TYPE_CURSOR,
+	PIPE_TYPE_MAX,
+};
+
+/**
+ * struct mdp_overlay - overlay surface structure
+ * @src:	Source image information (width, height, format).
+ * @src_rect:	Source crop rectangle, portion of image that will be fetched.
+ *		This should always be within boundaries of source image.
+ * @dst_rect:	Destination rectangle, the position and size of image on screen.
+ *		This should always be within panel boundaries.
+ * @z_order:	Blending stage to occupy in display, if multiple layers are
+ *		present, highest z_order usually means the top most visible
+ *		layer. The range acceptable is from 0-3 to support blending
+ *		up to 4 layers.
+ * @is_fg:	This flag is used to disable blending of any layers with z_order
+ *		less than this overlay. It means that any layers with z_order
+ *		less than this layer will not be blended and will be replaced
+ *		by the background border color.
+ * @alpha:	Used to set plane opacity. The range can be from 0-255, where
+ *		0 means completely transparent and 255 means fully opaque.
+ * @transp_mask: Color used as color key for transparency. Any pixel in fetched
+ *		image matching this color will be transparent when blending.
+ *		The color should be in same format as the source image format.
+ * @flags:	This is used to customize operation of overlay. See MDP flags
+ *		for more information.
+ * @pipe_type:  Used to specify the type of overlay pipe.
+ * @user_data:	DEPRECATED. Used to store user application specific information.
+ * @bg_color:	Solid color used to fill the overlay surface when no source
+ *		buffer is provided.
+ * @horz_deci:	Horizontal decimation value, this indicates the amount of pixels
+ *		dropped for each pixel that is fetched from a line. The value
+ *		given should be the log2 of the decimation amount:
+ *		0: no decimation
+ *		1: decimate by 2 (drop 1 pixel for each pixel fetched)
+ *		2: decimate by 4 (drop 3 pixels for each pixel fetched)
+ *		3: decimate by 8 (drop 7 pixels for each pixel fetched)
+ *		4: decimate by 16 (drop 15 pixels for each pixel fetched)
+ * @vert_deci:	Vertical decimation value, this indicates the amount of lines
+ *		dropped for each line that is fetched from overlay. The value
+ *		given should be the log2 of the decimation amount:
+ *		0: no decimation
+ *		1: decimation by 2 (drop 1 line for each line fetched)
+ *		2: decimation by 4 (drop 3 lines for each line fetched)
+ *		3: decimation by 8 (drop 7 lines for each line fetched)
+ *		4: decimation by 16 (drop 15 lines for each line fetched)
+ * @overlay_pp_cfg: Overlay post processing configuration, for more information
+ *		see struct mdp_overlay_pp_params.
+ * @priority:	Priority is returned by the driver when overlay is set for the
+ *		first time. It indicates the priority of the underlying pipe
+ *		serving the overlay. This priority can be used by user-space
+ *		in source split when pipes are re-used and shuffled around to
+ *		reduce fallbacks.
+ */
+struct mdp_overlay {
+	struct msmfb_img src;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	uint32_t z_order;	/* stage number */
+	uint32_t is_fg;		/* control alpha & transp */
+	uint32_t alpha;
+	uint32_t blend_op;
+	uint32_t transp_mask;
+	uint32_t flags;
+	uint32_t pipe_type;
+	uint32_t id;
+	uint8_t priority;
+	uint32_t user_data[6];
+	uint32_t bg_color;
+	uint8_t horz_deci;
+	uint8_t vert_deci;
+	struct mdp_overlay_pp_params overlay_pp_cfg;
+	struct mdp_scale_data scale;
+	uint8_t color_space;
+	uint32_t frame_rate;
+};
+
+struct msmfb_overlay_3d {
+	uint32_t is_3d;
+	uint32_t width;
+	uint32_t height;
+};
+
+
+struct msmfb_overlay_blt {
+	uint32_t enable;
+	uint32_t offset;
+	uint32_t width;
+	uint32_t height;
+	uint32_t bpp;
+};
+
+struct mdp_histogram {
+	uint32_t frame_cnt;
+	uint32_t bin_cnt;
+	uint32_t *r;
+	uint32_t *g;
+	uint32_t *b;
+};
+
+#define MISR_CRC_BATCH_SIZE 32
+enum {
+	DISPLAY_MISR_EDP,
+	DISPLAY_MISR_DSI0,
+	DISPLAY_MISR_DSI1,
+	DISPLAY_MISR_HDMI,
+	DISPLAY_MISR_LCDC,
+	DISPLAY_MISR_MDP,
+	DISPLAY_MISR_ATV,
+	DISPLAY_MISR_DSI_CMD,
+	DISPLAY_MISR_MAX
+};
+
+enum {
+	MISR_OP_NONE,
+	MISR_OP_SFM,
+	MISR_OP_MFM,
+	MISR_OP_BM,
+	MISR_OP_MAX
+};
+
+struct mdp_misr {
+	uint32_t block_id;
+	uint32_t frame_count;
+	uint32_t crc_op_mode;
+	uint32_t crc_value[MISR_CRC_BATCH_SIZE];
+};
+
+/*
+ * mdp_block_type defines the identifiers for pipes in MDP 4.3 and up
+ *
+ * MDP_BLOCK_RESERVED is provided for backward compatibility and is
+ * deprecated. It corresponds to DMA_P. So MDP_BLOCK_DMA_P should be used
+ * instead.
+ *
+ * MDP_LOGICAL_BLOCK_DISP_0 identifies the display pipe which fb0 uses,
+ * same for others.
+ */
+
+enum {
+	MDP_BLOCK_RESERVED = 0,
+	MDP_BLOCK_OVERLAY_0,
+	MDP_BLOCK_OVERLAY_1,
+	MDP_BLOCK_VG_1,
+	MDP_BLOCK_VG_2,
+	MDP_BLOCK_RGB_1,
+	MDP_BLOCK_RGB_2,
+	MDP_BLOCK_DMA_P,
+	MDP_BLOCK_DMA_S,
+	MDP_BLOCK_DMA_E,
+	MDP_BLOCK_OVERLAY_2,
+	MDP_LOGICAL_BLOCK_DISP_0 = 0x10,
+	MDP_LOGICAL_BLOCK_DISP_1,
+	MDP_LOGICAL_BLOCK_DISP_2,
+	MDP_BLOCK_MAX,
+};
+
+/*
+ * mdp_histogram_start_req is used to provide the parameters for
+ * histogram start request
+ */
+
+struct mdp_histogram_start_req {
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+/*
+ * mdp_histogram_data is used to return the histogram data, once
+ * the histogram is done/stopped/cancelled
+ */
+
+struct mdp_histogram_data {
+	uint32_t block;
+	uint32_t bin_cnt;
+	uint32_t *c0;
+	uint32_t *c1;
+	uint32_t *c2;
+	uint32_t *extra_info;
+};
+
+
+#define GC_LUT_ENTRIES_V1_7	512
+
+struct mdp_ar_gc_lut_data {
+	uint32_t x_start;
+	uint32_t slope;
+	uint32_t offset;
+};
+
+#define MDP_PP_PGC_ROUNDING_ENABLE 0x10
+struct mdp_pgc_lut_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint8_t num_r_stages;
+	uint8_t num_g_stages;
+	uint8_t num_b_stages;
+	struct mdp_ar_gc_lut_data *r_data;
+	struct mdp_ar_gc_lut_data *g_data;
+	struct mdp_ar_gc_lut_data *b_data;
+	void *cfg_payload;
+};
+
+#define PGC_LUT_ENTRIES 1024
+struct mdp_pgc_lut_data_v1_7 {
+	uint32_t  len;
+	uint32_t  *c0_data;
+	uint32_t  *c1_data;
+	uint32_t  *c2_data;
+};
+
+/*
+ * mdp_rgb_lut_data is used to provide parameters for configuring the
+ * generic RGB LUT for gamma correction or other LUT update use cases
+ */
+struct mdp_rgb_lut_data {
+	uint32_t flags;
+	uint32_t lut_type;
+	struct fb_cmap cmap;
+};
+
+enum {
+	mdp_rgb_lut_gc,
+	mdp_rgb_lut_hist,
+};
+
+struct mdp_lut_cfg_data {
+	uint32_t lut_type;
+	union {
+		struct mdp_igc_lut_data igc_lut_data;
+		struct mdp_pgc_lut_data pgc_lut_data;
+		struct mdp_hist_lut_data hist_lut_data;
+		struct mdp_rgb_lut_data rgb_lut_data;
+	} data;
+};
+
+struct mdp_bl_scale_data {
+	uint32_t min_lvl;
+	uint32_t scale;
+};
+
+struct mdp_pa_cfg_data {
+	uint32_t block;
+	struct mdp_pa_cfg pa_data;
+};
+
+#define MDP_DITHER_DATA_V1_7_SZ 16
+
+struct mdp_dither_data_v1_7 {
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	uint32_t len;
+	uint32_t data[MDP_DITHER_DATA_V1_7_SZ];
+	uint32_t temporal_en;
+};
+
+struct mdp_pa_dither_data {
+	uint64_t data_flags;
+	uint32_t matrix_sz;
+	uint64_t __user matrix_data;
+	uint32_t strength;
+	uint32_t offset_en;
+};
+
+struct mdp_dither_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint32_t mode;
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	void *cfg_payload;
+};
+
+#define MDP_GAMUT_TABLE_NUM		8
+#define MDP_GAMUT_TABLE_NUM_V1_7	4
+#define MDP_GAMUT_SCALE_OFF_TABLE_NUM	3
+#define MDP_GAMUT_TABLE_V1_7_SZ 1229
+#define MDP_GAMUT_SCALE_OFF_SZ 16
+#define MDP_GAMUT_TABLE_V1_7_COARSE_SZ 32
+
+struct mdp_gamut_cfg_data {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t version;
+	/* v1 version specific params */
+	uint32_t gamut_first;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM];
+	uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
+	/* params for newer versions of gamut */
+	void *cfg_payload;
+};
+
+enum {
+	mdp_gamut_fine_mode = 0x1,
+	mdp_gamut_coarse_mode,
+};
+
+struct mdp_gamut_data_v1_7 {
+	uint32_t mode;
+	uint32_t map_en;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c0_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t  tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+	uint32_t  *scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+};
+
+struct mdp_calib_config_data {
+	uint32_t ops;
+	uint32_t addr;
+	uint32_t data;
+};
+
+struct mdp_calib_config_buffer {
+	uint32_t ops;
+	uint32_t size;
+	uint32_t *buffer;
+};
+
+struct mdp_calib_dcm_state {
+	uint32_t ops;
+	uint32_t dcm_state;
+};
+
+enum {
+	DCM_UNINIT,
+	DCM_UNBLANK,
+	DCM_ENTER,
+	DCM_EXIT,
+	DCM_BLANK,
+	DTM_ENTER,
+	DTM_EXIT,
+};
+
+#define MDSS_PP_SPLIT_LEFT_ONLY		0x10000000
+#define MDSS_PP_SPLIT_RIGHT_ONLY	0x20000000
+#define MDSS_PP_SPLIT_MASK		0x30000000
+
+#define MDSS_MAX_BL_BRIGHTNESS 255
+#define AD_BL_LIN_LEN 256
+#define AD_BL_ATT_LUT_LEN 33
+
+#define MDSS_AD_MODE_AUTO_BL	0x0
+#define MDSS_AD_MODE_AUTO_STR	0x1
+#define MDSS_AD_MODE_TARG_STR	0x3
+#define MDSS_AD_MODE_MAN_STR	0x7
+#define MDSS_AD_MODE_CALIB	0xF
+
+#define MDP_PP_AD_INIT	0x10
+#define MDP_PP_AD_CFG	0x20
+
+struct mdss_ad_init {
+	uint32_t asym_lut[33];
+	uint32_t color_corr_lut[33];
+	uint8_t i_control[2];
+	uint16_t black_lvl;
+	uint16_t white_lvl;
+	uint8_t var;
+	uint8_t limit_ampl;
+	uint8_t i_dither;
+	uint8_t slope_max;
+	uint8_t slope_min;
+	uint8_t dither_ctl;
+	uint8_t format;
+	uint8_t auto_size;
+	uint16_t frame_w;
+	uint16_t frame_h;
+	uint8_t logo_v;
+	uint8_t logo_h;
+	uint32_t alpha;
+	uint32_t alpha_base;
+	uint32_t al_thresh;
+	uint32_t bl_lin_len;
+	uint32_t bl_att_len;
+	uint32_t *bl_lin;
+	uint32_t *bl_lin_inv;
+	uint32_t *bl_att_lut;
+};
+
+#define MDSS_AD_BL_CTRL_MODE_EN 1
+#define MDSS_AD_BL_CTRL_MODE_DIS 0
+struct mdss_ad_cfg {
+	uint32_t mode;
+	uint32_t al_calib_lut[33];
+	uint16_t backlight_min;
+	uint16_t backlight_max;
+	uint16_t backlight_scale;
+	uint16_t amb_light_min;
+	uint16_t filter[2];
+	uint16_t calib[4];
+	uint8_t strength_limit;
+	uint8_t t_filter_recursion;
+	uint16_t stab_itr;
+	uint32_t bl_ctrl_mode;
+};
+
+struct mdss_ad_bl_cfg {
+	uint32_t bl_min_delta;
+	uint32_t bl_low_limit;
+};
+
+/* ops uses standard MDP_PP_* flags */
+struct mdss_ad_init_cfg {
+	uint32_t ops;
+	union {
+		struct mdss_ad_init init;
+		struct mdss_ad_cfg cfg;
+	} params;
+};
+
+/* mode uses MDSS_AD_MODE_* flags */
+struct mdss_ad_input {
+	uint32_t mode;
+	union {
+		uint32_t amb_light;
+		uint32_t strength;
+		uint32_t calib_bl;
+	} in;
+	uint32_t output;
+};
+
+#define MDSS_CALIB_MODE_BL	0x1
+struct mdss_calib_cfg {
+	uint32_t ops;
+	uint32_t calib_mask;
+};
+
+enum {
+	mdp_op_pcc_cfg,
+	mdp_op_csc_cfg,
+	mdp_op_lut_cfg,
+	mdp_op_qseed_cfg,
+	mdp_bl_scale_cfg,
+	mdp_op_pa_cfg,
+	mdp_op_pa_v2_cfg,
+	mdp_op_dither_cfg,
+	mdp_op_gamut_cfg,
+	mdp_op_calib_cfg,
+	mdp_op_ad_cfg,
+	mdp_op_ad_input,
+	mdp_op_calib_mode,
+	mdp_op_calib_buffer,
+	mdp_op_calib_dcm_state,
+	mdp_op_max,
+	mdp_op_pa_dither_cfg,
+	mdp_op_ad_bl_cfg,
+	mdp_op_pp_max = 255,
+};
+#define mdp_op_pa_dither_cfg mdp_op_pa_dither_cfg
+#define mdp_op_pp_max mdp_op_pp_max
+
+#define mdp_op_ad_bl_cfg mdp_op_ad_bl_cfg
+
+enum {
+	WB_FORMAT_NV12,
+	WB_FORMAT_RGB_565,
+	WB_FORMAT_RGB_888,
+	WB_FORMAT_xRGB_8888,
+	WB_FORMAT_ARGB_8888,
+	WB_FORMAT_BGRA_8888,
+	WB_FORMAT_BGRX_8888,
+	WB_FORMAT_ARGB_8888_INPUT_ALPHA /* Need to support */
+};
+
+struct msmfb_mdp_pp {
+	uint32_t op;
+	union {
+		struct mdp_pcc_cfg_data pcc_cfg_data;
+		struct mdp_csc_cfg_data csc_cfg_data;
+		struct mdp_lut_cfg_data lut_cfg_data;
+		struct mdp_qseed_cfg_data qseed_cfg_data;
+		struct mdp_bl_scale_data bl_scale_data;
+		struct mdp_pa_cfg_data pa_cfg_data;
+		struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+		struct mdp_dither_cfg_data dither_cfg_data;
+		struct mdp_gamut_cfg_data gamut_cfg_data;
+		struct mdp_calib_config_data calib_cfg;
+		struct mdss_ad_init_cfg ad_init_cfg;
+		struct mdss_calib_cfg mdss_calib_cfg;
+		struct mdss_ad_input ad_input;
+		struct mdp_calib_config_buffer calib_buffer;
+		struct mdp_calib_dcm_state calib_dcm;
+		struct mdss_ad_bl_cfg ad_bl_cfg;
+	} data;
+};
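Editor's note: as an illustration of how the op field selects the matching union member, here is a minimal userspace sketch (a fragment, not a complete program) of a backlight-scale request. The MSMFB_MDP_PP ioctl code defined earlier in this header, an already-open fb_fd descriptor, and the numeric values are all assumptions/placeholders.

	struct msmfb_mdp_pp pp = {
		.op = mdp_bl_scale_cfg,
		.data.bl_scale_data = {
			.min_lvl = 30,	/* placeholder: lowest backlight level allowed */
			.scale = 512,	/* placeholder scale factor; valid range is driver defined */
		},
	};

	if (ioctl(fb_fd, MSMFB_MDP_PP, &pp) < 0)
		perror("MSMFB_MDP_PP");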
+
+#define FB_METADATA_VIDEO_INFO_CODE_SUPPORT 1
+enum {
+	metadata_op_none,
+	metadata_op_base_blend,
+	metadata_op_frame_rate,
+	metadata_op_vic,
+	metadata_op_wb_format,
+	metadata_op_wb_secure,
+	metadata_op_get_caps,
+	metadata_op_crc,
+	metadata_op_get_ion_fd,
+	metadata_op_max
+};
+
+struct mdp_blend_cfg {
+	uint32_t is_premultiplied;
+};
+
+struct mdp_mixer_cfg {
+	uint32_t writeback_format;
+	uint32_t alpha;
+};
+
+struct mdss_hw_caps {
+	uint32_t mdp_rev;
+	uint8_t rgb_pipes;
+	uint8_t vig_pipes;
+	uint8_t dma_pipes;
+	uint8_t max_smp_cnt;
+	uint8_t smp_per_pipe;
+	uint32_t features;
+};
+
+struct msmfb_metadata {
+	uint32_t op;
+	uint32_t flags;
+	union {
+		struct mdp_misr misr_request;
+		struct mdp_blend_cfg blend_cfg;
+		struct mdp_mixer_cfg mixer_cfg;
+		uint32_t panel_frame_rate;
+		uint32_t video_info_code;
+		struct mdss_hw_caps caps;
+		uint8_t secure_en;
+		int fbmem_ionfd;
+	} data;
+};
+
+#define MDP_MAX_FENCE_FD	32
+#define MDP_BUF_SYNC_FLAG_WAIT	1
+#define MDP_BUF_SYNC_FLAG_RETIRE_FENCE	0x10
+
+struct mdp_buf_sync {
+	uint32_t flags;
+	uint32_t acq_fen_fd_cnt;
+	uint32_t session_id;
+	int *acq_fen_fd;
+	int *rel_fen_fd;
+	int *retire_fen_fd;
+};
+
+struct mdp_async_blit_req_list {
+	struct mdp_buf_sync sync;
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MDP_DISPLAY_COMMIT_OVERLAY	1
+
+struct mdp_display_commit {
+	uint32_t flags;
+	uint32_t wait_for_finish;
+	struct fb_var_screeninfo var;
+	/*
+	 * userspace must follow the rules below:
+	 * 1. source split is enabled: l_roi = roi and r_roi = 0
+	 * 2. source split is disabled:
+	 *	2.1 split display: l_roi = l_roi and r_roi = r_roi
+	 *	2.2 non split display: l_roi = roi and r_roi = 0
+	 */
+	struct mdp_rect l_roi;
+	struct mdp_rect r_roi;
+};
+
+/**
+ * struct mdp_overlay_list - argument for ioctl MSMFB_OVERLAY_PREPARE
+ * @num_overlays:	Number of overlay layers as part of the frame.
+ * @overlay_list:	Pointer to a list of overlay structures identifying
+ *			the layers as part of the frame
+ * @flags:		Flags can be used to extend behavior.
+ * @processed_overlays:	Output parameter indicating how many overlays were
+ *			processed successfully. If there are no errors this
+ *			number should match num_overlays; otherwise it indicates
+ *			how many overlays were set before the first failure.
+ */
+struct mdp_overlay_list {
+	uint32_t num_overlays;
+	struct mdp_overlay **overlay_list;
+	uint32_t flags;
+	uint32_t processed_overlays;
+};
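Editor's note: a minimal sketch (fragment) of preparing a frame's overlays with the MSMFB_OVERLAY_PREPARE ioctl named in the kernel-doc above; ov0/ov1 are assumed to be struct mdp_overlay instances already filled by the caller and fb_fd an open framebuffer descriptor.

	struct mdp_overlay *layers[] = { &ov0, &ov1 };
	struct mdp_overlay_list list = {
		.num_overlays = 2,
		.overlay_list = layers,
	};

	if (ioctl(fb_fd, MSMFB_OVERLAY_PREPARE, &list) < 0)
		fprintf(stderr, "only %u overlay(s) prepared\n",
			list.processed_overlays);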
+
+struct mdp_page_protection {
+	uint32_t page_protection;
+};
+
+
+struct mdp_mixer_info {
+	int pndx;
+	int pnum;
+	int ptype;
+	int mixer_num;
+	int z_order;
+};
+
+#define MAX_PIPE_PER_MIXER  7
+
+struct msmfb_mixer_info_req {
+	int mixer_num;
+	int cnt;
+	struct mdp_mixer_info info[MAX_PIPE_PER_MIXER];
+};
+
+enum {
+	DISPLAY_SUBSYSTEM_ID,
+	ROTATOR_SUBSYSTEM_ID,
+};
+
+enum {
+	MDP_IOMMU_DOMAIN_CP,
+	MDP_IOMMU_DOMAIN_NS,
+};
+
+enum {
+	MDP_WRITEBACK_MIRROR_OFF,
+	MDP_WRITEBACK_MIRROR_ON,
+	MDP_WRITEBACK_MIRROR_PAUSE,
+	MDP_WRITEBACK_MIRROR_RESUME,
+};
+
+enum mdp_color_space {
+	MDP_CSC_ITU_R_601,
+	MDP_CSC_ITU_R_601_FR,
+	MDP_CSC_ITU_R_709,
+};
+
+enum {
+	mdp_igc_v1_7 = 1,
+	mdp_igc_vmax,
+	mdp_hist_lut_v1_7,
+	mdp_hist_lut_vmax,
+	mdp_pgc_v1_7,
+	mdp_pgc_vmax,
+	mdp_dither_v1_7,
+	mdp_dither_vmax,
+	mdp_gamut_v1_7,
+	mdp_gamut_vmax,
+	mdp_pa_v1_7,
+	mdp_pa_vmax,
+	mdp_pcc_v1_7,
+	mdp_pcc_vmax,
+	mdp_pp_legacy,
+	mdp_dither_pa_v1_7,
+	mdp_igc_v3,
+	mdp_pp_unknown = 255
+};
+
+#define mdp_dither_pa_v1_7 mdp_dither_pa_v1_7
+#define mdp_pp_unknown mdp_pp_unknown
+#define mdp_igc_v3 mdp_igc_v3
+
+/* PP Features */
+enum {
+	IGC = 1,
+	PCC,
+	GC,
+	PA,
+	GAMUT,
+	DITHER,
+	QSEED,
+	HIST_LUT,
+	HIST,
+	PP_FEATURE_MAX,
+	PA_DITHER,
+	PP_MAX_FEATURES = 25,
+};
+
+#define PA_DITHER PA_DITHER
+#define PP_MAX_FEATURES PP_MAX_FEATURES
+
+struct mdp_pp_feature_version {
+	uint32_t pp_feature;
+	uint32_t version_info;
+};
+#endif /*_UAPI_MSM_MDP_H_*/
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
new file mode 100644
index 0000000..05a105b
--- /dev/null
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -0,0 +1,688 @@
+#ifndef _MSM_MDP_EXT_H_
+#define _MSM_MDP_EXT_H_
+
+#include <linux/msm_mdp.h>
+
+#define MDP_IOCTL_MAGIC 'S'
+/* atomic commit ioctl used for validate and commit request */
+#define MSMFB_ATOMIC_COMMIT	_IOWR(MDP_IOCTL_MAGIC, 128, void *)
+
+/*
+ * Ioctl for updating the layer position asynchronously. Initially, pipes
+ * should be configured with MDP_LAYER_ASYNC flag set during the atomic commit,
+ * after which any number of position update calls can be made. This would
+ * enable multiple position updates within a single vsync. However, the screen
+ * update would happen only after vsync, which would pick the latest update.
+ *
+ * Limitations:
+ * - Currently supported only for video mode panels with single LM or dual LM
+ *   with source_split enabled.
+ * - Only position update is supported with no scaling/cropping.
+ * - Async layers should have unique z_order.
+ */
+#define MSMFB_ASYNC_POSITION_UPDATE _IOWR(MDP_IOCTL_MAGIC, 129, \
+					struct mdp_position_update)
+
+/*
+ * Ioctl for sending the config information.
+ * QSEED3 coefficient LUT tables are passed by user space using this IOCTL.
+ */
+#define MSMFB_MDP_SET_CFG _IOW(MDP_IOCTL_MAGIC, 130, \
+					      struct mdp_set_cfg)
+
+/*
+ * To allow proper structure padding for 64-bit/32-bit targets
+ */
+#ifdef __LP64__
+#define MDP_LAYER_COMMIT_V1_PAD 3
+#else
+#define MDP_LAYER_COMMIT_V1_PAD 4
+#endif
+
+/*
+ * LAYER FLAG CONFIGURATION
+ */
+/* left-right layer flip flag */
+#define MDP_LAYER_FLIP_LR		0x1
+
+/* up-down layer flip flag */
+#define MDP_LAYER_FLIP_UD		0x2
+
+/*
+ * This flag enables pixel extension for the current layer. Validate/commit
+ * call uses scale parameters when this flag is enabled.
+ */
+#define MDP_LAYER_ENABLE_PIXEL_EXT	0x4
+
+/* Flag indicates that layer is foreground layer */
+#define MDP_LAYER_FORGROUND		0x8
+
+/* Flag indicates that layer is associated with secure session */
+#define MDP_LAYER_SECURE_SESSION	0x10
+
+/*
+ * Flag indicates that layer is drawing solid fill. Validate/commit call
+ * does not expect buffer when this flag is enabled.
+ */
+#define MDP_LAYER_SOLID_FILL		0x20
+
+/* Layer format is deinterlace */
+#define MDP_LAYER_DEINTERLACE		0x40
+
+/* layer contains bandwidth compressed format data */
+#define MDP_LAYER_BWC			0x80
+
+/* layer is async position updatable */
+#define MDP_LAYER_ASYNC			0x100
+
+/* layer contains postprocessing configuration data */
+#define MDP_LAYER_PP			0x200
+
+/* Flag indicates that layer is associated with secure display session */
+#define MDP_LAYER_SECURE_DISPLAY_SESSION 0x400
+
+/* Flag enables QSEED3 scaling for the current layer */
+#define MDP_LAYER_ENABLE_QSEED3_SCALE   0x800
+
+/*
+ * layer will work in multirect mode, where a single hardware pipe
+ * fetches multiple rectangles
+ */
+#define MDP_LAYER_MULTIRECT_ENABLE		0x1000
+
+/*
+ * if flag present and multirect is enabled, multirect will work in parallel
+ * fetch mode, otherwise it will default to serial fetch mode.
+ */
+#define MDP_LAYER_MULTIRECT_PARALLEL_MODE	0x2000
+
+/*
+ * DESTINATION SCALER FLAG CONFIGURATION
+ */
+
+/* Enable/disable Destination scaler */
+#define MDP_DESTSCALER_ENABLE		0x1
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Scaling parameter update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_SCALE_UPDATE	0x2
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Detail enhancement setting update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_ENHANCER_UPDATE	0x4
+
+/*
+ * VALIDATE/COMMIT FLAG CONFIGURATION
+ */
+
+/*
+ * Client sets this flag to indicate that the call only validates layers
+ * before commit. If this flag is not set, the driver treats
+ * MSMFB_ATOMIC_COMMIT as a commit request.
+ */
+#define MDP_VALIDATE_LAYER			0x01
+
+/*
+ * This flag is only valid for commit call. Commit behavior is synchronous
+ * when this flag is defined. It blocks current call till processing is
+ * complete. Behavior is asynchronous otherwise.
+ */
+#define MDP_COMMIT_WAIT_FOR_FINISH		0x02
+
+/*
+ * This flag is only valid for the commit call and is used for debugging. It
+ * forces the driver to wait for sync fences.
+ */
+#define MDP_COMMIT_SYNC_FENCE_WAIT		0x04
+
+/* Flag to enable AVR(Adaptive variable refresh) feature. */
+#define MDP_COMMIT_AVR_EN			0x08
+
+/*
+ * Flag to select one shot mode when AVR feature is enabled.
+ * Default mode is continuous mode.
+ */
+#define MDP_COMMIT_AVR_ONE_SHOT_MODE		0x10
+
+/* Flag to enable concurrent writeback for the frame */
+#define MDP_COMMIT_CWB_EN 0x800
+
+/*
+ * Flag to select DSPP as the data point for CWB. If CWB
+ * is enabled without this flag, LM will be selected as data point.
+ */
+#define MDP_COMMIT_CWB_DSPP 0x1000
+
+#define MDP_COMMIT_VERSION_1_0		0x00010000
+
+/*
+ * Configuration structures
+ * All parameters are input to the driver unless explicitly marked as
+ * output parameters.
+ */
+struct mdp_layer_plane {
+	/* DMA buffer file descriptor information. */
+	int fd;
+
+	/* Pixel offset in the dma buffer. */
+	uint32_t offset;
+
+	/* Number of bytes in one scan line including padding bytes. */
+	uint32_t stride;
+};
+
+struct mdp_layer_buffer {
+	/* layer width in pixels. */
+	uint32_t width;
+
+	/* layer height in pixels. */
+	uint32_t height;
+
+	/*
+	 * layer format in DRM-style fourcc, refer drm_fourcc.h for
+	 * standard formats
+	 */
+	uint32_t format;
+
+	/* plane to hold the fd, offset, etc for all color components */
+	struct mdp_layer_plane planes[MAX_PLANES];
+
+	/* valid planes count in layer planes list */
+	uint32_t plane_count;
+
+	/* compression ratio factor, value depends on the pixel format */
+	struct mult_factor comp_ratio;
+
+	/*
+	 * SyncFence associated with this buffer. It is used in two ways.
+	 *
+	 * 1. Driver waits to consume the buffer till producer signals in case
+	 * of primary and external display.
+	 *
+	 * 2. Writeback device uses buffer structure for output buffer where
+	 * driver is producer. However, client sends the fence with buffer to
+	 * indicate that consumer is still using the buffer and it is not ready
+	 * for new content.
+	 */
+	int	 fence;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t reserved;
+};
+
+/*
+ * One layer holds configuration for one pipe. If the client wants to stage a
+ * single layer on two pipes then it should send two different layers with
+ * relative (x,y) information. The client must send the same information during
+ * the validate and commit calls. The commit call may fail if the client sends
+ * different layer information attached to the same pipe during validate and
+ * commit. The device invalidates the pipe once it receives the vsync for that
+ * commit.
+ */
+struct mdp_input_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag configuration section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the index
+	 * in validate and commit call. Device reserves the pipe once validate
+	 * is successful. Device only uses validated pipe during commit call.
+	 * If client sends different layer/pipe configuration in validate &
+	 * commit then commit may fail.
+	 */
+	uint32_t		pipe_ndx;
+
+	/*
+	 * Horizontal decimation value, this indicates the amount of pixels
+	 * dropped for each pixel that is fetched from a line. It does not
+	 * result in bandwidth reduction because pixels are still fetched from
+	 * memory but dropped internally by hardware.
+	 * The decimation value given should be the log2 of the decimation
+	 * amount.
+	 * 0: no decimation
+	 * 1: decimate by 2 (drop 1 pixel for each pixel fetched)
+	 * 2: decimate by 4 (drop 3 pixels for each pixel fetched)
+	 * 3: decimate by 8 (drop 7 pixels for each pixel fetched)
+	 * 4: decimate by 16 (drop 15 pixels for each pixel fetched)
+	 */
+	uint8_t			horz_deci;
+
+	/*
+	 * Vertical decimation value, this indicates the amount of lines
+	 * dropped for each line that is fetched from overlay. It saves
+	 * bandwidth because decimated pixels are not fetched.
+	 * The decimation value given should be the log2 of the decimation
+	 * amount.
+	 * 0: no decimation
+	 * 1: decimation by 2 (drop 1 line for each line fetched)
+	 * 2: decimation by 4 (drop 3 lines for each line fetched)
+	 * 3: decimation by 8 (drop 7 lines for each line fetched)
+	 * 4: decimation by 16 (drop 15 lines for each line fetched)
+	 */
+	uint8_t			vert_deci;
+
+	/*
+	 * Used to set plane opacity. The range can be from 0-255, where
+	 * 0 means completely transparent and 255 means fully opaque.
+	 */
+	uint8_t			alpha;
+
+	/*
+	 * Blending stage to occupy in display, if multiple layers are present,
+	 * highest z_order usually means the top most visible layer. The range
+	 * acceptable is from 0-7 to support blending up to 8 layers.
+	 */
+	uint16_t		z_order;
+
+	/*
+	 * Color used as color key for transparency. Any pixel in fetched
+	 * image matching this color will be transparent when blending.
+	 * The color should be in same format as the source image format.
+	 */
+	uint32_t		transp_mask;
+
+	/*
+	 * Solid color used to fill the overlay surface when no source
+	 * buffer is provided.
+	 */
+	uint32_t		bg_color;
+
+	/* blend operation defined in "mdss_mdp_blend_op" enum. */
+	enum mdss_mdp_blend_op		blend_op;
+
+	/* color space of the source */
+	enum mdp_color_space	color_space;
+
+	/*
+	 * Source crop rectangle, portion of image that will be fetched. This
+	 * should always be within boundaries of source image.
+	 */
+	struct mdp_rect		src_rect;
+
+	/*
+	 * Destination rectangle, the position and size of image on screen.
+	 * This should always be within panel boundaries.
+	 */
+	struct mdp_rect		dst_rect;
+
+	/* Scaling parameters. */
+	void __user	*scale;
+
+	/* Buffer attached with each layer. Device uses it for commit call. */
+	struct mdp_layer_buffer	buffer;
+
+	/*
+	 * Source side post processing configuration information for each
+	 * layer.
+	 */
+	void __user		*pp_info;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Only for validate call. Frame buffer device sets error code
+	 * based on validate call failure scenario.
+	 */
+	int			error_code;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t		reserved[6];
+};
+
+struct mdp_output_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Writeback destination selection for output. Client provides the index
+	 * in validate and commit call.
+	 */
+	uint32_t			writeback_ndx;
+
+	/* Buffer attached with output layer. Device uses it for commit call */
+	struct mdp_layer_buffer		buffer;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t			reserved[6];
+};
+
+/*
+ * Destination scaling info structure holds setup parameters for the upscaling
+ * setting in the destination scaling block.
+ */
+struct mdp_destination_scaler_data {
+	/*
+	 * Flag to switch between modes for the destination scaler. Please refer
+	 * to the destination scaler flag config for all possible settings.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Destination scaler selection index. Client provides the index in
+	 * validate and commit call.
+	 */
+	uint32_t			dest_scaler_ndx;
+
+	/*
+	 * LM width configuration per Destination scaling updates
+	 */
+	uint32_t			lm_width;
+
+	/*
+	 * LM height configuration per Destination scaling updates
+	 */
+	uint32_t			lm_height;
+
+	/*
+	 * The scaling parameters for all modes except disable. When disabling
+	 * the scaler there is no need to provide the scale.
+	 * A userspace pointer pointing to struct mdp_scale_data_v2.
+	 */
+	uint64_t	__user scale;
+};
+
+/*
+ * Commit structure holds the layer stack sent by the client for the validate
+ * and commit calls. If the layers differ between validate and commit, the
+ * commit call will also perform validation and may fail in that case.
+ */
+struct mdp_layer_commit_v1 {
+	/*
+	 * Flag to enable/disable properties for commit/validate call. Refer
+	 * validate/commit flag config section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides release fence handle to client. It
+	 * triggers release fence when display hardware has consumed all the
+	 * buffers attached to this commit call and buffer is ready for reuse
+	 * for primary and external. For writeback case, it triggers it when
+	 * output buffer is ready for consumer.
+	 */
+	int			release_fence;
+
+	/*
+	 * Left_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * left part of panel when it is split display. For non-split display,
+	 * it defines the "region of interest" on the panel.
+	 */
+	struct mdp_rect		left_roi;
+
+	/*
+	 * Right_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * right part of panel for split display configuration. It is not
+	 * required for non-split display.
+	 */
+	struct mdp_rect		right_roi;
+
+	 /* Pointer to a list of input layers for composition. */
+	struct mdp_input_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t		input_layer_cnt;
+
+	/*
+	 * Output layer for writeback display. It supports only one
+	 * layer as output layer. This is not required for primary
+	 * and external displays.
+	 */
+	struct mdp_output_layer __user *output_layer;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides retire fence handle if
+	 * COMMIT_RETIRE_FENCE flag is set in commit call. It triggers
+	 * retire fence when current layers are swapped with new layers
+	 * on display hardware. For video mode panel and writeback,
+	 * retire fence and release fences are triggered at the same
+	 * time while command mode panel triggers release fence first
+	 * (on pingpong done) and retire fence (on rdptr done)
+	 * after that.
+	 */
+	int			retire_fence;
+
+	/*
+	 * Scaler data and control for setting up destination scaler.
+	 * A userspace pointer that points to a list of
+	 * struct mdp_destination_scaler_data.
+	 */
+	void __user		*dest_scaler;
+
+	/*
+	 * Represents the number of destination scaler data entries provided
+	 * by userspace.
+	 */
+	uint32_t		dest_scaler_cnt;
+
+	/* 32-bits reserved value for future usage. */
+	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
+};
+
+/*
+ * mdp_layer_commit - argument for ioctl MSMFB_ATOMIC_COMMIT
+ */
+struct mdp_layer_commit {
+	/*
+	 * 32bit version indicates the commit structure selection
+	 * from union. Lower 16bits indicates the minor version while
+	 * higher 16bits indicates the major version. It selects the
+	 * commit structure based on major version selection. Minor version
+	 * indicates that reserved fields are in use.
+	 *
+	 * Current supported version is 1.0 (Major:1 Minor:0)
+	 */
+	uint32_t version;
+	union {
+		/* Layer commit/validate definition for V1 */
+		struct mdp_layer_commit_v1 commit_v1;
+	};
+};
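Editor's note: to tie the pieces together, a hedged sketch (fragment) of the validate-then-commit sequence described by the flags above; it assumes an open fb_fd and one already-populated struct mdp_input_layer named layer.

	struct mdp_layer_commit commit = {
		.version = MDP_COMMIT_VERSION_1_0,
		.commit_v1 = {
			.flags = MDP_VALIDATE_LAYER,
			.input_layers = &layer,
			.input_layer_cnt = 1,
		},
	};

	/* Pass 1: validation only */
	if (ioctl(fb_fd, MSMFB_ATOMIC_COMMIT, &commit) == 0) {
		/* Pass 2: same layer list, now committed to hardware */
		commit.commit_v1.flags &= ~MDP_VALIDATE_LAYER;
		ioctl(fb_fd, MSMFB_ATOMIC_COMMIT, &commit);
		/* commit.commit_v1.release_fence now holds the release fence fd (output) */
	}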
+
+struct mdp_point {
+	uint32_t x;
+	uint32_t y;
+};
+
+/*
+ * Async updatable layers. One layer holds configuration for one pipe.
+ */
+struct mdp_async_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the
+	 * pipe index that the device reserved during ATOMIC_COMMIT.
+	 */
+	uint32_t		pipe_ndx;
+
+	/* Source start x,y. */
+	struct mdp_point	src;
+
+	/* Destination start x,y. */
+	struct mdp_point	dst;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device sets error code based on the failure.
+	 */
+	int			error_code;
+
+	uint32_t		reserved[3];
+};
+
+/*
+ * mdp_position_update - argument for ioctl MSMFB_ASYNC_POSITION_UPDATE
+ */
+struct mdp_position_update {
+	 /* Pointer to a list of async updatable input layers */
+	struct mdp_async_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t input_layer_cnt;
+};
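Editor's note: a hedged sketch (fragment) of one async position update; pipe_ndx is assumed to be the pipe index reserved by an earlier MSMFB_ATOMIC_COMMIT whose layer carried MDP_LAYER_ASYNC, and new_x/new_y are the desired destination origin.

	struct mdp_async_layer async_layer = {
		.flags = MDP_LAYER_ASYNC,
		.pipe_ndx = pipe_ndx,
		.src = { .x = 0, .y = 0 },
		.dst = { .x = new_x, .y = new_y },
	};
	struct mdp_position_update update = {
		.input_layers = &async_layer,
		.input_layer_cnt = 1,
	};

	if (ioctl(fb_fd, MSMFB_ASYNC_POSITION_UPDATE, &update) < 0)
		fprintf(stderr, "update failed, error_code %d\n",
			async_layer.error_code);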
+
+#define MAX_DET_CURVES		3
+struct mdp_det_enhance_data {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[MAX_DET_CURVES];
+	int16_t adjust_b[MAX_DET_CURVES];
+	int16_t adjust_c[MAX_DET_CURVES];
+};
+
+/* Flags to enable Scaler and its sub components */
+#define ENABLE_SCALE			0x1
+#define ENABLE_DETAIL_ENHANCE		0x2
+#define ENABLE_DIRECTION_DETECTION	0x4
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP			0x1
+#define SCALER_LUT_DIR_WR		0x2
+#define SCALER_LUT_Y_CIR_WR		0x4
+#define SCALER_LUT_UV_CIR_WR		0x8
+#define SCALER_LUT_Y_SEP_WR		0x10
+#define SCALER_LUT_UV_SEP_WR		0x20
+
+/* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D		0x0
+#define FILTER_CIRCULAR_2D		0x1
+#define FILTER_SEPARABLE_1D		0x2
+#define FILTER_BILINEAR			0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT	0x0
+#define FILTER_ALPHA_BILINEAR		0x1
+
+/**
+ * struct mdp_scale_data_v2
+ * Driver uses this data structure for storing all scaling parameters.
+ * It contains all pixel extension data, the QSEED3 filter configuration
+ * and the coefficient table indices.
+ */
+struct mdp_scale_data_v2 {
+	uint32_t enable;
+
+	/* Init phase values */
+	int32_t init_phase_x[MAX_PLANES];
+	int32_t phase_step_x[MAX_PLANES];
+	int32_t init_phase_y[MAX_PLANES];
+	int32_t phase_step_y[MAX_PLANES];
+
+	/*
+	 * This should be set to the total horizontal pixels:
+	 * left + right + width
+	 */
+	uint32_t num_ext_pxls_left[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_right[MAX_PLANES];
+
+	/*
+	 * This should be set to the total vertical pixels:
+	 * top + bottom + height
+	 */
+	uint32_t num_ext_pxls_top[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_btm[MAX_PLANES];
+
+	/* over fetch pixels */
+	int32_t left_ftch[MAX_PLANES];
+	int32_t left_rpt[MAX_PLANES];
+	int32_t right_ftch[MAX_PLANES];
+	int32_t right_rpt[MAX_PLANES];
+
+	/* Repeat pixels */
+	uint32_t top_rpt[MAX_PLANES];
+	uint32_t btm_rpt[MAX_PLANES];
+	uint32_t top_ftch[MAX_PLANES];
+	uint32_t btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+
+	/*
+	 * alpha plane can only be scaled using bilinear or pixel
+	 * repeat/drop, specify these for Y and UV planes only
+	 */
+	uint32_t preload_x[MAX_PLANES];
+	uint32_t preload_y[MAX_PLANES];
+	uint32_t src_width[MAX_PLANES];
+	uint32_t src_height[MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes */
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	struct mdp_det_enhance_data detail_enhance;
+
+	/* reserved value for future usage. */
+	uint64_t reserved[8];
+};
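Editor's note: a short sketch of how this structure is assumed to attach to an input layer through the scale pointer when MDP_LAYER_ENABLE_QSEED3_SCALE is set; layer is the struct mdp_input_layer from the commit sketch earlier and the field values are placeholders.

	struct mdp_scale_data_v2 qseed3 = {
		.enable = ENABLE_SCALE,
		.dst_width = 1920,	/* placeholder output size */
		.dst_height = 1080,
		/* per-plane phase, pixel-extension and LUT fields omitted here */
	};

	layer.flags |= MDP_LAYER_ENABLE_QSEED3_SCALE;
	layer.scale = &qseed3;	/* consumed during the MSMFB_ATOMIC_COMMIT call */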
+
+/**
+ * struct mdp_scale_luts_info
+ * This struct pointer is received as payload in SET_CFG_IOCTL when the flag
+ * is set to MDP_QSEED3_LUT_CFG
+ * @dir_lut:      Direction detection coefficients table
+ * @cir_lut:      Circular coefficients table
+ * @sep_lut:      Separable coefficients table
+ * @dir_lut_size: Size of direction coefficients table
+ * @cir_lut_size: Size of circular coefficients table
+ * @sep_lut_size: Size of separable coefficients table
+ */
+struct mdp_scale_luts_info {
+	uint64_t __user dir_lut;
+	uint64_t __user cir_lut;
+	uint64_t __user sep_lut;
+	uint32_t dir_lut_size;
+	uint32_t cir_lut_size;
+	uint32_t sep_lut_size;
+};
+
+#define MDP_QSEED3_LUT_CFG 0x1
+
+struct mdp_set_cfg {
+	uint64_t flags;
+	uint32_t len;
+	uint64_t __user payload;
+};
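Editor's note: a hedged sketch (fragment) of handing the QSEED3 coefficient tables to the driver through MSMFB_MDP_SET_CFG; the three dir/cir/sep buffers and their byte sizes are assumed to have been built by the caller, and fb_fd is an open framebuffer descriptor.

	struct mdp_scale_luts_info luts = {
		.dir_lut = (uint64_t)(uintptr_t)dir_lut_buf,
		.cir_lut = (uint64_t)(uintptr_t)cir_lut_buf,
		.sep_lut = (uint64_t)(uintptr_t)sep_lut_buf,
		.dir_lut_size = dir_lut_bytes,
		.cir_lut_size = cir_lut_bytes,
		.sep_lut_size = sep_lut_bytes,
	};
	struct mdp_set_cfg cfg = {
		.flags = MDP_QSEED3_LUT_CFG,
		.len = sizeof(luts),
		.payload = (uint64_t)(uintptr_t)&luts,
	};

	ioctl(fb_fd, MSMFB_MDP_SET_CFG, &cfg);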
+#endif
diff --git a/include/uapi/linux/msm_rotator.h b/include/uapi/linux/msm_rotator.h
new file mode 100644
index 0000000..e1a2ecb
--- /dev/null
+++ b/include/uapi/linux/msm_rotator.h
@@ -0,0 +1,60 @@
+#ifndef _UAPI__MSM_ROTATOR_H__
+#define _UAPI__MSM_ROTATOR_H__
+
+#include <linux/types.h>
+#include <linux/msm_mdp.h>
+
+#define MSM_ROTATOR_IOCTL_MAGIC 'R'
+
+#define MSM_ROTATOR_IOCTL_START   \
+		_IOWR(MSM_ROTATOR_IOCTL_MAGIC, 1, struct msm_rotator_img_info)
+#define MSM_ROTATOR_IOCTL_ROTATE   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 2, struct msm_rotator_data_info)
+#define MSM_ROTATOR_IOCTL_FINISH   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 3, int)
+
+#define ROTATOR_VERSION_01	0xA5B4C301
+
+enum rotator_clk_type {
+	ROTATOR_CORE_CLK,
+	ROTATOR_PCLK,
+	ROTATOR_IMEM_CLK
+};
+
+struct msm_rotator_img_info {
+	unsigned int session_id;
+	struct msmfb_img  src;
+	struct msmfb_img  dst;
+	struct mdp_rect src_rect;
+	unsigned int    dst_x;
+	unsigned int    dst_y;
+	unsigned char   rotations;
+	int enable;
+	unsigned int	downscale_ratio;
+	unsigned int secure;
+};
+
+struct msm_rotator_data_info {
+	int session_id;
+	struct msmfb_data src;
+	struct msmfb_data dst;
+	unsigned int version_key;
+	struct msmfb_data src_chroma;
+	struct msmfb_data dst_chroma;
+};
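Editor's note: a hedged sketch of the session flow implied by the three ioctls above (START sets up a session, ROTATE processes one pair of buffers, FINISH tears the session down). The rot_fd descriptor, the msmfb_img/msmfb_data buffers, the MDP_ROT_90 flag from msm_mdp.h, and the assumption that START returns the session_id in the img_info struct are all caller-supplied assumptions.

	struct msm_rotator_img_info info = {
		.src = src_img,
		.dst = dst_img,
		.src_rect = { 0, 0, src_img.width, src_img.height },
		.rotations = MDP_ROT_90,
		.enable = 1,
	};
	struct msm_rotator_data_info data = {
		.src = src_buf,
		.dst = dst_buf,
		.version_key = ROTATOR_VERSION_01,
	};

	ioctl(rot_fd, MSM_ROTATOR_IOCTL_START, &info);	/* assumed to fill session_id */
	data.session_id = info.session_id;
	ioctl(rot_fd, MSM_ROTATOR_IOCTL_ROTATE, &data);
	ioctl(rot_fd, MSM_ROTATOR_IOCTL_FINISH, &data.session_id);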
+
+struct msm_rot_clocks {
+	const char *clk_name;
+	enum rotator_clk_type clk_type;
+	unsigned int clk_rate;
+};
+
+struct msm_rotator_platform_data {
+	unsigned int number_of_clocks;
+	unsigned int hardware_version_number;
+	struct msm_rot_clocks *rotator_clks;
+	struct msm_bus_scale_pdata *bus_scale_table;
+	char rot_iommu_split_domain;
+};
+#endif
+
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 0dba4e4..2817ca1 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -27,7 +27,7 @@
 #define NETLINK_ECRYPTFS	19
 #define NETLINK_RDMA		20
 #define NETLINK_CRYPTO		21	/* Crypto layer */
-
+#define NETLINK_SOCKEV          22      /* Socket Administrative Events */
 #define NETLINK_INET_DIAG	NETLINK_SOCK_DIAG
 
 #define MAX_LINKS 32		
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index d08c63f..0c5d5dd 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -64,7 +64,7 @@
 	__u32	pdmc_count;
 	__u16	pdmc_type;
 	__u16	pdmc_alen;
-	__u8	pdmc_addr[MAX_ADDR_LEN];
+	__u8	pdmc_addr[32]; /* MAX_ADDR_LEN */
 };
 
 struct packet_diag_ring {
diff --git a/include/uapi/linux/sockev.h b/include/uapi/linux/sockev.h
new file mode 100644
index 0000000..b274fbc
--- /dev/null
+++ b/include/uapi/linux/sockev.h
@@ -0,0 +1,31 @@
+#ifndef _SOCKEV_H_
+#define _SOCKEV_H_
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/socket.h>
+
+enum sknetlink_groups {
+	SKNLGRP_UNICAST,
+	SKNLGRP_SOCKEV,
+	__SKNLGRP_MAX
+};
+
+#define SOCKEV_STR_MAX 32
+
+/********************************************************************
+ *		Socket operation messages
+ ****/
+
+struct sknlsockevmsg {
+	__u8 event[SOCKEV_STR_MAX];
+	__u32 pid; /* (struct task_struct*)->pid */
+	__u16 skfamily; /* (struct socket*)->sk->sk_family */
+	__u8 skstate; /* (struct socket*)->sk->sk_state */
+	__u8 skprotocol; /* (struct socket*)->sk->sk_protocol */
+	__u16 sktype; /* (struct socket*)->sk->sk_type */
+	__u64 skflags; /* (struct socket*)->sk->sk_flags */
+};
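Editor's note: a minimal sketch (fragment) of listening on the new NETLINK_SOCKEV family; whether events are delivered on the SKNLGRP_SOCKEV multicast group bound via the usual bitmask convention is an assumption, not something documented here.

	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/sockev.h>

	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1 << (SKNLGRP_SOCKEV - 1),	/* assumed group bitmask */
	};

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	/* each received netlink message carries a struct sknlsockevmsg payload */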
+
+#endif /* _SOCKEV_H_ */
+
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index e854785..08aa800 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -570,6 +570,7 @@
 	NET_IPV6_PROXY_NDP=23,
 	NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
 	NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
+	NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
 	__NET_IPV6_MAX
 };
 
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 37c6c00..a62870e 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -429,6 +429,9 @@
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_0	= 14,
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_1	= 15,
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_2	= 16,
+#define V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN \
+	V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN
+	V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN = 17,
 };
 #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA	(V4L2_CID_MPEG_BASE+360)
 #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA	(V4L2_CID_MPEG_BASE+361)
@@ -919,6 +922,9 @@
 	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1	= 23,
 	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_2	= 24,
 	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_2	= 25,
+#define V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN \
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN = 26,
 };
 
 #define V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS \
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fd379ec..86cb858 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -539,6 +539,7 @@
 #define V4L2_PIX_FMT_NV12_UBWC        v4l2_fourcc('Q', '1', '2', '8')
 /* UBWC 10-bit Y/CbCr 4:2:0 */
 #define V4L2_PIX_FMT_NV12_TP10_UBWC   v4l2_fourcc('Q', '1', '2', 'A')
+#define V4L2_PIX_FMT_NV12_P010_UBWC   v4l2_fourcc('Q', '1', '2', 'B')
 
 /* two non contiguous planes - one Y, one Cr + Cb interleaved  */
 #define V4L2_PIX_FMT_NV12M   v4l2_fourcc('N', 'M', '1', '2') /* 12  Y/CbCr 4:2:0  */
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index bf859ff7..5f375c4 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -1,9 +1,10 @@
-header-y += cam_req_mgr.h
 header-y += cam_defs.h
 header-y += cam_isp.h
 header-y += cam_isp_vfe.h
 header-y += cam_isp_ife.h
+header-y += cam_req_mgr.h
 header-y += cam_sensor.h
+header-y += cam_sync.h
 header-y += msm_media_info.h
 header-y += msm_vidc.h
 header-y += msm_sde_rotator.h
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 18bd04a..3e2b24c 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -9,16 +9,17 @@
 
 #define CAM_REQ_MGR_VNODE_NAME "cam-req-mgr-devnode"
 
-#define CAM_DEVICE_TYPE_BASE    (MEDIA_ENT_F_OLD_BASE)
-#define CAM_VNODE_DEVICE_TYPE   (CAM_DEVICE_TYPE_BASE)
-#define CAM_SENSOR_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 1)
-#define CAM_IFE_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE + 2)
-#define CAM_ICP_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE + 3)
-#define CAM_LRME_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 4)
-#define CAM_JPEG_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 5)
-#define CAM_FD_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 6)
-#define CAM_CPAS_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 7)
-#define CAM_CSIPHY_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 8)
+#define CAM_DEVICE_TYPE_BASE      (MEDIA_ENT_F_OLD_BASE)
+#define CAM_VNODE_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE)
+#define CAM_SENSOR_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 1)
+#define CAM_IFE_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 2)
+#define CAM_ICP_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 3)
+#define CAM_LRME_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 4)
+#define CAM_JPEG_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 5)
+#define CAM_FD_DEVICE_TYPE        (CAM_DEVICE_TYPE_BASE + 6)
+#define CAM_CPAS_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 7)
+#define CAM_CSIPHY_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 8)
+#define CAM_ACTUATOR_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 9)
 
 /* cam_req_mgr hdl info */
 #define CAM_REQ_MGR_HDL_IDX_POS           8
diff --git a/include/uapi/media/cam_sync.h b/include/uapi/media/cam_sync.h
new file mode 100644
index 0000000..003c9ad
--- /dev/null
+++ b/include/uapi/media/cam_sync.h
@@ -0,0 +1,134 @@
+#ifndef __UAPI_CAM_SYNC_H__
+#define __UAPI_CAM_SYNC_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/media.h>
+
+#define CAM_SYNC_DEVICE_NAME                     "cam_sync_device"
+
+/* V4L event which user space will subscribe to */
+#define CAM_SYNC_V4L_EVENT                       (V4L2_EVENT_PRIVATE_START + 0)
+
+/* Specific event ids to get notified in user space */
+#define CAM_SYNC_V4L_EVENT_ID_CB_TRIG            0
+
+/* Size of opaque payload sent to kernel for safekeeping until signal time */
+#define CAM_SYNC_USER_PAYLOAD_SIZE               2
+
+/* Device type for sync device needed for device discovery */
+#define CAM_SYNC_DEVICE_TYPE                     (MEDIA_ENT_F_OLD_BASE)
+
+#define CAM_SYNC_GET_PAYLOAD_PTR(ev, type)       \
+	(type *)((char *)ev.u.data + sizeof(struct cam_sync_ev_header))
+
+#define CAM_SYNC_GET_HEADER_PTR(ev)              \
+	((struct cam_sync_ev_header *)ev.u.data)
+
+#define CAM_SYNC_STATE_INVALID                   0
+#define CAM_SYNC_STATE_ACTIVE                    1
+#define CAM_SYNC_STATE_SIGNALED_SUCCESS          2
+#define CAM_SYNC_STATE_SIGNALED_ERROR            3
+
+/**
+ * struct cam_sync_ev_header - Event header for sync event notification
+ *
+ * @sync_obj: Sync object
+ * @status:   Status of the object
+ */
+struct cam_sync_ev_header {
+	int32_t sync_obj;
+	int32_t status;
+};
+
+/**
+ * struct cam_sync_info - Sync object creation information
+ *
+ * @name:       Optional string representation of the sync object
+ * @sync_obj:   Sync object returned after creation in kernel
+ */
+struct cam_sync_info {
+	char name[64];
+	int32_t sync_obj;
+};
+
+/**
+ * struct cam_sync_signal - Sync object signaling struct
+ *
+ * @sync_obj:   Sync object to be signaled
+ * @sync_state: State of the sync object to which it should be signaled
+ */
+struct cam_sync_signal {
+	int32_t sync_obj;
+	uint32_t sync_state;
+};
+
+/**
+ * struct cam_sync_merge - Merge information for sync objects
+ *
+ * @sync_objs:  Pointer to sync objects
+ * @num_objs:   Number of objects in the array
+ * @merged:     Merged sync object
+ */
+struct cam_sync_merge {
+	__u64 sync_objs;
+	uint32_t num_objs;
+	int32_t merged;
+};
+
+/**
+ * struct cam_sync_userpayload_info - Payload info from user space
+ *
+ * @sync_obj:   Sync object for which payload has to be registered for
+ * @reserved:   Reserved
+ * @payload:    Pointer to user payload
+ */
+struct cam_sync_userpayload_info {
+	int32_t sync_obj;
+	uint32_t reserved;
+	__u64 payload[CAM_SYNC_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct cam_sync_wait - Sync object wait information
+ *
+ * @sync_obj:   Sync object to wait on
+ * @reserved:   Reserved
+ * @timeout_ms: Timeout in milliseconds
+ */
+struct cam_sync_wait {
+	int32_t sync_obj;
+	uint32_t reserved;
+	uint64_t timeout_ms;
+};
+
+/**
+ * struct cam_private_ioctl_arg - Sync driver ioctl argument
+ *
+ * @id:         IOCTL command id
+ * @size:       Size of command payload
+ * @result:     Result of command execution
+ * @reserved:   Reserved
+ * @ioctl_ptr:  Pointer to user data
+ */
+struct cam_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__user __u64 ioctl_ptr;
+};
+
+#define CAM_PRIVATE_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_private_ioctl_arg)
+
+#define CAM_SYNC_CREATE                          0
+#define CAM_SYNC_DESTROY                         1
+#define CAM_SYNC_SIGNAL                          2
+#define CAM_SYNC_MERGE                           3
+#define CAM_SYNC_REGISTER_PAYLOAD                4
+#define CAM_SYNC_DEREGISTER_PAYLOAD              5
+#define CAM_SYNC_WAIT                            6
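Editor's note: a hedged sketch (fragment) of creating a sync object through the private ioctl wrapper above; the device node behind sync_fd and the exact dispatch path through the CAM_SYNC_DEVICE_NAME video node are assumptions.

	struct cam_sync_info info = { .name = "example-fence" };
	struct cam_private_ioctl_arg arg = {
		.id = CAM_SYNC_CREATE,
		.size = sizeof(info),
		.ioctl_ptr = (uint64_t)(uintptr_t)&info,
	};

	if (ioctl(sync_fd, CAM_PRIVATE_IOCTL_CMD, &arg) == 0 && arg.result == 0)
		/* info.sync_obj now identifies the newly created sync object */
		printf("created sync obj %d\n", info.sync_obj);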
+
+#endif /* __UAPI_CAM_SYNC_H__ */
diff --git a/include/uapi/media/msm_sde_rotator.h b/include/uapi/media/msm_sde_rotator.h
index 790135a..212eb26 100644
--- a/include/uapi/media/msm_sde_rotator.h
+++ b/include/uapi/media/msm_sde_rotator.h
@@ -63,6 +63,7 @@
 #define SDE_PIX_FMT_Y_CBCR_H2V2_P010	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010
 #define SDE_PIX_FMT_Y_CBCR_H2V2_TP10	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10
 #define SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC	V4L2_PIX_FMT_NV12_TP10_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC	V4L2_PIX_FMT_NV12_P010_UBWC
 
 /*
  * struct msm_sde_rotator_fence - v4l2 buffer fence info
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index e04ccf0..3048105 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -70,7 +70,7 @@
 	__u32 pcm_frames;
 	__u32 pcm_io_frames;
 	__u32 sampling_rate;
-	uint64_t timestamp;
+	__u64 timestamp;
 } __attribute__((packed, aligned(4)));
 
 /**
@@ -128,24 +128,46 @@
  * @reserved: reserved for furture use
  */
 struct snd_compr_audio_info {
-	uint32_t frame_size;
-	uint32_t reserved[15];
+	__u32 frame_size;
+	__u32 reserved[15];
 } __attribute__((packed, aligned(4)));
 
+#define SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER 0
+#define SNDRV_COMPRESS_RENDER_MODE_STC_MASTER 1
+
+#define SNDRV_COMPRESS_CLK_REC_MODE_NONE 0
+#define SNDRV_COMPRESS_CLK_REC_MODE_AUTO 1
+
 /**
  * enum sndrv_compress_encoder
  * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
  * end of the track
  * @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
  * beginning of the track
+ * @SNDRV_COMPRESS_PATH_DELAY: dsp path delay in microseconds
+ * @SNDRV_COMPRESS_RENDER_MODE: dsp render mode (audio master or stc)
+ * @SNDRV_COMPRESS_CLK_REC_MODE: clock recovery mode (none or auto)
+ * @SNDRV_COMPRESS_RENDER_WINDOW: render window
+ * @SNDRV_COMPRESS_START_DELAY: start delay
  */
 enum sndrv_compress_encoder {
 	SNDRV_COMPRESS_ENCODER_PADDING = 1,
 	SNDRV_COMPRESS_ENCODER_DELAY = 2,
 	SNDRV_COMPRESS_MIN_BLK_SIZE = 3,
 	SNDRV_COMPRESS_MAX_BLK_SIZE = 4,
+	SNDRV_COMPRESS_PATH_DELAY = 5,
+	SNDRV_COMPRESS_RENDER_MODE = 6,
+	SNDRV_COMPRESS_CLK_REC_MODE = 7,
+	SNDRV_COMPRESS_RENDER_WINDOW = 8,
+	SNDRV_COMPRESS_START_DELAY = 9,
 };
 
+#define SNDRV_COMPRESS_PATH_DELAY SNDRV_COMPRESS_PATH_DELAY
+#define SNDRV_COMPRESS_RENDER_MODE SNDRV_COMPRESS_RENDER_MODE
+#define SNDRV_COMPRESS_CLK_REC_MODE SNDRV_COMPRESS_CLK_REC_MODE
+#define SNDRV_COMPRESS_RENDER_WINDOW SNDRV_COMPRESS_RENDER_WINDOW
+#define SNDRV_COMPRESS_START_DELAY SNDRV_COMPRESS_START_DELAY
+
 /**
  * struct snd_compr_metadata - compressed stream metadata
  * @key: key id
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 75f61fb..09593e7 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -104,7 +104,8 @@
 #define SND_AUDIOCODEC_ALAC                  ((__u32) 0x00000020)
 #define SND_AUDIOCODEC_APE                   ((__u32) 0x00000021)
 #define SND_AUDIOCODEC_DSD                   ((__u32) 0x00000022)
-#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_DSD
+#define SND_AUDIOCODEC_APTX                  ((__u32) 0x00000023)
+#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_APTX
 
 /*
  * Profile and modes are listed with bit masks. This allows for a
@@ -398,6 +399,12 @@
 	__u32 seek_table_present;
 };
 
+struct snd_dec_aptx {
+	__u32 lap;
+	__u32 uap;
+	__u32 nap;
+};
+
 union snd_codec_options {
 	struct snd_enc_wma wma;
 	struct snd_enc_vorbis vorbis;
@@ -409,6 +416,7 @@
 	struct snd_dec_vorbis vorbis_dec;
 	struct snd_dec_alac alac;
 	struct snd_dec_ape ape;
+	struct snd_dec_aptx aptx_dec;
 };
 
 /** struct snd_codec_desc - description of codec capabilities
diff --git a/include/uapi/sound/lsm_params.h b/include/uapi/sound/lsm_params.h
index eafdc11..9ca5930 100644
--- a/include/uapi/sound/lsm_params.h
+++ b/include/uapi/sound/lsm_params.h
@@ -1,6 +1,9 @@
 #ifndef _UAPI_LSM_PARAMS_H__
 #define _UAPI_LSM_PARAMS_H__
 
+#define LSM_POLLING_ENABLE_SUPPORT
+#define LSM_EVENT_TIMESTAMP_MODE_SUPPORT
+
 #include <linux/types.h>
 #include <sound/asound.h>
 
@@ -18,6 +21,19 @@
 #define LSM_OUT_TRANSFER_MODE_RT (0)
 #define LSM_OUT_TRANSFER_MODE_FTRT (1)
 
+#define LSM_ENDPOINT_DETECT_THRESHOLD (0)
+#define LSM_OPERATION_MODE (1)
+#define LSM_GAIN (2)
+#define LSM_MIN_CONFIDENCE_LEVELS (3)
+#define LSM_REG_SND_MODEL (4)
+#define LSM_DEREG_SND_MODEL (5)
+#define LSM_CUSTOM_PARAMS (6)
+#define LSM_POLLING_ENABLE (7)
+#define LSM_PARAMS_MAX (LSM_POLLING_ENABLE + 1)
+
+#define LSM_EVENT_NON_TIME_STAMP_MODE (0)
+#define LSM_EVENT_TIME_STAMP_MODE (1)
+
 enum lsm_app_id {
 	LSM_VOICE_WAKEUP_APP_ID = 1,
 	LSM_VOICE_WAKEUP_APP_ID_V2 = 2,
@@ -35,18 +51,6 @@
 	LSM_VOICE_WAKEUP_STATUS_REJECTED
 };
 
-enum LSM_PARAM_TYPE {
-	LSM_ENDPOINT_DETECT_THRESHOLD = 0,
-	LSM_OPERATION_MODE,
-	LSM_GAIN,
-	LSM_MIN_CONFIDENCE_LEVELS,
-	LSM_REG_SND_MODEL,
-	LSM_DEREG_SND_MODEL,
-	LSM_CUSTOM_PARAMS,
-	/* driver ioctl will parse only so many params */
-	LSM_PARAMS_MAX,
-};
-
 /*
  * Data for LSM_ENDPOINT_DETECT_THRESHOLD param_type
  * @epd_begin: Begin threshold
@@ -75,6 +79,14 @@
 	__u16 gain;
 };
 
+/*
+ * Data for LSM_POLLING_ENABLE param_type
+ * @poll_en: Polling enable or disable
+ */
+struct snd_lsm_poll_enable {
+	bool poll_en;
+};
+
 
 struct snd_lsm_sound_model_v2 {
 	__u8 __user *data;
@@ -95,11 +107,20 @@
 	__u8 payload[0];
 };
 
+struct snd_lsm_event_status_v3 {
+	__u32 timestamp_lsw;
+	__u32 timestamp_msw;
+	__u16 status;
+	__u16 payload_size;
+	__u8 payload[0];
+};
+
 struct snd_lsm_detection_params {
 	__u8 *conf_level;
 	enum lsm_detection_mode detect_mode;
 	__u8 num_confidence_levels;
 	bool detect_failure;
+	bool poll_enable;
 };
 
 /*
@@ -122,7 +143,7 @@
 	__u32 param_id;
 	__u32 param_size;
 	__u8 __user *param_data;
-	enum LSM_PARAM_TYPE param_type;
+	uint32_t param_type;
 };
 
 /*
@@ -171,5 +192,9 @@
 					struct snd_lsm_module_params)
 #define SNDRV_LSM_OUT_FORMAT_CFG _IOW('U', 0x0C, \
 				      struct snd_lsm_output_format_cfg)
+#define SNDRV_LSM_SET_PORT	_IO('U', 0x0D)
+#define SNDRV_LSM_SET_FWK_MODE_CONFIG	_IOW('U', 0x0E, uint32_t)
+#define SNDRV_LSM_EVENT_STATUS_V3	_IOW('U', 0x0F, \
+					struct snd_lsm_event_status_v3)
 
 #endif
diff --git a/include/uapi/video/Kbuild b/include/uapi/video/Kbuild
index ac7203b..b98fa51 100644
--- a/include/uapi/video/Kbuild
+++ b/include/uapi/video/Kbuild
@@ -1,4 +1,6 @@
 # UAPI Header export list
 header-y += edid.h
+header-y += msm_hdmi_hdcp_mgr.h
+header-y += msm_hdmi_modes.h
 header-y += sisfb.h
 header-y += uvesafb.h
diff --git a/include/uapi/video/msm_hdmi_hdcp_mgr.h b/include/uapi/video/msm_hdmi_hdcp_mgr.h
new file mode 100644
index 0000000..85fa918
--- /dev/null
+++ b/include/uapi/video/msm_hdmi_hdcp_mgr.h
@@ -0,0 +1,54 @@
+#ifndef _UAPI__MSM_HDMI_HDCP_MGR_H
+#define _UAPI__MSM_HDMI_HDCP_MGR_H
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum {
+	MSG_ID_IDX,
+	RET_CODE_IDX,
+	HEADER_LEN,
+};
+
+enum RET_CODE {
+	HDCP_NOT_AUTHED,
+	HDCP_AUTHED,
+	HDCP_DISABLE,
+};
+
+enum MSG_ID { /* List of functions expected to be called after it */
+	DOWN_CHECK_TOPOLOGY,
+	UP_REQUEST_TOPOLOGY,
+	UP_SEND_TOPOLOGY,
+	DOWN_REQUEST_TOPOLOGY,
+	MSG_NUM,
+};
+
+enum SOURCE_ID {
+	HDCP_V1_TX,
+	HDCP_V1_RX,
+	HDCP_V2_RX,
+	HDCP_V2_TX,
+	SRC_NUM,
+};
+
+/*
+ * how to parse sysfs params buffer
+ * from hdcp_tx driver.
+ */
+
+struct HDCP_V2V1_MSG_TOPOLOGY {
+	/* indicates downstream's type */
+	uint32_t ds_type;
+	uint8_t bksv[5];
+	uint8_t dev_count;
+	uint8_t depth;
+	uint8_t ksv_list[5 * 127];
+	uint32_t max_cascade_exceeded;
+	uint32_t max_dev_exceeded;
+};
+
+#endif /* _UAPI__MSM_HDMI_HDCP_MGR_H */
diff --git a/include/uapi/video/msm_hdmi_modes.h b/include/uapi/video/msm_hdmi_modes.h
new file mode 100644
index 0000000..8a02997
--- /dev/null
+++ b/include/uapi/video/msm_hdmi_modes.h
@@ -0,0 +1,559 @@
+#ifndef _UAPI_MSM_HDMI_MODES_H__
+#define _UAPI_MSM_HDMI_MODES_H__
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define MSM_HDMI_RGB_888_24BPP_FORMAT       (1 << 0)
+#define MSM_HDMI_YUV_420_12BPP_FORMAT       (1 << 1)
+
+enum aspect_ratio {
+	HDMI_RES_AR_INVALID,
+	HDMI_RES_AR_4_3,
+	HDMI_RES_AR_5_4,
+	HDMI_RES_AR_16_9,
+	HDMI_RES_AR_16_10,
+	HDMI_RES_AR_64_27,
+	HDMI_RES_AR_256_135,
+	HDMI_RES_AR_MAX,
+};
+
+enum msm_hdmi_s3d_mode {
+	HDMI_S3D_NONE,
+	HDMI_S3D_SIDE_BY_SIDE,
+	HDMI_S3D_TOP_AND_BOTTOM,
+	HDMI_S3D_FRAME_PACKING,
+	HDMI_S3D_MAX,
+};
+
+struct msm_hdmi_mode_timing_info {
+	uint32_t	video_format;
+	uint32_t	active_h;
+	uint32_t	front_porch_h;
+	uint32_t	pulse_width_h;
+	uint32_t	back_porch_h;
+	uint32_t	active_low_h;
+	uint32_t	active_v;
+	uint32_t	front_porch_v;
+	uint32_t	pulse_width_v;
+	uint32_t	back_porch_v;
+	uint32_t	active_low_v;
+	/* Must divide by 1000 to get the actual frequency in MHz */
+	uint32_t	pixel_freq;
+	/* Must divide by 1000 to get the actual frequency in Hz */
+	uint32_t	refresh_rate;
+	uint32_t	interlaced;
+	uint32_t	supported;
+	enum aspect_ratio ar;
+	/* Flags indicating support for specific pixel formats */
+	uint32_t        pixel_formats;
+};
+
+#define MSM_HDMI_INIT_RES_PAGE          1
+
+#define MSM_HDMI_MODES_CEA		(1 << 0)
+#define MSM_HDMI_MODES_XTND		(1 << 1)
+#define MSM_HDMI_MODES_DVI		(1 << 2)
+#define MSM_HDMI_MODES_ALL		(MSM_HDMI_MODES_CEA |\
+					 MSM_HDMI_MODES_XTND |\
+					 MSM_HDMI_MODES_DVI)
+
+/* all video formats defined by CEA 861D */
+#define HDMI_VFRMT_UNKNOWN		0
+#define HDMI_VFRMT_640x480p60_4_3	1
+#define HDMI_VFRMT_720x480p60_4_3	2
+#define HDMI_VFRMT_720x480p60_16_9	3
+#define HDMI_VFRMT_1280x720p60_16_9	4
+#define HDMI_VFRMT_1920x1080i60_16_9	5
+#define HDMI_VFRMT_720x480i60_4_3	6
+#define HDMI_VFRMT_1440x480i60_4_3	HDMI_VFRMT_720x480i60_4_3
+#define HDMI_VFRMT_720x480i60_16_9	7
+#define HDMI_VFRMT_1440x480i60_16_9	HDMI_VFRMT_720x480i60_16_9
+#define HDMI_VFRMT_720x240p60_4_3	8
+#define HDMI_VFRMT_1440x240p60_4_3	HDMI_VFRMT_720x240p60_4_3
+#define HDMI_VFRMT_720x240p60_16_9	9
+#define HDMI_VFRMT_1440x240p60_16_9	HDMI_VFRMT_720x240p60_16_9
+#define HDMI_VFRMT_2880x480i60_4_3	10
+#define HDMI_VFRMT_2880x480i60_16_9	11
+#define HDMI_VFRMT_2880x240p60_4_3	12
+#define HDMI_VFRMT_2880x240p60_16_9	13
+#define HDMI_VFRMT_1440x480p60_4_3	14
+#define HDMI_VFRMT_1440x480p60_16_9	15
+#define HDMI_VFRMT_1920x1080p60_16_9	16
+#define HDMI_VFRMT_720x576p50_4_3	17
+#define HDMI_VFRMT_720x576p50_16_9	18
+#define HDMI_VFRMT_1280x720p50_16_9	19
+#define HDMI_VFRMT_1920x1080i50_16_9	20
+#define HDMI_VFRMT_720x576i50_4_3	21
+#define HDMI_VFRMT_1440x576i50_4_3	HDMI_VFRMT_720x576i50_4_3
+#define HDMI_VFRMT_720x576i50_16_9	22
+#define HDMI_VFRMT_1440x576i50_16_9	HDMI_VFRMT_720x576i50_16_9
+#define HDMI_VFRMT_720x288p50_4_3	23
+#define HDMI_VFRMT_1440x288p50_4_3	HDMI_VFRMT_720x288p50_4_3
+#define HDMI_VFRMT_720x288p50_16_9	24
+#define HDMI_VFRMT_1440x288p50_16_9	HDMI_VFRMT_720x288p50_16_9
+#define HDMI_VFRMT_2880x576i50_4_3	25
+#define HDMI_VFRMT_2880x576i50_16_9	26
+#define HDMI_VFRMT_2880x288p50_4_3	27
+#define HDMI_VFRMT_2880x288p50_16_9	28
+#define HDMI_VFRMT_1440x576p50_4_3	29
+#define HDMI_VFRMT_1440x576p50_16_9	30
+#define HDMI_VFRMT_1920x1080p50_16_9	31
+#define HDMI_VFRMT_1920x1080p24_16_9	32
+#define HDMI_VFRMT_1920x1080p25_16_9	33
+#define HDMI_VFRMT_1920x1080p30_16_9	34
+#define HDMI_VFRMT_2880x480p60_4_3	35
+#define HDMI_VFRMT_2880x480p60_16_9	36
+#define HDMI_VFRMT_2880x576p50_4_3	37
+#define HDMI_VFRMT_2880x576p50_16_9	38
+#define HDMI_VFRMT_1920x1250i50_16_9	39
+#define HDMI_VFRMT_1920x1080i100_16_9	40
+#define HDMI_VFRMT_1280x720p100_16_9	41
+#define HDMI_VFRMT_720x576p100_4_3	42
+#define HDMI_VFRMT_720x576p100_16_9	43
+#define HDMI_VFRMT_720x576i100_4_3	44
+#define HDMI_VFRMT_1440x576i100_4_3	HDMI_VFRMT_720x576i100_4_3
+#define HDMI_VFRMT_720x576i100_16_9	45
+#define HDMI_VFRMT_1440x576i100_16_9	HDMI_VFRMT_720x576i100_16_9
+#define HDMI_VFRMT_1920x1080i120_16_9	46
+#define HDMI_VFRMT_1280x720p120_16_9	47
+#define HDMI_VFRMT_720x480p120_4_3	48
+#define HDMI_VFRMT_720x480p120_16_9	49
+#define HDMI_VFRMT_720x480i120_4_3	50
+#define HDMI_VFRMT_1440x480i120_4_3	HDMI_VFRMT_720x480i120_4_3
+#define HDMI_VFRMT_720x480i120_16_9	51
+#define HDMI_VFRMT_1440x480i120_16_9	HDMI_VFRMT_720x480i120_16_9
+#define HDMI_VFRMT_720x576p200_4_3	52
+#define HDMI_VFRMT_720x576p200_16_9	53
+#define HDMI_VFRMT_720x576i200_4_3	54
+#define HDMI_VFRMT_1440x576i200_4_3	HDMI_VFRMT_720x576i200_4_3
+#define HDMI_VFRMT_720x576i200_16_9	55
+#define HDMI_VFRMT_1440x576i200_16_9	HDMI_VFRMT_720x576i200_16_9
+#define HDMI_VFRMT_720x480p240_4_3	56
+#define HDMI_VFRMT_720x480p240_16_9	57
+#define HDMI_VFRMT_720x480i240_4_3	58
+#define HDMI_VFRMT_1440x480i240_4_3	HDMI_VFRMT_720x480i240_4_3
+#define HDMI_VFRMT_720x480i240_16_9	59
+#define HDMI_VFRMT_1440x480i240_16_9	HDMI_VFRMT_720x480i240_16_9
+#define HDMI_VFRMT_1280x720p24_16_9	60
+#define HDMI_VFRMT_1280x720p25_16_9	61
+#define HDMI_VFRMT_1280x720p30_16_9	62
+#define HDMI_VFRMT_1920x1080p120_16_9	63
+#define HDMI_VFRMT_1920x1080p100_16_9	64
+#define HDMI_VFRMT_1280x720p24_64_27    65
+#define HDMI_VFRMT_1280x720p25_64_27    66
+#define HDMI_VFRMT_1280x720p30_64_27    67
+#define HDMI_VFRMT_1280x720p50_64_27    68
+#define HDMI_VFRMT_1280x720p60_64_27    69
+#define HDMI_VFRMT_1280x720p100_64_27   70
+#define HDMI_VFRMT_1280x720p120_64_27   71
+#define HDMI_VFRMT_1920x1080p24_64_27   72
+#define HDMI_VFRMT_1920x1080p25_64_27   73
+#define HDMI_VFRMT_1920x1080p30_64_27   74
+#define HDMI_VFRMT_1920x1080p50_64_27   75
+#define HDMI_VFRMT_1920x1080p60_64_27   76
+#define HDMI_VFRMT_1920x1080p100_64_27  77
+#define HDMI_VFRMT_1920x1080p120_64_27  78
+#define HDMI_VFRMT_1680x720p24_64_27    79
+#define HDMI_VFRMT_1680x720p25_64_27    80
+#define HDMI_VFRMT_1680x720p30_64_27    81
+#define HDMI_VFRMT_1680x720p50_64_27    82
+#define HDMI_VFRMT_1680x720p60_64_27    83
+#define HDMI_VFRMT_1680x720p100_64_27   84
+#define HDMI_VFRMT_1680x720p120_64_27   85
+#define HDMI_VFRMT_2560x1080p24_64_27   86
+#define HDMI_VFRMT_2560x1080p25_64_27   87
+#define HDMI_VFRMT_2560x1080p30_64_27   88
+#define HDMI_VFRMT_2560x1080p50_64_27   89
+#define HDMI_VFRMT_2560x1080p60_64_27   90
+#define HDMI_VFRMT_2560x1080p100_64_27  91
+#define HDMI_VFRMT_2560x1080p120_64_27  92
+#define HDMI_VFRMT_3840x2160p24_16_9    93
+#define HDMI_VFRMT_3840x2160p25_16_9    94
+#define HDMI_VFRMT_3840x2160p30_16_9    95
+#define HDMI_VFRMT_3840x2160p50_16_9    96
+#define HDMI_VFRMT_3840x2160p60_16_9    97
+#define HDMI_VFRMT_4096x2160p24_256_135 98
+#define HDMI_VFRMT_4096x2160p25_256_135 99
+#define HDMI_VFRMT_4096x2160p30_256_135 100
+#define HDMI_VFRMT_4096x2160p50_256_135 101
+#define HDMI_VFRMT_4096x2160p60_256_135 102
+#define HDMI_VFRMT_3840x2160p24_64_27   103
+#define HDMI_VFRMT_3840x2160p25_64_27   104
+#define HDMI_VFRMT_3840x2160p30_64_27   105
+#define HDMI_VFRMT_3840x2160p50_64_27   106
+#define HDMI_VFRMT_3840x2160p60_64_27   107
+
+/* Video Identification Codes 108-127 are reserved for future use */
+#define HDMI_VFRMT_END			127
+
+#define EVFRMT_OFF(x)			(HDMI_VFRMT_END + x)
+
+/* extended video formats */
+#define HDMI_EVFRMT_3840x2160p30_16_9	EVFRMT_OFF(1)
+#define HDMI_EVFRMT_3840x2160p25_16_9	EVFRMT_OFF(2)
+#define HDMI_EVFRMT_3840x2160p24_16_9	EVFRMT_OFF(3)
+#define HDMI_EVFRMT_4096x2160p24_16_9	EVFRMT_OFF(4)
+#define HDMI_EVFRMT_END			HDMI_EVFRMT_4096x2160p24_16_9
+
+#define WQXGA_OFF(x)			(HDMI_EVFRMT_END + x)
+
+/* WQXGA */
+#define HDMI_VFRMT_2560x1600p60_16_9	WQXGA_OFF(1)
+#define HDMI_WQXGAFRMT_END		HDMI_VFRMT_2560x1600p60_16_9
+
+#define WXGA_OFF(x)			(HDMI_WQXGAFRMT_END + x)
+
+/* WXGA */
+#define HDMI_VFRMT_1280x800p60_16_10	WXGA_OFF(1)
+#define HDMI_VFRMT_1366x768p60_16_10	WXGA_OFF(2)
+#define HDMI_WXGAFRMT_END		HDMI_VFRMT_1366x768p60_16_10
+
+#define ETI_OFF(x)			(HDMI_WXGAFRMT_END + x)
+
+/* ESTABLISHED TIMINGS I */
+#define HDMI_VFRMT_800x600p60_4_3	ETI_OFF(1)
+#define ETI_VFRMT_END			HDMI_VFRMT_800x600p60_4_3
+
+#define ETII_OFF(x)			(ETI_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS II */
+#define HDMI_VFRMT_1024x768p60_4_3	ETII_OFF(1)
+#define HDMI_VFRMT_1280x1024p60_5_4	ETII_OFF(2)
+#define ETII_VFRMT_END			HDMI_VFRMT_1280x1024p60_5_4
+
+#define ETIII_OFF(x)			(ETII_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS III */
+#define HDMI_VFRMT_848x480p60_16_9	ETIII_OFF(1)
+#define HDMI_VFRMT_1280x960p60_4_3	ETIII_OFF(2)
+#define HDMI_VFRMT_1360x768p60_16_9	ETIII_OFF(3)
+#define HDMI_VFRMT_1440x900p60_16_10	ETIII_OFF(4)
+#define HDMI_VFRMT_1400x1050p60_4_3	ETIII_OFF(5)
+#define HDMI_VFRMT_1680x1050p60_16_10	ETIII_OFF(6)
+#define HDMI_VFRMT_1600x1200p60_4_3	ETIII_OFF(7)
+#define HDMI_VFRMT_1920x1200p60_16_10	ETIII_OFF(8)
+#define ETIII_VFRMT_END			HDMI_VFRMT_1920x1200p60_16_10
+
+#define RESERVE_OFF(x)			(ETIII_VFRMT_END + x)
+
+#define HDMI_VFRMT_RESERVE1		RESERVE_OFF(1)
+#define HDMI_VFRMT_RESERVE2		RESERVE_OFF(2)
+#define HDMI_VFRMT_RESERVE3		RESERVE_OFF(3)
+#define HDMI_VFRMT_RESERVE4		RESERVE_OFF(4)
+#define HDMI_VFRMT_RESERVE5		RESERVE_OFF(5)
+#define HDMI_VFRMT_RESERVE6		RESERVE_OFF(6)
+#define HDMI_VFRMT_RESERVE7		RESERVE_OFF(7)
+#define HDMI_VFRMT_RESERVE8		RESERVE_OFF(8)
+#define RESERVE_VFRMT_END		HDMI_VFRMT_RESERVE8
+
+#define HDMI_VFRMT_MAX			(RESERVE_VFRMT_END + 1)
+
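Working the chained offsets above through (plain arithmetic): the extended formats occupy 128-131, WQXGA is 132, WXGA is 133-134, Established Timings I, II and III cover 135, 136-137 and 138-145, the reserved slots are 146-153, and HDMI_VFRMT_MAX comes out to 154. A compile-time sanity sketch, assuming kernel-side code since BUILD_BUG_ON is not available to uapi consumers:

/* Values follow directly from the offset chain above. */
static inline void msm_hdmi_vfrmt_offsets_sanity(void)
{
	BUILD_BUG_ON(HDMI_EVFRMT_3840x2160p30_16_9 != 128);
	BUILD_BUG_ON(HDMI_EVFRMT_END != 131);
	BUILD_BUG_ON(HDMI_VFRMT_2560x1600p60_16_9 != 132);
	BUILD_BUG_ON(HDMI_WXGAFRMT_END != 134);
	BUILD_BUG_ON(ETI_VFRMT_END != 135);
	BUILD_BUG_ON(ETII_VFRMT_END != 137);
	BUILD_BUG_ON(ETIII_VFRMT_END != 145);
	BUILD_BUG_ON(RESERVE_VFRMT_END != 153);
	BUILD_BUG_ON(HDMI_VFRMT_MAX != 154);
}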
+/* Timing information for supported modes */
+#define VFRMT_NOT_SUPPORTED(VFRMT) \
+	{VFRMT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, false,		\
+		HDMI_RES_AR_INVALID}
+
+#define HDMI_VFRMT_640x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_640x480p60_4_3, 640, 16, 96, 48, true,		\
+	 480, 10, 2, 33, true, 25200, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_720x480p60_4_3, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_720x480p60_16_9, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p60_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p60_16_9, 1280, 110, 40, 220, false,	\
+	 720, 5, 5, 20, false, 74250, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080i60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 88, 44, 148, false,	\
+	 540, 2, 5, 5, false, 74250, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x480i60_4_3_TIMING				\
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x480i60_16_9_TIMING				\
+	{HDMI_VFRMT_1440x480i60_16_9, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_720x576p50_4_3_TIMING				\
+	{HDMI_VFRMT_720x576p50_4_3, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x576p50_16_9_TIMING				\
+	{HDMI_VFRMT_720x576p50_16_9, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p50_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p50_16_9, 1280, 440, 40, 220, false,	\
+	 720,  5, 5, 20, false, 74250, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x576i50_4_3_TIMING				\
+	{HDMI_VFRMT_1440x576i50_4_3, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x576i50_16_9_TIMING				\
+	{HDMI_VFRMT_1440x576i50_16_9, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p50_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p50_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p24_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 638, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 24000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p25_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p25_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 25000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p30_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 30000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1024x768p60_4_3_TIMING                               \
+	{HDMI_VFRMT_1024x768p60_4_3, 1024, 24, 136, 160, false,         \
+	768, 2, 6, 29, false, 65000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1280x1024p60_5_4_TIMING				\
+	{HDMI_VFRMT_1280x1024p60_5_4, 1280, 48, 112, 248, false,	\
+	1024, 1, 3, 38, false, 108000, 60000, false, true, HDMI_RES_AR_5_4, 0}
+#define HDMI_VFRMT_2560x1600p60_16_9_TIMING				\
+	{HDMI_VFRMT_2560x1600p60_16_9, 2560, 48, 32, 80, false,		\
+	 1600, 3, 6, 37, false, 268500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p30_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p25_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_4096x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_4096x2160p24_16_9, 4096, 1020, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_800x600p60_4_3_TIMING				\
+	{HDMI_VFRMT_800x600p60_4_3, 800, 40, 128, 88, false,	\
+	 600, 1, 4, 23, false, 40000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_848x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_848x480p60_16_9, 848, 16, 112, 112, false,	\
+	 480, 6, 8, 23, false, 33750, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x960p60_4_3_TIMING\
+	{HDMI_VFRMT_1280x960p60_4_3, 1280, 96, 112, 312, false,	\
+	 960, 1, 3, 36, false, 108000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1360x768p60_16_9_TIMING\
+	{HDMI_VFRMT_1360x768p60_16_9, 1360, 64, 112, 256, false,	\
+	 768, 3, 6, 18, false, 85500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x900p60_16_10_TIMING\
+	{HDMI_VFRMT_1440x900p60_16_10, 1440, 48, 32, 80, false,	\
+	 900, 3, 6, 17, true, 88750, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1400x1050p60_4_3_TIMING\
+	{HDMI_VFRMT_1400x1050p60_4_3, 1400, 48, 32, 80, false,	\
+	 1050, 3, 4, 23, true, 101000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1680x1050p60_16_10_TIMING\
+	{HDMI_VFRMT_1680x1050p60_16_10, 1680, 48, 32, 80, false,	\
+	 1050, 3, 6, 21, true, 119000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1600x1200p60_4_3_TIMING\
+	{HDMI_VFRMT_1600x1200p60_4_3, 1600, 64, 192, 304, false,	\
+	 1200, 1, 3, 46, false, 162000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1920x1200p60_16_10_TIMING\
+	{HDMI_VFRMT_1920x1200p60_16_10, 1920, 48, 32, 80, false,\
+	 1200, 3, 6, 26, true, 154000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1366x768p60_16_10_TIMING\
+	{HDMI_VFRMT_1366x768p60_16_10, 1366, 70, 143, 213, false,\
+	 768, 3, 3, 24, false, 85500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1280x800p60_16_10_TIMING\
+	{HDMI_VFRMT_1280x800p60_16_10, 1280, 72, 128, 200, true,\
+	 800, 3, 6, 22, false, 83500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_3840x2160p24_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p25_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p30_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p50_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p60_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_4096x2160p24_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p24_256_135, 4096, 1020, 88, 296, false,   \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p25_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p25_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p30_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p30_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p50_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p50_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p60_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p60_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+
+#define HDMI_VFRMT_3840x2160p24_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_64_27, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p25_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p30_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p50_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p60_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+
+#define MSM_HDMI_MODES_SET_TIMING(LUT, MODE) do {		\
+	struct msm_hdmi_mode_timing_info mode = MODE##_TIMING;	\
+	LUT[MODE] = mode;\
+	} while (0)
+
+#define MSM_HDMI_MODES_INIT_TIMINGS(__lut)	\
+do {	\
+	unsigned int i;	\
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {	\
+		struct msm_hdmi_mode_timing_info mode =	\
+			VFRMT_NOT_SUPPORTED(i);	\
+		(__lut)[i] = mode;	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_SET_SUPP_TIMINGS(__lut, __type)	\
+do {	\
+	if (__type & MSM_HDMI_MODES_CEA) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_640x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p24_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p25_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p30_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p50_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p60_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_64_27); \
+	}	\
+	if (__type & MSM_HDMI_MODES_XTND) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_4096x2160p24_16_9);	\
+	}	\
+	if (__type & MSM_HDMI_MODES_DVI) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1024x768p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x1024p60_5_4);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_2560x1600p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_800x600p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_848x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x960p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1360x768p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x900p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1400x1050p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1680x1050p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1600x1200p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1200p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1366x768p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x800p60_16_10);	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_GET_DETAILS(mode, MODE) do {		\
+	struct msm_hdmi_mode_timing_info info = MODE##_TIMING;	\
+	*mode = info;						\
+	} while (0)
+
+#endif /* _UAPI_MSM_HDMI_MODES_H__ */
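A consumer of this header typically builds a lookup table indexed by format code and then queries it. A minimal, hypothetical sketch using the macros above (lut, hdmi_build_mode_lut and supported_types are illustrative names, not part of the uapi):

static struct msm_hdmi_mode_timing_info lut[HDMI_VFRMT_MAX];

static void hdmi_build_mode_lut(uint32_t supported_types)
{
	struct msm_hdmi_mode_timing_info vga;

	/* Start with every entry marked "not supported"... */
	MSM_HDMI_MODES_INIT_TIMINGS(lut);
	/* ...then fill in the supported formats, e.g. with
	 * supported_types == MSM_HDMI_MODES_ALL.
	 */
	MSM_HDMI_MODES_SET_SUPP_TIMINGS(lut, supported_types);

	/* Fetch a single entry without going through the table. */
	MSM_HDMI_MODES_GET_DETAILS(&vga, HDMI_VFRMT_640x480p60_4_3);
}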
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8199821..85d1c94 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -212,9 +212,10 @@
 		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
 			 t == PTR_TO_MAP_VALUE_OR_NULL ||
 			 t == PTR_TO_MAP_VALUE_ADJ)
-			verbose("(ks=%d,vs=%d)",
+			verbose("(ks=%d,vs=%d,id=%u)",
 				reg->map_ptr->key_size,
-				reg->map_ptr->value_size);
+				reg->map_ptr->value_size,
+				reg->id);
 		if (reg->min_value != BPF_REGISTER_MIN_RANGE)
 			verbose(",min_value=%lld",
 				(long long)reg->min_value);
@@ -443,11 +444,17 @@
 	regs[BPF_REG_1].type = PTR_TO_CTX;
 }
 
+static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+{
+	regs[regno].type = UNKNOWN_VALUE;
+	regs[regno].id = 0;
+	regs[regno].imm = 0;
+}
+
 static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
 {
 	BUG_ON(regno >= MAX_BPF_REG);
-	regs[regno].type = UNKNOWN_VALUE;
-	regs[regno].imm = 0;
+	__mark_reg_unknown_value(regs, regno);
 }
 
 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
@@ -1252,6 +1259,7 @@
 			return -EINVAL;
 		}
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
+		regs[BPF_REG_0].id = ++env->id_gen;
 	} else {
 		verbose("unknown return type %d of func %d\n",
 			fn->ret_type, func_id);
@@ -1668,8 +1676,7 @@
 						insn->src_reg);
 					return -EACCES;
 				}
-				regs[insn->dst_reg].type = UNKNOWN_VALUE;
-				regs[insn->dst_reg].map_ptr = NULL;
+				mark_reg_unknown_value(regs, insn->dst_reg);
 			}
 		} else {
 			/* case: R = imm
@@ -1931,6 +1938,43 @@
 	check_reg_overflow(true_reg);
 }
 
+static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
+			 enum bpf_reg_type type)
+{
+	struct bpf_reg_state *reg = &regs[regno];
+
+	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
+		reg->type = type;
+		/* The id is no longer needed from this point onwards, so
+		 * reset it to give state pruning a chance to take effect.
+		 */
+		reg->id = 0;
+		if (type == UNKNOWN_VALUE)
+			__mark_reg_unknown_value(regs, regno);
+	}
+}
+
+/* The logic is similar to find_good_pkt_pointers(), both could eventually
+ * be folded together at some point.
+ */
+static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
+			  enum bpf_reg_type type)
+{
+	struct bpf_reg_state *regs = state->regs;
+	u32 id = regs[regno].id;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++)
+		mark_map_reg(regs, i, id, type);
+
+	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+		if (state->stack_slot_type[i] != STACK_SPILL)
+			continue;
+		mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type);
+	}
+}
+
 static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			     struct bpf_insn *insn, int *insn_idx)
 {
@@ -2018,18 +2062,13 @@
 	if (BPF_SRC(insn->code) == BPF_K &&
 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-		if (opcode == BPF_JEQ) {
-			/* next fallthrough insn can access memory via
-			 * this register
-			 */
-			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
-			/* branch targer cannot access it, since reg == 0 */
-			mark_reg_unknown_value(other_branch->regs,
-					       insn->dst_reg);
-		} else {
-			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
-			mark_reg_unknown_value(regs, insn->dst_reg);
-		}
+		/* Mark all identical map registers in each branch as either
+		 * safe or unknown depending on the R == 0 or R != 0 condition.
+		 */
+		mark_map_regs(this_branch, insn->dst_reg,
+			      opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE);
+		mark_map_regs(other_branch, insn->dst_reg,
+			      opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE);
 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
 		   dst_reg->type == PTR_TO_PACKET &&
 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
@@ -2469,7 +2508,7 @@
 		 * we didn't do a variable access into a map then we are a-ok.
 		 */
 		if (!varlen_map_access &&
-		    rold->type == rcur->type && rold->imm == rcur->imm)
+		    memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0)
 			continue;
 
 		/* If we didn't map access then again we don't care about the
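The verifier hunks above give each successful map lookup a fresh reg->id and then, on a NULL check, update every register and spilled stack slot carrying that id instead of only the register being tested. A hedged BPF-C illustration of the program shape this accepts (my_map and key are placeholders, not from the patch):

/* Previously only the register named in the JEQ/JNE was promoted from
 * PTR_TO_MAP_VALUE_OR_NULL to PTR_TO_MAP_VALUE, so a dereference through
 * an aliased copy of the same pointer was rejected even though the
 * access is provably safe.
 */
long *val, *alias;

val = bpf_map_lookup_elem(&my_map, &key);	/* result gets a fresh id */
alias = val;					/* alias shares that id */
if (alias)					/* NULL check on one register... */
	*val = 1;				/* ...now also validates 'val' */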
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index 2bd6737..a57242e 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -229,7 +229,7 @@
 		/* Only log the first time events_limit is incremented. */
 		if (atomic64_inc_return(&pids->events_limit) == 1) {
 			pr_info("cgroup: fork rejected by pids controller in ");
-			pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+			pr_cont_cgroup_path(css->cgroup);
 			pr_cont("\n");
 		}
 		cgroup_file_notify(&pids->events_file);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 99c91f6..41f376d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -372,6 +372,7 @@
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
+static DEFINE_PER_CPU(bool, is_idle);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -3605,23 +3606,31 @@
 static int perf_event_read(struct perf_event *event, bool group)
 {
 	int event_cpu, ret = 0;
+	bool active_event_skip_read = false;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
 	 * value in the event structure:
 	 */
+	event_cpu = READ_ONCE(event->oncpu);
+
+	if (event->state == PERF_EVENT_STATE_ACTIVE) {
+		if ((unsigned int)event_cpu >= nr_cpu_ids)
+			return 0;
+		if (cpu_isolated(event_cpu) ||
+			(event->attr.exclude_idle &&
+				per_cpu(is_idle, event_cpu)))
+			active_event_skip_read = true;
+	}
+
 	if (event->state == PERF_EVENT_STATE_ACTIVE &&
-						!cpu_isolated(event->oncpu)) {
+		!active_event_skip_read) {
 		struct perf_read_data data = {
 			.event = event,
 			.group = group,
 			.ret = 0,
 		};
 
-		event_cpu = READ_ONCE(event->oncpu);
-		if ((unsigned)event_cpu >= nr_cpu_ids)
-			return 0;
-
 		preempt_disable();
 		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
@@ -3635,10 +3644,12 @@
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu,
+				__perf_event_read, &data, 1);
 		preempt_enable();
 		ret = data.ret;
-	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+	} else if (event->state == PERF_EVENT_STATE_INACTIVE ||
+			active_event_skip_read) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
@@ -3731,7 +3742,8 @@
 
 	if (!task) {
 		/* Must be root to operate on a CPU event: */
-		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+		if (!is_kernel_event(event) && perf_paranoid_cpu() &&
+			!capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
 		/*
@@ -7586,6 +7598,7 @@
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+	.events_across_hotplug = 1,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -7730,6 +7743,7 @@
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+	.events_across_hotplug = 1,
 };
 
 static inline void perf_tp_register(void)
@@ -8460,6 +8474,7 @@
 	.start		= cpu_clock_event_start,
 	.stop		= cpu_clock_event_stop,
 	.read		= cpu_clock_event_read,
+	.events_across_hotplug = 1,
 };
 
 /*
@@ -8541,6 +8556,7 @@
 	.start		= task_clock_event_start,
 	.stop		= task_clock_event_stop,
 	.read		= task_clock_event_read,
+	.events_across_hotplug = 1,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -10342,6 +10358,17 @@
 			continue;
 
 		mutex_lock(&ctx->mutex);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);
 again:
 		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
 				group_entry)
@@ -10595,7 +10622,7 @@
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	/*
@@ -10611,7 +10638,7 @@
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10639,6 +10666,7 @@
 	}
 
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
@@ -10703,6 +10731,76 @@
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+static void
+check_hotplug_start_event(struct perf_event *event)
+{
+	if (event->attr.type == PERF_TYPE_SOFTWARE) {
+		switch (event->attr.config) {
+		case PERF_COUNT_SW_CPU_CLOCK:
+			cpu_clock_event_start(event, 0);
+			break;
+		case PERF_COUNT_SW_TASK_CLOCK:
+			break;
+		default:
+			if (event->pmu->start)
+				event->pmu->start(event, 0);
+			break;
+		}
+	}
+}
+
+static int perf_event_start_swevents(unsigned int cpu)
+{
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+	struct perf_event *event;
+	int idx;
+
+	idx = srcu_read_lock(&pmus_srcu);
+	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		mutex_lock(&ctx->mutex);
+		raw_spin_lock(&ctx->lock);
+		list_for_each_entry(event, &ctx->event_list, event_entry)
+			check_hotplug_start_event(event);
+		raw_spin_unlock(&ctx->lock);
+		mutex_unlock(&ctx->mutex);
+	}
+	srcu_read_unlock(&pmus_srcu, idx);
+	return 0;
+}
+
+/*
+ * If keeping events across hotplug is supported, do not remove
+ * the event list, so the event lives beyond CPU hotplug. The
+ * context is exited via an fd close path when userspace is done
+ * and the target CPU is online. If a software clock event is
+ * active, stop the hrtimer associated with it and restart the
+ * timer when the CPU comes back online.
+ */
+static void
+check_hotplug_remove_from_context(struct perf_event *event,
+			   struct perf_cpu_context *cpuctx,
+			   struct perf_event_context *ctx)
+{
+	if (!event->pmu->events_across_hotplug) {
+		__perf_remove_from_context(event, cpuctx,
+			ctx, (void *)DETACH_GROUP);
+	} else if (event->attr.type == PERF_TYPE_SOFTWARE) {
+		switch (event->attr.config) {
+		case PERF_COUNT_SW_CPU_CLOCK:
+			cpu_clock_event_stop(event, 0);
+			break;
+		case PERF_COUNT_SW_TASK_CLOCK:
+			break;
+		default:
+			if (event->pmu->stop)
+				event->pmu->stop(event, 0);
+			break;
+		}
+	}
+}
+
 static void __perf_event_exit_context(void *__info)
 {
 	struct perf_event_context *ctx = __info;
@@ -10711,7 +10809,7 @@
 
 	raw_spin_lock(&ctx->lock);
 	list_for_each_entry(event, &ctx->event_list, event_entry)
-		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
+		check_hotplug_remove_from_context(event, cpuctx, ctx);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -10763,6 +10861,26 @@
 	.priority = INT_MIN,
 };
 
+static int event_idle_notif(struct notifier_block *nb, unsigned long action,
+							void *data)
+{
+	switch (action) {
+	case IDLE_START:
+		__this_cpu_write(is_idle, true);
+		break;
+	case IDLE_END:
+		__this_cpu_write(is_idle, false);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_event_idle_nb = {
+	.notifier_call = event_idle_notif,
+};
+
+
 void __init perf_event_init(void)
 {
 	int ret;
@@ -10776,6 +10894,7 @@
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
 	perf_event_init_cpu(smp_processor_id());
+	idle_notifier_register(&perf_event_idle_nb);
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();
@@ -10830,6 +10949,23 @@
 }
 device_initcall(perf_event_sysfs_init);
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int perf_cpu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
+				"PERF/CORE/AP_PERF_ONLINE",
+				perf_event_start_swevents,
+				perf_event_exit_cpu);
+	if (ret)
+		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
+		       ret);
+	return ret;
+}
+subsys_initcall(perf_cpu_hp_init);
+#endif
+
 #ifdef CONFIG_CGROUP_PERF
 static struct cgroup_subsys_state *
 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
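The events_across_hotplug opt-in added to the software PMUs above is a plain flag in struct pmu (an msm-specific field). A hypothetical driver PMU that wants its events preserved across hot-unplug would set it the same way; the my_* callbacks are assumed to be defined elsewhere:

static struct pmu my_hw_pmu = {
	.task_ctx_nr		= perf_invalid_context,
	.event_init		= my_event_init,
	.add			= my_event_add,
	.del			= my_event_del,
	.start			= my_event_start,
	.stop			= my_event_stop,
	.read			= my_event_read,
	/* keep events alive over CPU hot-unplug instead of tearing them down */
	.events_across_hotplug	= 1,
};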
diff --git a/kernel/exit.c b/kernel/exit.c
index 46a7c2b..83e8afa 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -778,6 +778,7 @@
 
 	exit_signals(tsk);  /* sets PF_EXITING */
 
+	sched_exit(tsk);
 	schedtune_exit_task(tsk);
 
 	/*
diff --git a/kernel/futex.c b/kernel/futex.c
index 38b68c2..4c6b6e6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2813,7 +2813,6 @@
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct rt_mutex_waiter rt_waiter;
-	struct rt_mutex *pi_mutex = NULL;
 	struct futex_hash_bucket *hb;
 	union futex_key key2 = FUTEX_KEY_INIT;
 	struct futex_q q = futex_q_init;
@@ -2897,6 +2896,8 @@
 		if (q.pi_state && (q.pi_state->owner != current)) {
 			spin_lock(q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
+			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+				rt_mutex_unlock(&q.pi_state->pi_mutex);
 			/*
 			 * Drop the reference to the pi state which
 			 * the requeue_pi() code acquired for us.
@@ -2905,6 +2906,8 @@
 			spin_unlock(q.lock_ptr);
 		}
 	} else {
+		struct rt_mutex *pi_mutex;
+
 		/*
 		 * We have been woken up by futex_unlock_pi(), a timeout, or a
 		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2928,18 +2931,19 @@
 		if (res)
 			ret = (res < 0) ? res : 0;
 
+		/*
+		 * If fixup_pi_state_owner() faulted and was unable to handle
+		 * the fault, unlock the rt_mutex and return the fault to
+		 * userspace.
+		 */
+		if (ret && rt_mutex_owner(pi_mutex) == current)
+			rt_mutex_unlock(pi_mutex);
+
 		/* Unqueue and drop the lock. */
 		unqueue_me_pi(&q);
 	}
 
-	/*
-	 * If fixup_pi_state_owner() faulted and was unable to handle the
-	 * fault, unlock the rt_mutex and return the fault to userspace.
-	 */
-	if (ret == -EFAULT) {
-		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
-			rt_mutex_unlock(pi_mutex);
-	} else if (ret == -EINTR) {
+	if (ret == -EINTR) {
 		/*
 		 * We've already been requeued, but cannot restart by calling
 		 * futex_lock_pi() directly. We could restart this syscall, but
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 1591f6b..2bef4ab 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -216,10 +216,8 @@
 		 */
 		if (sem->count == 0)
 			break;
-		if (signal_pending_state(state, current)) {
-			ret = -EINTR;
-			goto out;
-		}
+		if (signal_pending_state(state, current))
+			goto out_nolock;
 		set_task_state(tsk, state);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		schedule();
@@ -227,12 +225,19 @@
 	}
 	/* got the lock */
 	sem->count = -1;
-out:
 	list_del(&waiter.list);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
+
+out_nolock:
+	list_del(&waiter.list);
+	if (!list_empty(&sem->wait_list))
+		__rwsem_do_wake(sem, 1);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	return -EINTR;
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 22d67f0..0854263 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -703,12 +703,22 @@
 		/* silent return to keep pcm code cleaner */
 
 	if (!pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+		WARN(1, "pm_qos_remove_request() called for unknown object\n");
 		return;
 	}
 
 	cancel_delayed_work_sync(&req->work);
 
+#ifdef CONFIG_SMP
+	if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+		int ret = 0;
+		/* Unregister the IRQ affinity notifier */
+		ret = irq_set_affinity_notifier(req->irq, NULL);
+		if (ret)
+			WARN(1, "IRQ affinity notify set failed\n");
+	}
+#endif
+
 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     req, PM_QOS_REMOVE_REQ,
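The hunk above makes pm_qos_remove_request() also drop the IRQ affinity notifier that an IRQ-affine request installs. For context, a hedged sketch of the request lifecycle being cleaned up (the .type and .irq fields are the msm extension referenced in the hunk; the IRQ number and latency value are made up):

static struct pm_qos_request my_req = {
	.type = PM_QOS_REQ_AFFINE_IRQ,	/* vote only on CPUs servicing this IRQ */
	.irq  = 123,			/* hypothetical device IRQ */
};

/* Request a CPU DMA latency of at most 10 usec. */
pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 10);
/* ... */
/* With this patch, removal also unhooks the IRQ affinity notifier. */
pm_qos_remove_request(&my_req);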
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f82983..f7f5256 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -89,6 +89,7 @@
 #include "sched.h"
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
+#include "../time/tick-internal.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -2171,6 +2172,7 @@
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+	cpufreq_update_util(rq, 0);
 	raw_spin_unlock(&rq->lock);
 
 	rcu_read_lock();
@@ -2263,6 +2265,7 @@
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 		note_task_waking(p, wallclock);
 	}
@@ -3369,6 +3372,8 @@
 
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+
+	cpufreq_update_util(rq, 0);
 	early_notif = early_detection_notify(rq, wallclock);
 
 	raw_spin_unlock(&rq->lock);
@@ -3393,7 +3398,8 @@
 	if (curr->sched_class == &fair_sched_class)
 		check_for_migration(rq, curr);
 
-	core_ctl_check(wallclock);
+	if (cpu == tick_do_timer_cpu)
+		core_ctl_check(wallclock);
 	sched_freq_tick(cpu);
 }
 
@@ -3702,6 +3708,7 @@
 	if (likely(prev != next)) {
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		if (!is_idle_task(prev) && !prev->on_rq)
 			update_avg_burst(prev);
 
@@ -3715,6 +3722,7 @@
 		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
 		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 983159c..1dde338 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -653,6 +653,9 @@
 	int ret = 0;
 	bool boost_state_changed = false;
 
+	if (unlikely(!initialized))
+		return 0;
+
 	spin_lock_irqsave(&state_lock, flags);
 	for_each_cluster(cluster, index) {
 		if (cluster->is_big_cluster) {
@@ -931,6 +934,42 @@
 
 /* ============================ init code ============================== */
 
+static cpumask_var_t core_ctl_disable_cpumask;
+static bool core_ctl_disable_cpumask_present;
+
+static int __init core_ctl_disable_setup(char *str)
+{
+	if (!*str)
+		return -EINVAL;
+
+	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);
+
+	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
+		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
+		return -EINVAL;
+	}
+
+	core_ctl_disable_cpumask_present = true;
+	pr_info("disable_cpumask=%*pbl\n",
+			cpumask_pr_args(core_ctl_disable_cpumask));
+
+	return 0;
+}
+early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
+
+static bool should_skip(const struct cpumask *mask)
+{
+	if (!core_ctl_disable_cpumask_present)
+		return false;
+
+	/*
+	 * We operate on a cluster basis. Disable core_ctl for a
+	 * cluster if all of its CPUs are specified in
+	 * core_ctl_disable_cpumask.
+	 */
+	return cpumask_subset(mask, core_ctl_disable_cpumask);
+}
+
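As a usage note (illustrative values): booting a hypothetical 4+4 system with

	core_ctl_disable_cpumask=0-3

parses the range with cpulist_parse() and, via should_skip(), leaves core control disabled for any cluster whose CPUs all fall inside 0-3, while a cluster that is only partially covered keeps core control.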
 static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
 {
 	unsigned int i;
@@ -952,6 +991,9 @@
 	unsigned int cpu;
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
+	if (should_skip(mask))
+		return 0;
+
 	if (find_cluster_by_first_cpu(first_cpu))
 		return 0;
 
@@ -1052,6 +1094,9 @@
 {
 	unsigned int cpu;
 
+	if (should_skip(cpu_possible_mask))
+		return 0;
+
 	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
 					* NSEC_PER_MSEC;
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 124eb6a..0085f66 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1770,12 +1770,11 @@
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
-#else
+#endif
 		if (dl_task(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
 		else
 			resched_curr(rq);
-#endif
 	}
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6a59802..2a8643c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10554,7 +10554,6 @@
 	u8 need_idle:1;
 	u8 need_waker_cluster:1;
 	u8 sync:1;
-	u8 ignore_prev_cpu:1;
 	enum sched_boost_policy boost_policy;
 	u8 pack_task:1;
 	int prev_cpu;
@@ -10564,6 +10563,7 @@
 	u64 cpu_load;
 	u32 sbc_best_flag;
 	u32 sbc_best_cluster_flag;
+	struct cpumask search_cpus;
 };
 
 struct cluster_cpu_stats {
@@ -10768,11 +10768,14 @@
 {
 	struct sched_cluster *next = NULL;
 	int i;
+	struct cpumask search_cpus;
 
 	while (!bitmap_empty(env->backup_list, num_clusters)) {
 		next = next_candidate(env->backup_list, 0, num_clusters);
 		__clear_bit(next->id, env->backup_list);
-		for_each_cpu_and(i, &env->p->cpus_allowed, &next->cpus) {
+
+		cpumask_and(&search_cpus, &env->search_cpus, &next->cpus);
+		for_each_cpu(i, &search_cpus) {
 			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
 			sched_irqload(i), power_cost(i, task_load(env->p) +
 					cpu_cravg_sync(i, env->sync)), 0);
@@ -10944,11 +10947,7 @@
 	int i;
 	struct cpumask search_cpus;
 
-	cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
-	cpumask_andnot(&search_cpus, &search_cpus, cpu_isolated_mask);
-
-	if (env->ignore_prev_cpu)
-		cpumask_clear_cpu(env->prev_cpu, &search_cpus);
+	cpumask_and(&search_cpus, &env->search_cpus, &c->cpus);
 
 	env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
 
@@ -10960,7 +10959,7 @@
 			power_cost(i, task_load(env->p) +
 					cpu_cravg_sync(i, env->sync)), 0);
 
-		if (unlikely(!cpu_active(i)) || skip_cpu(i, env))
+		if (skip_cpu(i, env))
 			continue;
 
 		update_spare_capacity(stats, env, i, c->capacity,
@@ -11015,9 +11014,7 @@
 		return false;
 
 	prev_cpu = env->prev_cpu;
-	if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
-					unlikely(!cpu_active(prev_cpu)) ||
-					cpu_isolated(prev_cpu))
+	if (!cpumask_test_cpu(prev_cpu, &env->search_cpus))
 		return false;
 
 	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
@@ -11050,7 +11047,7 @@
 			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
 		update_spare_capacity(stats, env, prev_cpu,
 				cluster->capacity, env->cpu_load);
-		env->ignore_prev_cpu = 1;
+		cpumask_clear_cpu(prev_cpu, &env->search_cpus);
 		return false;
 	}
 
@@ -11066,23 +11063,17 @@
 }
 
 static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu)
+bias_to_waker_cpu(struct cpu_select_env *env, int cpu)
 {
 	return sysctl_sched_prefer_sync_wakee_to_waker &&
 	       cpu_rq(cpu)->nr_running == 1 &&
-	       cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
-	       cpu_active(cpu) && !cpu_isolated(cpu);
+	       cpumask_test_cpu(cpu, &env->search_cpus);
 }
 
 static inline int
-cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
+cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster)
 {
-	cpumask_t tmp_mask;
-
-	cpumask_and(&tmp_mask, &cluster->cpus, cpu_active_mask);
-	cpumask_and(&tmp_mask, &tmp_mask, &p->cpus_allowed);
-
-	return !cpumask_empty(&tmp_mask);
+	return cpumask_intersects(&env->search_cpus, &cluster->cpus);
 }
 
 /* return cheapest cpu that can fit this task */
@@ -11103,7 +11094,6 @@
 		.need_waker_cluster	= 0,
 		.sync			= sync,
 		.prev_cpu		= target,
-		.ignore_prev_cpu	= 0,
 		.rtg			= NULL,
 		.sbc_best_flag		= 0,
 		.sbc_best_cluster_flag	= 0,
@@ -11116,6 +11106,9 @@
 	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
 	bitmap_zero(env.backup_list, NR_CPUS);
 
+	cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
+	cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask);
+
 	init_cluster_cpu_stats(&stats);
 	special = env_has_special_flags(&env);
 
@@ -11125,19 +11118,19 @@
 
 	if (grp && grp->preferred_cluster) {
 		pref_cluster = grp->preferred_cluster;
-		if (!cluster_allowed(p, pref_cluster))
+		if (!cluster_allowed(&env, pref_cluster))
 			clear_bit(pref_cluster->id, env.candidate_list);
 		else
 			env.rtg = grp;
 	} else if (!special) {
 		cluster = cpu_rq(cpu)->cluster;
 		if (wake_to_waker_cluster(&env)) {
-			if (bias_to_waker_cpu(p, cpu)) {
+			if (bias_to_waker_cpu(&env, cpu)) {
 				target = cpu;
 				sbc_flag = SBC_FLAG_WAKER_CLUSTER |
 					   SBC_FLAG_WAKER_CPU;
 				goto out;
-			} else if (cluster_allowed(p, cluster)) {
+			} else if (cluster_allowed(&env, cluster)) {
 				env.need_waker_cluster = 1;
 				bitmap_zero(env.candidate_list, NR_CPUS);
 				__set_bit(cluster->id, env.candidate_list);
@@ -11387,8 +11380,15 @@
 	nice = task_nice(p);
 	rcu_read_lock();
 	grp = task_related_thread_group(p);
+	/*
+	 * Don't assume higher capacity means higher power. If the task
+	 * is running on the power efficient CPU, avoid migrating it
+	 * to a lower capacity cluster.
+	 */
 	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
-	       upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
+			upmigrate_discouraged(p)) &&
+			cpu_capacity(cpu) > min_capacity &&
+			cpu_max_power_cost(cpu) == max_power_cost) {
 		rcu_read_unlock();
 		return DOWN_MIGRATION;
 	}
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 0fbee29..4de373f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -454,6 +454,12 @@
 	cluster1 = container_of(a, struct sched_cluster, list);
 	cluster2 = container_of(b, struct sched_cluster, list);
 
+	/*
+	 * Don't assume higher capacity means higher power. If the
+	 * power cost is the same, sort the higher capacity cluster before
+	 * the lower capacity cluster to start placing the tasks
+	 * on the higher capacity cluster.
+	 */
 	ret = cluster1->max_power_cost > cluster2->max_power_cost ||
 		(cluster1->max_power_cost == cluster2->max_power_cost &&
 		cluster1->max_possible_capacity <
@@ -952,8 +958,8 @@
 unsigned int __read_mostly sysctl_sched_short_burst;
 unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
 
-static void
-_update_up_down_migrate(unsigned int *up_migrate, unsigned int *down_migrate)
+static void _update_up_down_migrate(unsigned int *up_migrate,
+			unsigned int *down_migrate, bool is_group)
 {
 	unsigned int delta;
 
@@ -967,7 +973,8 @@
 	*up_migrate >>= 10;
 	*up_migrate *= NSEC_PER_USEC;
 
-	*up_migrate = min(*up_migrate, sched_ravg_window);
+	if (!is_group)
+		*up_migrate = min(*up_migrate, sched_ravg_window);
 
 	*down_migrate /= NSEC_PER_USEC;
 	*down_migrate *= up_down_migrate_scale_factor;
@@ -982,14 +989,14 @@
 	unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
 	unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
 
-	_update_up_down_migrate(&up_migrate, &down_migrate);
+	_update_up_down_migrate(&up_migrate, &down_migrate, false);
 	sched_upmigrate = up_migrate;
 	sched_downmigrate = down_migrate;
 
 	up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct);
 	down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct);
 
-	_update_up_down_migrate(&up_migrate, &down_migrate);
+	_update_up_down_migrate(&up_migrate, &down_migrate, true);
 	sched_group_upmigrate = up_migrate;
 	sched_group_downmigrate = down_migrate;
 }
@@ -2571,7 +2578,8 @@
 	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
 }
 
-static int account_busy_for_task_demand(struct task_struct *p, int event)
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
 {
 	/*
 	 * No need to bother updating task demand for exiting tasks
@@ -2590,6 +2598,17 @@
 			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
 		return 0;
 
+	/*
+	 * TASK_UPDATE can be called on a sleeping task when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+	}
+
 	return 1;
 }
 
@@ -2730,7 +2749,7 @@
 	u64 runtime;
 
 	new_window = mark_start < window_start;
-	if (!account_busy_for_task_demand(p, event)) {
+	if (!account_busy_for_task_demand(rq, p, event)) {
 		if (new_window)
 			/*
 			 * If the time accounted isn't being accounted as
@@ -3161,6 +3180,13 @@
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
 				 0);
 
+		/*
+		 * Ensure that we don't report load for 'cpu' again via the
+		 * cpufreq_update_util path in the window that started at
+		 * rq->window_start
+		 */
+		rq->load_reported_window = rq->window_start;
+
 		account_load_subtractions(rq);
 		load[i] = rq->prev_runnable_sum;
 		nload[i] = rq->nt_prev_runnable_sum;
@@ -3591,6 +3617,11 @@
 
 	migrate_top_tasks(p, src_rq, dest_rq);
 
+	if (!same_freq_domain(new_cpu, task_cpu(p))) {
+		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+	}
+
 	if (p == src_rq->ed_task) {
 		src_rq->ed_task = NULL;
 		if (!dest_rq->ed_task)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index bcac711..709f719 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2410,10 +2410,9 @@
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 			queue_push_tasks(rq);
-#else
+#endif /* CONFIG_SMP */
 		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
-#endif /* CONFIG_SMP */
 	}
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e7f6794..5e25011 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -79,6 +79,7 @@
 	u64 time;
 };
 
+extern unsigned int sched_disable_window_stats;
 #endif /* CONFIG_SCHED_HMP */
 
 
@@ -770,6 +771,7 @@
 
 	int cstate, wakeup_latency, wakeup_energy;
 	u64 window_start;
+	u64 load_reported_window;
 	unsigned long hmp_flags;
 
 	u64 cur_irqload;
@@ -2142,6 +2144,18 @@
 {
 	struct update_util_data *data;
 
+#ifdef CONFIG_SCHED_HMP
+	/*
+	 * Skip if we've already reported, but not if this is an inter-cluster
+	 * migration
+	 */
+	if (!sched_disable_window_stats &&
+		(rq->load_reported_window == rq->window_start) &&
+		!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+		return;
+	rq->load_reported_window = rq->window_start;
+#endif
+
 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
 	if (data)
 		data->func(data, rq_clock(rq), flags);
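The gate above only affects whoever installed the per-cpu update_util hook. For context, a minimal sketch of how a governor registers such a hook against the 4.9 scheduler API (names prefixed my_ are hypothetical):

#include <linux/sched.h>
#include <linux/percpu.h>

struct my_gov_cpu {
	struct update_util_data update_util;
};
static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu_data);

static void my_gov_update(struct update_util_data *data, u64 time,
			  unsigned int flags)
{
	/* Invoked from cpufreq_update_util(); with SCHED_HMP this now fires
	 * at most once per window unless SCHED_CPUFREQ_INTERCLUSTER_MIG is set.
	 */
}

static void my_gov_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu,
			&per_cpu(my_gov_cpu_data, cpu).update_util,
			my_gov_update);
}

static void my_gov_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
}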
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4274797..ed7ba6d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -547,6 +547,19 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_SWITCH_PROFILER
+	bool "CPU frequency switch time profiler"
+	select GENERIC_TRACER
+	help
+	  This option enables the CPU frequency switch profiler. A file is
+	  created in debugfs called "cpu_freq_switch_profile_enabled", which
+	  defaults to zero. When a 1 is echoed into this file, profiling begins.
+	  When a zero is echoed, profiling stops. A "cpu_freq_switch" file is
+	  also created in the trace_stats directory; this file shows the
+	  switches that have occurred and duration statistics.
+
+	  If in doubt, say N.
+
 config FTRACE_MCOUNT_RECORD
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 08e5e47..8ee9cc1 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -38,6 +38,7 @@
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
+obj-$(CONFIG_CPU_FREQ_SWITCH_PROFILER) += trace_cpu_freq_switch.o
 obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index fa7fd14..6d310ab 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -515,8 +515,8 @@
 	tsv_qtimer_write(&ectxt);
 	avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
 	va_start(arg_list, fmt);
-	data_size = vsnprintf((ectxt.buff + ectxt.offset + hdr_size),
-			      avail_size, fmt, arg_list);
+	data_size = vscnprintf((ectxt.buff + ectxt.offset + hdr_size),
+				avail_size, fmt, arg_list);
 	va_end(arg_list);
 	tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
 	ectxt.offset += data_size;
diff --git a/kernel/trace/trace_cpu_freq_switch.c b/kernel/trace/trace_cpu_freq_switch.c
new file mode 100644
index 0000000..0fcfde3
--- /dev/null
+++ b/kernel/trace/trace_cpu_freq_switch.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/hrtimer.h>
+#include <linux/tracefs.h>
+#include <linux/ktime.h>
+#include <trace/events/power.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+struct trans {
+	struct rb_node node;
+	unsigned int cpu;
+	unsigned int start_freq;
+	unsigned int end_freq;
+	unsigned int min_us;
+	unsigned int max_us;
+	ktime_t total_t;
+	unsigned int count;
+};
+static struct rb_root freq_trans_tree = RB_ROOT;
+
+static struct trans *tr_search(struct rb_root *root, unsigned int cpu,
+			       unsigned int start_freq, unsigned int end_freq)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct trans *tr = container_of(node, struct trans, node);
+
+		if (cpu < tr->cpu)
+			node = node->rb_left;
+		else if (cpu > tr->cpu)
+			node = node->rb_right;
+		else if (start_freq < tr->start_freq)
+			node = node->rb_left;
+		else if (start_freq > tr->start_freq)
+			node = node->rb_right;
+		else if (end_freq < tr->end_freq)
+			node = node->rb_left;
+		else if (end_freq > tr->end_freq)
+			node = node->rb_right;
+		else
+			return tr;
+	}
+	return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct trans *tr)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	while (*new) {
+		struct trans *this = container_of(*new, struct trans, node);
+
+		parent = *new;
+		if (tr->cpu < this->cpu)
+			new = &((*new)->rb_left);
+		else if (tr->cpu > this->cpu)
+			new = &((*new)->rb_right);
+		else if (tr->start_freq < this->start_freq)
+			new = &((*new)->rb_left);
+		else if (tr->start_freq > this->start_freq)
+			new = &((*new)->rb_right);
+		else if (tr->end_freq < this->end_freq)
+			new = &((*new)->rb_left);
+		else if (tr->end_freq > this->end_freq)
+			new = &((*new)->rb_right);
+		else
+			return -EINVAL;
+	}
+
+	rb_link_node(&tr->node, parent, new);
+	rb_insert_color(&tr->node, root);
+
+	return 0;
+}
+
+struct trans_state {
+	spinlock_t lock;
+	unsigned int start_freq;
+	unsigned int end_freq;
+	ktime_t start_t;
+	bool started;
+};
+static DEFINE_PER_CPU(struct trans_state, freq_trans_state);
+
+static DEFINE_SPINLOCK(state_lock);
+
+static void probe_start(void *ignore, unsigned int start_freq,
+			unsigned int end_freq, unsigned int cpu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	per_cpu(freq_trans_state, cpu).start_freq = start_freq;
+	per_cpu(freq_trans_state, cpu).end_freq = end_freq;
+	per_cpu(freq_trans_state, cpu).start_t = ktime_get();
+	per_cpu(freq_trans_state, cpu).started = true;
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void probe_end(void *ignore, unsigned int cpu)
+{
+	unsigned long flags;
+	struct trans *tr;
+	s64 dur_us;
+	ktime_t dur_t, end_t = ktime_get();
+
+	spin_lock_irqsave(&state_lock, flags);
+
+	if (!per_cpu(freq_trans_state, cpu).started)
+		goto out;
+
+	dur_t = ktime_sub(end_t, per_cpu(freq_trans_state, cpu).start_t);
+	dur_us = ktime_to_us(dur_t);
+
+	tr = tr_search(&freq_trans_tree, cpu,
+		       per_cpu(freq_trans_state, cpu).start_freq,
+		       per_cpu(freq_trans_state, cpu).end_freq);
+	if (!tr) {
+		tr = kzalloc(sizeof(*tr), GFP_ATOMIC);
+		if (!tr) {
+			WARN_ONCE(1, "CPU frequency trace is now invalid!\n");
+			goto out;
+		}
+
+		tr->start_freq = per_cpu(freq_trans_state, cpu).start_freq;
+		tr->end_freq = per_cpu(freq_trans_state, cpu).end_freq;
+		tr->cpu = cpu;
+		tr->min_us = UINT_MAX;
+		tr_insert(&freq_trans_tree, tr);
+	}
+	tr->total_t = ktime_add(tr->total_t, dur_t);
+	tr->count++;
+
+	if (dur_us > tr->max_us)
+		tr->max_us = dur_us;
+	if (dur_us < tr->min_us)
+		tr->min_us = dur_us;
+
+	per_cpu(freq_trans_state, cpu).started = false;
+out:
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void *freq_switch_stat_start(struct tracer_stat *trace)
+{
+	struct rb_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	n = rb_first(&freq_trans_tree);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return n;
+}
+
+static void *freq_switch_stat_next(void *prev, int idx)
+{
+	struct rb_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	n = rb_next(prev);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return n;
+}
+
+static int freq_switch_stat_show(struct seq_file *s, void *p)
+{
+	unsigned long flags;
+	struct trans *tr = p;
+
+	spin_lock_irqsave(&state_lock, flags);
+	seq_printf(s, "%3d %9d %8d %5d %6lld %6d %6d\n", tr->cpu,
+		   tr->start_freq, tr->end_freq, tr->count,
+		   div_s64(ktime_to_us(tr->total_t), tr->count),
+		   tr->min_us, tr->max_us);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return 0;
+}
+
+static void freq_switch_stat_release(void *stat)
+{
+	struct trans *tr = stat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	rb_erase(&tr->node, &freq_trans_tree);
+	spin_unlock_irqrestore(&state_lock, flags);
+	kfree(tr);
+}
+
+static int freq_switch_stat_headers(struct seq_file *s)
+{
+	seq_puts(s, "CPU START_KHZ  END_KHZ COUNT AVG_US MIN_US MAX_US\n");
+	seq_puts(s, "  |         |        |     |      |      |      |\n");
+	return 0;
+}
+
+struct tracer_stat freq_switch_stats __read_mostly = {
+	.name = "cpu_freq_switch",
+	.stat_start = freq_switch_stat_start,
+	.stat_next = freq_switch_stat_next,
+	.stat_show = freq_switch_stat_show,
+	.stat_release = freq_switch_stat_release,
+	.stat_headers = freq_switch_stat_headers
+};
+
+static void trace_freq_switch_disable(void)
+{
+	unregister_stat_tracer(&freq_switch_stats);
+	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+	pr_info("disabled cpu frequency switch time profiling\n");
+}
+
+static int trace_freq_switch_enable(void)
+{
+	int ret;
+
+	ret = register_trace_cpu_frequency_switch_start(probe_start, NULL);
+	if (ret)
+		goto out;
+
+	ret = register_trace_cpu_frequency_switch_end(probe_end, NULL);
+	if (ret)
+		goto err_register_switch_end;
+
+	ret = register_stat_tracer(&freq_switch_stats);
+	if (ret)
+		goto err_register_stat_tracer;
+
+	pr_info("enabled cpu frequency switch time profiling\n");
+	return 0;
+
+err_register_stat_tracer:
+	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+err_register_switch_end:
+	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+out:
+	pr_err("failed to enable cpu frequency switch time profiling\n");
+
+	return ret;
+}
+
+static DEFINE_MUTEX(debugfs_lock);
+static bool trace_freq_switch_enabled;
+
+static int debug_toggle_tracing(void *data, u64 val)
+{
+	int ret = 0;
+
+	mutex_lock(&debugfs_lock);
+
+	if (val == 1 && !trace_freq_switch_enabled)
+		ret = trace_freq_switch_enable();
+	else if (val == 0 && trace_freq_switch_enabled)
+		trace_freq_switch_disable();
+	else if (val > 1)
+		ret = -EINVAL;
+
+	if (!ret)
+		trace_freq_switch_enabled = val;
+
+	mutex_unlock(&debugfs_lock);
+
+	return ret;
+}
+
+static int debug_tracing_state_get(void *data, u64 *val)
+{
+	mutex_lock(&debugfs_lock);
+	*val = trace_freq_switch_enabled;
+	mutex_unlock(&debugfs_lock);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debug_tracing_state_fops, debug_tracing_state_get,
+			debug_toggle_tracing, "%llu\n");
+
+static int __init trace_freq_switch_init(void)
+{
+	struct dentry *d_tracer = tracing_init_dentry();
+
+	if (IS_ERR(d_tracer))
+		return 0;
+
+	tracefs_create_file("cpu_freq_switch_profile_enabled",
+		0644, d_tracer, NULL, &debug_tracing_state_fops);
+
+	return 0;
+}
+late_initcall(trace_freq_switch_init);
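
For reference, the control added above is a plain tracefs toggle: writing 1 to the "cpu_freq_switch_profile_enabled" file created under the tracing directory registers the two tracepoint probes, and the per-CPU transition table produced by freq_switch_stat_headers()/freq_switch_stat_show() can then be read from trace_stat/cpu_freq_switch in the same directory; writing 0 tears it all down again. The concrete mount point (typically /sys/kernel/debug/tracing) is assumed here rather than stated in the patch.
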
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 84c5076..7ae9b24 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2116,6 +2116,7 @@
 		       current->comm, preempt_count(), task_pid_nr(current),
 		       worker->current_func);
 		debug_show_held_locks(current);
+		BUG_ON(PANIC_CORRUPTION);
 		dump_stack();
 	}
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index af5e988..64ec3fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -257,6 +257,17 @@
 
 	  If unsure, say N.
 
+config PAGE_OWNER_ENABLE_DEFAULT
+	bool "Enable page owner tracking by default"
+	depends on PAGE_OWNER
+	---help---
+	  This keeps track of which call chain owns a page, which may help
+	  to find bare alloc_page(s) leaks. If you include this feature in
+	  your build, it is enabled by default; pass "page_owner=off" on the
+	  kernel command line to disable it. It eats a fair amount of memory
+	  if enabled. See tools/vm/page_owner_sort.c for a user-space
+	  helper.
+
 config DEBUG_FS
 	bool "Debug Filesystem"
 	select SRCU
@@ -1006,6 +1017,15 @@
 	  (it defaults to deactivated on bootup and will only be activated
 	  if some application like powertop activates it explicitly).
 
+config DEBUG_TASK_STACK_SCAN_OFF
+	bool "Disable kmemleak task stack scan by default"
+	depends on DEBUG_KMEMLEAK
+	help
+	  Say Y here to disable kmemleak task stack scan by default
+	  at compile time. It can be enabled later if required by
+	  writing to the debugfs entry:
+	  echo "stack=on" > /sys/kernel/debug/kmemleak.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
@@ -2020,6 +2040,19 @@
 	        memtest=17, mean do 17 test patterns.
 	  If you are unsure how to answer this question, answer N.
 
+config MEMTEST_ENABLE_DEFAULT
+	int "Enable Memtest pattern test by default? (0-17)"
+	range 0 17
+	default "0"
+	depends on MEMTEST
+	help
+	  This option selects the number of memtest patterns run by default
+	  through the kernel configuration. Alternatively, it can be enabled
+	  using the memtest=<patterns> kernel command line option.
+
+	  The default value of "0" keeps memtest disabled.
+	  To enable it, enter any value in the 1-17 range.
+
 config TEST_STATIC_KEYS
 	tristate "Test static keys"
 	default n
@@ -2029,6 +2062,13 @@
 
 	  If unsure, say N.
 
+config PANIC_ON_DATA_CORRUPTION
+	bool "Cause a kernel panic when data corruption is detected"
+	help
+	  Select this option to upgrade warnings for potentially
+	  recoverable data corruption scenarios to system-halting panics,
+	  for easier detection and debugging.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
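
The PANIC_ON_DATA_CORRUPTION option above is what the BUG_ON(PANIC_CORRUPTION) calls in the kernel/workqueue.c, lib/list_debug.c and mm/page_poison.c hunks of this merge key off. The macro definition itself is not part of this excerpt; a minimal sketch of how it is presumably wired up elsewhere in the tree (an assumption, not text from the patch) is:

#ifdef CONFIG_PANIC_ON_DATA_CORRUPTION
#define PANIC_CORRUPTION 1	/* upgrade corruption warnings to a panic */
#else
#define PANIC_CORRUPTION 0	/* BUG_ON(0): stays inert */
#endif

With that, BUG_ON(PANIC_CORRUPTION) panics at the corruption site when the option is selected and does nothing otherwise.
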
diff --git a/lib/Makefile b/lib/Makefile
index e0eb131..6bde16d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -31,6 +31,8 @@
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
+KASAN_SANITIZE_find_bit.o := n
+
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3859bf6..7a5c1c0 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -11,6 +11,7 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/rculist.h>
+#include <linux/bug.h>
 
 /*
  * Insert a new entry between two known consecutive entries.
@@ -34,6 +35,10 @@
 	WARN(new == prev || new == next,
 	     "list_add double add: new=%p, prev=%p, next=%p.\n",
 	     new, prev, next);
+
+	BUG_ON((prev->next != next || next->prev != prev ||
+		 new == prev || new == next) && PANIC_CORRUPTION);
+
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
@@ -58,9 +63,11 @@
 		"list_del corruption. prev->next should be %p, "
 		"but was %p\n", entry, prev->next) ||
 	    WARN(next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, next->prev))
+		"list_del corruption. next->prev should be %p, but was %p\n",
+		entry, next->prev)) {
+		BUG_ON(PANIC_CORRUPTION);
 		return;
+	}
 
 	__list_del(prev, next);
 }
diff --git a/mm/Kconfig b/mm/Kconfig
index 86e3e0e..0183305 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -656,6 +656,15 @@
 
 	  A sane initial value is 80 MB.
 
+config BALANCE_ANON_FILE_RECLAIM
+	bool "During reclaim treat anon and file backed pages equally"
+	depends on SWAP
+	help
+	  When performing memory reclaim, treat anonymous and file backed pages
+	  equally.
+	  Swapping anonymous pages out to memory backed swap can be efficient
+	  enough to justify treating anonymous and file backed pages equally.
+
 # For architectures that support deferred memory initialisation
 config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	bool
diff --git a/mm/cma.c b/mm/cma.c
index c960459..0306bab 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -134,6 +134,10 @@
 	spin_lock_init(&cma->mem_head_lock);
 #endif
 
+	if (!PageHighMem(pfn_to_page(cma->base_pfn)))
+		kmemleak_free_part(__va(cma->base_pfn << PAGE_SHIFT),
+				cma->count << PAGE_SHIFT);
+
 	return 0;
 
 err:
@@ -380,6 +384,8 @@
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -420,6 +426,8 @@
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d1380ed..9a20a55 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -224,8 +224,20 @@
 static unsigned long jiffies_last_scan;
 /* delay between automatic memory scannings */
 static signed long jiffies_scan_wait;
-/* enables or disables the task stacks scanning */
+
+/*
+ * Enables or disables the task stacks scanning.
+ * Set to 1 to have it enabled at compile time, or to 0 to have it
+ * disabled by default.
+ * It can be enabled at run time by writing "stack=on" to the
+ * kmemleak debugfs entry.
+ */
+#ifdef CONFIG_DEBUG_TASK_STACK_SCAN_OFF
+static int kmemleak_stack_scan;
+#else
 static int kmemleak_stack_scan = 1;
+#endif
+
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
 /* setting kmemleak=on, will set this var, skipping the disable */
diff --git a/mm/ksm.c b/mm/ksm.c
index 9ae6011..56e92dc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -37,6 +37,7 @@
 #include <linux/freezer.h>
 #include <linux/oom.h>
 #include <linux/numa.h>
+#include <linux/show_mem_notifier.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -223,6 +224,9 @@
 /* Milliseconds ksmd should sleep between batches */
 static unsigned int ksm_thread_sleep_millisecs = 20;
 
+/* Boolean to indicate whether to use deferred timer or not */
+static bool use_deferred_timer;
+
 #ifdef CONFIG_NUMA
 /* Zeroed when merging across nodes is not allowed */
 static unsigned int ksm_merge_across_nodes = 1;
@@ -236,7 +240,7 @@
 #define KSM_RUN_MERGE	1
 #define KSM_RUN_UNMERGE	2
 #define KSM_RUN_OFFLINE	4
-static unsigned long ksm_run = KSM_RUN_STOP;
+static unsigned long ksm_run = KSM_RUN_MERGE;
 static void wait_while_offlining(void);
 
 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
@@ -247,6 +251,20 @@
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
 
+static int ksm_show_mem_notifier(struct notifier_block *nb,
+				unsigned long action,
+				void *data)
+{
+	pr_info("ksm_pages_sharing: %lu\n", ksm_pages_sharing);
+	pr_info("ksm_pages_shared: %lu\n", ksm_pages_shared);
+
+	return 0;
+}
+
+static struct notifier_block ksm_show_mem_notifier_block = {
+	.notifier_call = ksm_show_mem_notifier,
+};
+
 static int __init ksm_slab_init(void)
 {
 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1705,6 +1723,41 @@
 	}
 }
 
+static void process_timeout(unsigned long __data)
+{
+	wake_up_process((struct task_struct *)__data);
+}
+
+static signed long __sched deferred_schedule_timeout(signed long timeout)
+{
+	struct timer_list timer;
+	unsigned long expire;
+
+	__set_current_state(TASK_INTERRUPTIBLE);
+	if (timeout < 0) {
+		pr_err("schedule_timeout: wrong timeout value %lx\n",
+							timeout);
+		__set_current_state(TASK_RUNNING);
+		goto out;
+	}
+
+	expire = timeout + jiffies;
+
+	setup_deferrable_timer_on_stack(&timer, process_timeout,
+			(unsigned long)current);
+	mod_timer(&timer, expire);
+	schedule();
+	del_singleshot_timer_sync(&timer);
+
+	/* Remove the timer from the object tracker */
+	destroy_timer_on_stack(&timer);
+
+	timeout = expire - jiffies;
+
+out:
+	return timeout < 0 ? 0 : timeout;
+}
+
 static int ksmd_should_run(void)
 {
 	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
@@ -1725,7 +1778,11 @@
 		try_to_freeze();
 
 		if (ksmd_should_run()) {
-			schedule_timeout_interruptible(
+			if (use_deferred_timer)
+				deferred_schedule_timeout(
+				msecs_to_jiffies(ksm_thread_sleep_millisecs));
+			else
+				schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
 			wait_event_freezable(ksm_thread_wait,
@@ -2175,6 +2232,26 @@
 }
 KSM_ATTR(run);
 
+static ssize_t deferred_timer_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, 8, "%d\n", use_deferred_timer);
+}
+
+static ssize_t deferred_timer_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buf, size_t count)
+{
+	unsigned long enable;
+	int err;
+
+	err = kstrtoul(buf, 10, &enable);
+	use_deferred_timer = !err && enable;
+
+	return count;
+}
+KSM_ATTR(deferred_timer);
+
 #ifdef CONFIG_NUMA
 static ssize_t merge_across_nodes_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *buf)
@@ -2287,6 +2364,7 @@
 	&pages_unshared_attr.attr,
 	&pages_volatile_attr.attr,
 	&full_scans_attr.attr,
+	&deferred_timer_attr.attr,
 #ifdef CONFIG_NUMA
 	&merge_across_nodes_attr.attr,
 #endif
@@ -2331,6 +2409,8 @@
 	/* There is no significance to this priority 100 */
 	hotplug_memory_notifier(ksm_memory_callback, 100);
 #endif
+
+	show_mem_notifier_register(&ksm_show_mem_notifier_block);
 	return 0;
 
 out_free:
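
Two usage notes on the KSM changes above: ksm_run now defaults to KSM_RUN_MERGE, so ksmd starts scanning registered regions without the usual write to the run attribute, and the new deferred_timer attribute created through KSM_ATTR() accepts 0/1 to switch the sleep between scan batches over to the deferrable timer implemented by deferred_schedule_timeout(). On a running system the attribute would normally appear as /sys/kernel/mm/ksm/deferred_timer; the path is assumed from the standard KSM sysfs location, not spelled out in this patch.
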
diff --git a/mm/memory.c b/mm/memory.c
index cbb1e5e..91e1653 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3050,7 +3050,7 @@
 }
 
 static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(65536);
+	rounddown_pow_of_two(4096);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
diff --git a/mm/memtest.c b/mm/memtest.c
index 8eaa4c3..15a423e 100644
--- a/mm/memtest.c
+++ b/mm/memtest.c
@@ -80,8 +80,8 @@
 }
 
 /* default is disabled */
-static unsigned int memtest_pattern __initdata;
-
+static unsigned int memtest_pattern __initdata =
+		CONFIG_MEMTEST_ENABLE_DEFAULT;
 static int __init parse_memtest(char *arg)
 {
 	int ret = 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index 66ce6b4..f49de3cf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1319,6 +1319,8 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b47fda0..f61724f4f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1742,9 +1742,9 @@
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
+	kasan_alloc_pages(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 	kernel_poison_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
 	set_page_owner(page, order, gfp_flags);
 }
 
@@ -2058,8 +2058,12 @@
  * potentially hurts the reliability of high-order allocations when under
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve a pageblock even though highatomic
+ * pageblock is exhausted.
  */
-static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+						bool force)
 {
 	struct zonelist *zonelist = ac->zonelist;
 	unsigned long flags;
@@ -2067,11 +2071,16 @@
 	struct zone *zone;
 	struct page *page;
 	int order;
+	bool ret;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
 								ac->nodemask) {
-		/* Preserve at least one pageblock */
-		if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
+		/*
+		 * Preserve at least one pageblock unless memory pressure
+		 * is really high.
+		 */
+		if (!force && zone->nr_reserved_highatomic <=
+					pageblock_nr_pages)
 			continue;
 
 		spin_lock_irqsave(&zone->lock, flags);
@@ -2085,13 +2094,25 @@
 				continue;
 
 			/*
-			 * It should never happen but changes to locking could
-			 * inadvertently allow a per-cpu drain to add pages
-			 * to MIGRATE_HIGHATOMIC while unreserving so be safe
-			 * and watch for underflows.
+			 * In page freeing path, migratetype change is racy so
+			 * we can encounter several free pages in a pageblock
+			 * in this loop although we changed the pageblock type
+			 * from highatomic to ac->migratetype. So we should
+			 * adjust the count once.
 			 */
-			zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
-				zone->nr_reserved_highatomic);
+			if (get_pageblock_migratetype(page) ==
+							MIGRATE_HIGHATOMIC) {
+				/*
+				 * It should never happen but changes to
+				 * locking could inadvertently allow a per-cpu
+				 * drain to add pages to MIGRATE_HIGHATOMIC
+				 * while unreserving so be safe and watch for
+				 * underflows.
+				 */
+				zone->nr_reserved_highatomic -= min(
+						pageblock_nr_pages,
+						zone->nr_reserved_highatomic);
+			}
 
 			/*
 			 * Convert to ac->migratetype and avoid the normal
@@ -2103,12 +2124,16 @@
 			 * may increase.
 			 */
 			set_pageblock_migratetype(page, ac->migratetype);
-			move_freepages_block(zone, page, ac->migratetype);
-			spin_unlock_irqrestore(&zone->lock, flags);
-			return;
+			ret = move_freepages_block(zone, page, ac->migratetype);
+			if (ret) {
+				spin_unlock_irqrestore(&zone->lock, flags);
+				return ret;
+			}
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
+
+	return false;
 }
 
 /* Remove an element from the buddy allocator from the fallback list */
@@ -2133,7 +2158,8 @@
 
 		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
-		if (can_steal)
+		if (can_steal &&
+			get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
 			steal_suitable_fallback(zone, page, start_migratetype);
 
 		/* Remove the page from the freelists */
@@ -2542,7 +2568,8 @@
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
 			int mt = get_pageblock_migratetype(page);
-			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
+			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
+				&& mt != MIGRATE_HIGHATOMIC)
 				set_pageblock_migratetype(page,
 							  MIGRATE_MOVABLE);
 		}
@@ -3313,7 +3340,7 @@
 	 * Shrink them and try again
 	 */
 	if (!page && !drained) {
-		unreserve_highatomic_pageblock(ac);
+		unreserve_highatomic_pageblock(ac, false);
 		drain_all_pages(NULL);
 		drained = true;
 		goto retry;
@@ -3430,8 +3457,10 @@
 	 * Make sure we converge to OOM if we cannot make any progress
 	 * several times in the row.
 	 */
-	if (*no_progress_loops > MAX_RECLAIM_RETRIES)
-		return false;
+	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+		/* Before OOM, exhaust highatomic_reserve */
+		return unreserve_highatomic_pageblock(ac, true);
+	}
 
 	/*
 	 * Keep reclaiming pages while there is a chance this will lead
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 60634dc..d2db436 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -25,7 +25,8 @@
 	depot_stack_handle_t handle;
 };
 
-static bool page_owner_disabled = true;
+static bool page_owner_disabled =
+	!IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 
 static depot_stack_handle_t dummy_handle;
@@ -41,6 +42,9 @@
 	if (strcmp(buf, "on") == 0)
 		page_owner_disabled = false;
 
+	if (strcmp(buf, "off") == 0)
+		page_owner_disabled = true;
+
 	return 0;
 }
 early_param("page_owner", early_page_owner_param);
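
With PAGE_OWNER_ENABLE_DEFAULT selected, tracking is active from boot unless "page_owner=off" is passed on the command line, and the new "off" handling above gives "page_owner=on" an explicit inverse. The recorded allocation stacks are normally dumped from the page_owner file under debugfs and post-processed with the tools/vm/page_owner_sort.c helper referenced in the Kconfig help; that debugfs path comes from the existing page_owner code and is assumed here rather than shown in this diff.
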
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 2e647c6..0abd75e 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -106,7 +106,8 @@
 	return error && !(error & (error - 1));
 }
 
-static void check_poison_mem(unsigned char *mem, size_t bytes)
+static void check_poison_mem(struct page *page,
+			     unsigned char *mem, size_t bytes)
 {
 	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
 	unsigned char *start;
@@ -127,12 +128,15 @@
 	if (!__ratelimit(&ratelimit))
 		return;
 	else if (start == end && single_bit_flip(*start, PAGE_POISON))
-		pr_err("pagealloc: single bit error\n");
+		pr_err("pagealloc: single bit error on page with phys start 0x%lx\n",
+			(unsigned long)page_to_phys(page));
 	else
-		pr_err("pagealloc: memory corruption\n");
+		pr_err("pagealloc: memory corruption on page with phys start 0x%lx\n",
+			(unsigned long)page_to_phys(page));
 
 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
 			end - start + 1, 1);
+	BUG_ON(PANIC_CORRUPTION);
 	dump_stack();
 }
 
@@ -144,7 +148,7 @@
 		return;
 
 	addr = kmap_atomic(page);
-	check_poison_mem(addr, PAGE_SIZE);
+	check_poison_mem(page, addr, PAGE_SIZE);
 	clear_page_poison(page);
 	kunmap_atomic(addr);
 }
diff --git a/mm/percpu.c b/mm/percpu.c
index 2557143..f014ceb 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1010,8 +1010,11 @@
 		mutex_unlock(&pcpu_alloc_mutex);
 	}
 
-	if (chunk != pcpu_reserved_chunk)
+	if (chunk != pcpu_reserved_chunk) {
+		spin_lock_irqsave(&pcpu_lock, flags);
 		pcpu_nr_empty_pop_pages -= occ_pages;
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+	}
 
 	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
 		pcpu_schedule_balance_work();
diff --git a/mm/readahead.c b/mm/readahead.c
index c8a955b..7dc48ba 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -233,6 +233,8 @@
 
 /*
  * Set the initial window size, round to next power of 2 and square
+ * (a small size no longer depends on the max value - only a one-page read is
+ * regarded as small)
  * for small size, x 4 for medium, and x 2 for large
  * for 128k (32 page) max ra
  * 1-8 page = 32k initial, > 8 page = 128k initial
@@ -241,7 +243,7 @@
 {
 	unsigned long newsize = roundup_pow_of_two(size);
 
-	if (newsize <= max / 32)
+	if (newsize <= 1)
 		newsize = newsize * 4;
 	else if (newsize <= max / 4)
 		newsize = newsize * 2;
diff --git a/mm/slab.c b/mm/slab.c
index bd878f0..1f82d16 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2332,7 +2332,7 @@
 	return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
 	int node;
@@ -2352,7 +2352,7 @@
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __kmem_cache_shrink(cachep, false);
+	return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
index bc05fdc..ceb7d70 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -146,7 +146,7 @@
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 329b038..5d2f24f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -573,6 +573,29 @@
 	get_online_cpus();
 	get_online_mems();
 
+#ifdef CONFIG_SLUB
+	/*
+	 * In case of SLUB, we need to disable empty slab caching to
+	 * avoid pinning the offline memory cgroup by freeable kmem
+	 * pages charged to it. SLAB doesn't need this, as it
+	 * periodically purges unused slabs.
+	 */
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(s, &slab_caches, list) {
+		c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL;
+		if (c) {
+			c->cpu_partial = 0;
+			c->min_partial = 0;
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	/*
+	 * kmem_cache->cpu_partial is checked locklessly (see
+	 * put_cpu_partial()). Make sure the change is visible.
+	 */
+	synchronize_sched();
+#endif
+
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		if (!is_root_cache(s))
@@ -584,7 +607,7 @@
 		if (!c)
 			continue;
 
-		__kmem_cache_shrink(c, true);
+		__kmem_cache_shrink(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -755,7 +778,7 @@
 	get_online_cpus();
 	get_online_mems();
 	kasan_cache_shrink(cachep);
-	ret = __kmem_cache_shrink(cachep, false);
+	ret = __kmem_cache_shrink(cachep);
 	put_online_mems();
 	put_online_cpus();
 	return ret;
diff --git a/mm/slob.c b/mm/slob.c
index 5ec1580..eac04d4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -634,7 +634,7 @@
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
diff --git a/mm/slub.c b/mm/slub.c
index 7aa0e97..30be24b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -663,11 +663,21 @@
 	dump_stack();
 }
 
+#ifdef CONFIG_SLUB_DEBUG_PANIC_ON
+static void slab_panic(const char *cause)
+{
+	panic("%s\n", cause);
+}
+#else
+static inline void slab_panic(const char *cause) {}
+#endif
+
 void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason)
 {
 	slab_bug(s, "%s", reason);
 	print_trailer(s, page, object);
+	slab_panic(reason);
 }
 
 static void slab_err(struct kmem_cache *s, struct page *page,
@@ -682,6 +692,7 @@
 	slab_bug(s, "%s", buf);
 	print_page_info(page);
 	dump_stack();
+	slab_panic("slab error");
 }
 
 static void init_object(struct kmem_cache *s, void *object, u8 val)
@@ -703,6 +714,7 @@
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 						void *from, void *to)
 {
+	slab_panic("object poison overwritten");
 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
 	memset(from, data, to - from);
 }
@@ -1661,6 +1673,7 @@
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
+	kasan_alloc_pages(page, order);
 	__free_pages(page, order);
 }
 
@@ -3869,6 +3882,7 @@
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(x);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_pages(page, compound_order(page));
 		return;
 	}
@@ -3887,7 +3901,7 @@
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3899,21 +3913,6 @@
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3970,7 +3969,7 @@
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 30a88b9..9d3f6d3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -319,6 +319,10 @@
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
+	long min_cache_size = batch_size;
+
+	if (current_is_kswapd())
+		min_cache_size = 0;
 
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0)
@@ -386,7 +390,7 @@
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
-	while (total_scan >= batch_size ||
+	while (total_scan > min_cache_size ||
 	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);
@@ -2204,7 +2208,8 @@
 	 * lruvec even if it has plenty of old anonymous pages unless the
 	 * system is under heavy pressure.
 	 */
-	if (!inactive_list_is_low(lruvec, true, sc) &&
+	if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
+	    !inactive_list_is_low(lruvec, true, sc) &&
 	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
 		scan_balance = SCAN_FILE;
 		goto out;
diff --git a/net/Kconfig b/net/Kconfig
index cd20118..d5ff4f7 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -321,6 +321,15 @@
 	  with many clients some protection against DoS by a single (spoofed)
 	  flow that greatly exceeds average workload.
 
+config SOCKEV_NLMCAST
+	bool "Enable SOCKEV Netlink Multicast"
+	default n
+	---help---
+	  Default client for SOCKEV notifier events. Sends multicast netlink
+	  messages whenever the socket event notifier is invoked. Enable if
+	  user space entities need to be notified of socket events without
+	  having to poll /proc.
+
 menu "Network testing"
 
 config NET_PKTGEN
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7cb41ae..8498e35 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -186,8 +186,9 @@
 		/* Do not flood unicast traffic to ports that turn it off */
 		if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD))
 			continue;
+		/* Do not flood if mc off, except for traffic we originate */
 		if (pkt_type == BR_PKT_MULTICAST &&
-		    !(p->flags & BR_MCAST_FLOOD))
+		    !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
 			continue;
 
 		/* Do not flood to ports that enable proxy ARP */
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 855b72f..267b46a 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -29,6 +29,7 @@
 static int
 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	br_drop_fake_rtable(skb);
 	return netif_receive_skb(skb);
 }
 
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 7fbdbae..aa1df1a 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -521,21 +521,6 @@
 }
 
 
-/* PF_BRIDGE/LOCAL_IN ************************************************/
-/* The packet is locally destined, which requires a real
- * dst_entry, so detach the fake one.  On the way up, the
- * packet would pass through PRE_ROUTING again (which already
- * took place when the packet entered the bridge), but we
- * register an IPv4 PRE_ROUTING 'sabotage' hook that will
- * prevent this from happening. */
-static unsigned int br_nf_local_in(void *priv,
-				   struct sk_buff *skb,
-				   const struct nf_hook_state *state)
-{
-	br_drop_fake_rtable(skb);
-	return NF_ACCEPT;
-}
-
 /* PF_BRIDGE/FORWARD *************************************************/
 static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -906,12 +891,6 @@
 		.priority = NF_BR_PRI_BRNF,
 	},
 	{
-		.hook = br_nf_local_in,
-		.pf = NFPROTO_BRIDGE,
-		.hooknum = NF_BR_LOCAL_IN,
-		.priority = NF_BR_PRI_BRNF,
-	},
-	{
 		.hook = br_nf_forward_ip,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_FORWARD,
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d243688..d3f6c26 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1334,7 +1334,6 @@
 		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
 		    (xorstate & CEPH_OSD_EXISTS)) {
 			pr_info("osd%d does not exist\n", osd);
-			map->osd_weight[osd] = CEPH_OSD_IN;
 			ret = set_primary_affinity(map, osd,
 						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
 			if (ret)
diff --git a/net/core/Makefile b/net/core/Makefile
index d6508c2..77bb89b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
 obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
+obj-$(CONFIG_SOCKEV_NLMCAST) += sockev_nlmcast.o
 obj-$(CONFIG_DST_CACHE) += dst_cache.o
 obj-$(CONFIG_HWBM) += hwbm.o
 obj-$(CONFIG_NET_DEVLINK) += devlink.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 555ed4b..dff8012 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1697,27 +1697,54 @@
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
 static atomic_t netstamp_needed_deferred;
+static atomic_t netstamp_wanted;
 static void netstamp_clear(struct work_struct *work)
 {
 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+	int wanted;
 
-	while (deferred--)
-		static_key_slow_dec(&netstamp_needed);
+	wanted = atomic_add_return(deferred, &netstamp_wanted);
+	if (wanted > 0)
+		static_key_enable(&netstamp_needed);
+	else
+		static_key_disable(&netstamp_needed);
 }
 static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
 
 void net_enable_timestamp(void)
 {
+#ifdef HAVE_JUMP_LABEL
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 0)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
+			return;
+	}
+	atomic_inc(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_inc(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	/* net_disable_timestamp() can be called from non process context */
-	atomic_inc(&netstamp_needed_deferred);
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 1)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
+			return;
+	}
+	atomic_dec(&netstamp_needed_deferred);
 	schedule_work(&netstamp_work);
 #else
 	static_key_slow_dec(&netstamp_needed);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 11fce17..46e8830 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -69,27 +69,17 @@
 	return 0;
 }
 
-static void update_classid(struct cgroup_subsys_state *css, void *v)
-{
-	struct css_task_iter it;
-	struct task_struct *p;
-
-	css_task_iter_start(css, &it);
-	while ((p = css_task_iter_next(&it))) {
-		task_lock(p);
-		iterate_fd(p->files, 0, update_classid_sock, v);
-		task_unlock(p);
-	}
-	css_task_iter_end(&it);
-}
-
 static void cgrp_attach(struct cgroup_taskset *tset)
 {
 	struct cgroup_subsys_state *css;
+	struct task_struct *p;
 
-	cgroup_taskset_first(tset, &css);
-	update_classid(css,
-		       (void *)(unsigned long)css_cls_state(css)->classid);
+	cgroup_taskset_for_each(p, css, tset) {
+		task_lock(p);
+		iterate_fd(p->files, 0, update_classid_sock,
+			   (void *)(unsigned long)css_cls_state(css)->classid);
+		task_unlock(p);
+	}
 }
 
 static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -101,12 +91,22 @@
 			 u64 value)
 {
 	struct cgroup_cls_state *cs = css_cls_state(css);
+	struct css_task_iter it;
+	struct task_struct *p;
 
 	cgroup_sk_alloc_disable();
 
 	cs->classid = (u32)value;
 
-	update_classid(css, (void *)(unsigned long)cs->classid);
+	css_task_iter_start(css, &it);
+	while ((p = css_task_iter_next(&it))) {
+		task_lock(p);
+		iterate_fd(p->files, 0, update_classid_sock,
+			   (void *)(unsigned long)cs->classid);
+		task_unlock(p);
+	}
+	css_task_iter_end(&it);
+
 	return 0;
 }
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1e3e008..f0f462c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3814,13 +3814,14 @@
 	if (!skb_may_tx_timestamp(sk, false))
 		return;
 
-	/* take a reference to prevent skb_orphan() from freeing the socket */
-	sock_hold(sk);
-
-	*skb_hwtstamps(skb) = *hwtstamps;
-	__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
-
-	sock_put(sk);
+	/* Take a reference to prevent skb_orphan() from freeing the socket,
+	 * but only if the socket refcount is not zero.
+	 */
+	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+		*skb_hwtstamps(skb) = *hwtstamps;
+		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+		sock_put(sk);
+	}
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
 
@@ -3871,7 +3872,7 @@
 {
 	struct sock *sk = skb->sk;
 	struct sock_exterr_skb *serr;
-	int err;
+	int err = 1;
 
 	skb->wifi_acked_valid = 1;
 	skb->wifi_acked = acked;
@@ -3881,14 +3882,15 @@
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
 
-	/* take a reference to prevent skb_orphan() from freeing the socket */
-	sock_hold(sk);
-
-	err = sock_queue_err_skb(sk, skb);
+	/* Take a reference to prevent skb_orphan() from freeing the socket,
+	 * but only if the socket refcount is not zero.
+	 */
+	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+		err = sock_queue_err_skb(sk, skb);
+		sock_put(sk);
+	}
 	if (err)
 		kfree_skb(skb);
-
-	sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 87a740b..19562f7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1437,6 +1437,11 @@
 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
 			 __func__, atomic_read(&sk->sk_omem_alloc));
 
+	if (sk->sk_frag.page) {
+		put_page(sk->sk_frag.page);
+		sk->sk_frag.page = NULL;
+	}
+
 	if (sk->sk_peer_cred)
 		put_cred(sk->sk_peer_cred);
 	put_pid(sk->sk_peer_pid);
@@ -1533,6 +1538,12 @@
 			is_charged = sk_filter_charge(newsk, filter);
 
 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+			/* We need to make sure that we don't uncharge the new
+			 * socket if we couldn't charge it in the first place
+			 * as otherwise we uncharge the parent's filter.
+			 */
+			if (!is_charged)
+				RCU_INIT_POINTER(newsk->sk_filter, NULL);
 			/* It is still raw copy of parent, so invalidate
 			 * destructor and make plain sk_free() */
 			newsk->sk_destruct = NULL;
@@ -2741,11 +2752,6 @@
 
 	sk_refcnt_debug_release(sk);
 
-	if (sk->sk_frag.page) {
-		put_page(sk->sk_frag.page);
-		sk->sk_frag.page = NULL;
-	}
-
 	sock_put(sk);
 }
 EXPORT_SYMBOL(sk_common_release);
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
new file mode 100644
index 0000000..04f61fc
--- /dev/null
+++ b/net/core/sockev_nlmcast.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2014-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Default SOCKEV client implementation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/netlink.h>
+#include <linux/sockev.h>
+#include <net/sock.h>
+
+static int registration_status;
+static struct sock *socknlmsgsk;
+
+static void sockev_skmsg_recv(struct sk_buff *skb)
+{
+	pr_debug("%s(): Got unsolicited request\n", __func__);
+}
+
+static struct netlink_kernel_cfg nlcfg = {
+	.input = sockev_skmsg_recv
+};
+
+static void _sockev_event(unsigned long event, __u8 *evstr, int buflen)
+{
+	switch (event) {
+	case SOCKEV_SOCKET:
+		strlcpy(evstr, "SOCKEV_SOCKET", buflen);
+		break;
+	case SOCKEV_BIND:
+		strlcpy(evstr, "SOCKEV_BIND", buflen);
+		break;
+	case SOCKEV_LISTEN:
+		strlcpy(evstr, "SOCKEV_LISTEN", buflen);
+		break;
+	case SOCKEV_ACCEPT:
+		strlcpy(evstr, "SOCKEV_ACCEPT", buflen);
+		break;
+	case SOCKEV_CONNECT:
+		strlcpy(evstr, "SOCKEV_CONNECT", buflen);
+		break;
+	case SOCKEV_SHUTDOWN:
+		strlcpy(evstr, "SOCKEV_SHUTDOWN", buflen);
+		break;
+	default:
+		strlcpy(evstr, "UNKNOWN", buflen);
+	}
+}
+
+static int sockev_client_cb(struct notifier_block *nb,
+			    unsigned long event, void *data)
+{
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	struct sknlsockevmsg *smsg;
+	struct socket *sock;
+
+	sock = (struct socket *)data;
+	if (!socknlmsgsk)
+		goto done;
+	if (!sock || !sock->sk)
+		goto done;
+
+	if (sock->sk->sk_family != AF_INET && sock->sk->sk_family != AF_INET6)
+		goto done;
+
+	if (event != SOCKEV_BIND && event != SOCKEV_LISTEN)
+		goto done;
+
+	skb = nlmsg_new(sizeof(struct sknlsockevmsg), GFP_KERNEL);
+	if (!skb)
+		goto done;
+
+	nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct sknlsockevmsg), 0);
+	if (!nlh) {
+		kfree_skb(skb);
+		goto done;
+	}
+
+	NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV;
+
+	smsg = nlmsg_data(nlh);
+	smsg->pid = current->pid;
+	_sockev_event(event, smsg->event, sizeof(smsg->event));
+	smsg->skfamily = sock->sk->sk_family;
+	smsg->skstate = sock->sk->sk_state;
+	smsg->skprotocol = sock->sk->sk_protocol;
+	smsg->sktype = sock->sk->sk_type;
+	smsg->skflags = sock->sk->sk_flags;
+
+	nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
+done:
+	return 0;
+}
+
+static struct notifier_block sockev_notifier_client = {
+	.notifier_call = sockev_client_cb,
+	.next = NULL,
+	.priority = 0
+};
+
+/* ***************** Startup/Shutdown *************************************** */
+
+static int __init sockev_client_init(void)
+{
+	int rc;
+
+	registration_status = 1;
+	rc = sockev_register_notify(&sockev_notifier_client);
+	if (rc != 0) {
+		registration_status = 0;
+		pr_err("%s(): Failed to register cb (%d)\n", __func__, rc);
+	}
+	socknlmsgsk = netlink_kernel_create(&init_net, NETLINK_SOCKEV, &nlcfg);
+	if (!socknlmsgsk) {
+		pr_err("%s(): Failed to initialize netlink socket\n", __func__);
+		if (registration_status)
+			sockev_unregister_notify(&sockev_notifier_client);
+		registration_status = 0;
+	}
+
+	return rc;
+}
+
+static void __exit sockev_client_exit(void)
+{
+	if (registration_status)
+		sockev_unregister_notify(&sockev_notifier_client);
+}
+
+module_init(sockev_client_init)
+module_exit(sockev_client_exit)
+MODULE_LICENSE("GPL v2");
+
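
To illustrate the consumer side that SOCKEV_NLMCAST targets, here is a minimal userspace sketch that joins the SKNLGRP_SOCKEV multicast group and prints the event strings built by _sockev_event() above. It assumes the uapi <linux/sockev.h> header exports NETLINK_SOCKEV, SKNLGRP_SOCKEV and struct sknlsockevmsg as used by the kernel file, and that the group maps onto the usual nl_groups bit; this is a sketch, not part of the patch.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sockev.h>	/* NETLINK_SOCKEV, SKNLGRP_SOCKEV, struct sknlsockevmsg */

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1 << (SKNLGRP_SOCKEV - 1),	/* assumed group-to-bit mapping */
	};
	union {
		struct nlmsghdr nlh;
		char raw[512];
	} buf;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, &buf, sizeof(buf), 0);

		if (len <= 0)
			break;
		if (NLMSG_OK(&buf.nlh, (unsigned int)len)) {
			struct sknlsockevmsg *ev = NLMSG_DATA(&buf.nlh);

			/* pid and event[] are filled in by sockev_client_cb() */
			printf("pid %u: %s\n", (unsigned int)ev->pid,
			       (const char *)ev->event);
		}
	}
	close(fd);
	return 0;
}
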
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198..5e3a730 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@
 	for (i = 0; i < hc->tx_seqbufc; i++)
 		kfree(hc->tx_seqbuf[i]);
 	hc->tx_seqbufc = 0;
+	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8fedc2d..4a05d78 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -577,6 +577,7 @@
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 	const int old_state = sk->sk_state;
+	bool acceptable;
 	int queued = 0;
 
 	/*
@@ -603,8 +604,13 @@
 	 */
 	if (sk->sk_state == DCCP_LISTEN) {
 		if (dh->dccph_type == DCCP_PKT_REQUEST) {
-			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
-								    skb) < 0)
+			/* It is possible that we process SYN packets from backlog,
+			 * so we need to make sure to disable BH right there.
+			 */
+			local_bh_disable();
+			acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
+			local_bh_enable();
+			if (!acceptable)
 				return 1;
 			consume_skb(skb);
 			return 0;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index edbe59d..86b0933 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@
 
 	switch (type) {
 	case ICMP_REDIRECT:
-		dccp_do_redirect(skb, sk);
+		if (!sock_owned_by_user(sk))
+			dccp_do_redirect(skb, sk);
 		goto out;
 	case ICMP_SOURCE_QUENCH:
 		/* Just silently ignore these. */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 7506c03..237d62c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@
 	np = inet6_sk(sk);
 
 	if (type == NDISC_REDIRECT) {
-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+		if (!sock_owned_by_user(sk)) {
+			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-		if (dst)
-			dst->ops->redirect(dst, sk, skb);
+			if (dst)
+				dst->ops->redirect(dst, sk, skb);
+		}
 		goto out;
 	}
 
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 53eddf9..39e7e2b 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -122,6 +122,7 @@
 			/* It is still raw copy of parent, so invalidate
 			 * destructor and make plain sk_free() */
 			newsk->sk_destruct = NULL;
+			bh_unlock_sock(newsk);
 			sk_free(newsk);
 			return NULL;
 		}
@@ -145,6 +146,13 @@
 	struct dccp_request_sock *dreq = dccp_rsk(req);
 	bool own_req;
 
+	/* TCP/DCCP listeners became lockless.
+	 * DCCP stores complex state in its request_sock, so we need
+	 * a protection for them, now this code runs without being protected
+	 * by the parent (listener) lock.
+	 */
+	spin_lock_bh(&dreq->dreq_lock);
+
 	/* Check for retransmitted REQUEST */
 	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
 
@@ -159,7 +167,7 @@
 			inet_rtx_syn_ack(sk, req);
 		}
 		/* Network Duplicate, discard packet */
-		return NULL;
+		goto out;
 	}
 
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -185,20 +193,20 @@
 
 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
 							 req, &own_req);
-	if (!child)
-		goto listen_overflow;
+	if (child) {
+		child = inet_csk_complete_hashdance(sk, child, req, own_req);
+		goto out;
+	}
 
-	return inet_csk_complete_hashdance(sk, child, req, own_req);
-
-listen_overflow:
-	dccp_pr_debug("listen_overflow!\n");
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 drop:
 	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
 		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req);
-	return NULL;
+out:
+	spin_unlock_bh(&dreq->dreq_lock);
+	return child;
 }
 
 EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -249,6 +257,7 @@
 {
 	struct dccp_request_sock *dreq = dccp_rsk(req);
 
+	spin_lock_init(&dreq->dreq_lock);
 	inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
 	inet_rsk(req)->ir_num	   = ntohs(dccp_hdr(skb)->dccph_dport);
 	inet_rsk(req)->acked	   = 0;
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index cdf372f..e057887 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -2798,6 +2798,9 @@
 	if (!port_ptr || !name)
 		return -EINVAL;
 
+	if (port_ptr->type != CLIENT_PORT)
+		return -EINVAL;
+
 	if (name->addrtype != MSM_IPC_ADDR_NAME)
 		return -EINVAL;
 
diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c
index a84fc11..02242a1 100644
--- a/net/ipc_router/ipc_router_socket.c
+++ b/net/ipc_router/ipc_router_socket.c
@@ -543,10 +543,18 @@
 static int msm_ipc_router_close(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct msm_ipc_port *port_ptr;
 	int ret;
 
+	if (!sk)
+		return -EINVAL;
+
 	lock_sock(sk);
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr) {
+		release_sock(sk);
+		return -EINVAL;
+	}
 	ret = msm_ipc_router_close_port(port_ptr);
 	msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
 	release_sock(sk);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c836bfe..8bc6c4e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1475,8 +1475,10 @@
 	int proto = iph->protocol;
 	int err = -ENOSYS;
 
-	if (skb->encapsulation)
+	if (skb->encapsulation) {
+		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
 		skb_set_inner_network_header(skb, nhoff);
+	}
 
 	csum_replace2(&iph->check, iph->tot_len, newlen);
 	iph->tot_len = newlen;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 18412f9..98fd2f7 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1082,7 +1082,8 @@
 
 	net = sock_net(skb->sk);
 	nlh = nlmsg_hdr(skb);
-	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+	    skb->len < nlh->nlmsg_len ||
 	    nlmsg_len(nlh) < sizeof(*frn))
 		return;
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5ba912d..873df83 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1973,6 +1973,7 @@
 {
 	int res;
 
+	tos &= IPTOS_RT_MASK;
 	rcu_read_lock();
 
 	/* Multicast recognition logic is moved from route cache to here.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7fb6704..e074816 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5572,6 +5572,7 @@
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	tcp_set_state(sk, TCP_ESTABLISHED);
+	icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
 	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5790,7 +5791,6 @@
 			 * to stand against the temptation 8)     --ANK
 			 */
 			inet_csk_schedule_ack(sk);
-			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 			tcp_enter_quickack_mode(sk);
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
@@ -5917,9 +5917,15 @@
 		if (th->syn) {
 			if (th->fin)
 				goto discard;
-			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
-				return 1;
+			/* It is possible that we process SYN packets from backlog,
+			 * so we need to make sure to disable BH right there.
+			 */
+			local_bh_disable();
+			acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
+			local_bh_enable();
 
+			if (!acceptable)
+				return 1;
 			consume_skb(skb);
 			return 0;
 		}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index eb5a0e1..eca1433 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -269,10 +269,13 @@
  */
 void tcp_v4_mtu_reduced(struct sock *sk)
 {
-	struct dst_entry *dst;
 	struct inet_sock *inet = inet_sk(sk);
-	u32 mtu = tcp_sk(sk)->mtu_info;
+	struct dst_entry *dst;
+	u32 mtu;
 
+	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+		return;
+	mtu = tcp_sk(sk)->mtu_info;
 	dst = inet_csk_update_pmtu(sk, mtu);
 	if (!dst)
 		return;
@@ -418,7 +421,8 @@
 
 	switch (type) {
 	case ICMP_REDIRECT:
-		do_redirect(icmp_skb, sk);
+		if (!sock_owned_by_user(sk))
+			do_redirect(icmp_skb, sk);
 		goto out;
 	case ICMP_SOURCE_QUENCH:
 		/* Just silently ignore these. */
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6234eba..8615a6b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -466,6 +466,7 @@
 		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
 		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
 		newtp->packets_out = 0;
 		newtp->retrans_out = 0;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 3ea1cf8..b1e65b3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,7 +249,8 @@
 
 	sk_mem_reclaim_partial(sk);
 
-	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
 
 	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int event;
 
-	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !icsk->icsk_pending)
 		goto out;
 
 	if (time_after(icsk->icsk_timeout, jiffies)) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 965ca86..0cfb91f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -223,6 +223,7 @@
 	.accept_ra_rtr_pref	= 1,
 	.rtr_probe_interval	= 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	.accept_ra_rt_info_min_plen = 0,
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
@@ -270,6 +271,7 @@
 	.accept_ra_rtr_pref	= 1,
 	.rtr_probe_interval	= 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	.accept_ra_rt_info_min_plen = 0,
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
@@ -4963,6 +4965,7 @@
 	array[DEVCONF_RTR_PROBE_INTERVAL] =
 		jiffies_to_msecs(cnf->rtr_probe_interval);
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
@@ -5939,6 +5942,13 @@
 	},
 #ifdef CONFIG_IPV6_ROUTE_INFO
 	{
+		.procname	= "accept_ra_rt_info_min_plen",
+		.data		= &ipv6_devconf.accept_ra_rt_info_min_plen,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "accept_ra_rt_info_max_plen",
 		.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ef54852..8c88a37 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -908,6 +908,8 @@
 			ins = &rt->dst.rt6_next;
 			iter = *ins;
 			while (iter) {
+				if (iter->rt6i_metric > rt->rt6i_metric)
+					break;
 				if (rt6_qualify_for_ecmp(iter)) {
 					*ins = iter->dst.rt6_next;
 					fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index fc7b401..33b04ec 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -294,8 +294,10 @@
 	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
 	int err = -ENOSYS;
 
-	if (skb->encapsulation)
+	if (skb->encapsulation) {
+		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
 		skb_set_inner_network_header(skb, nhoff);
+	}
 
 	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9a87bfb..e27b8fd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -757,13 +757,14 @@
 	 *	Fragment the datagram.
 	 */
 
-	*prevhdr = NEXTHDR_FRAGMENT;
 	troom = rt->dst.dev->needed_tailroom;
 
 	/*
 	 *	Keep copying data until we run out.
 	 */
 	while (left > 0)	{
+		u8 *fragnexthdr_offset;
+
 		len = left;
 		/* IF: it doesn't fit, use 'mtu' - the data space left */
 		if (len > mtu)
@@ -808,6 +809,10 @@
 		 */
 		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 
+		fragnexthdr_offset = skb_network_header(frag);
+		fragnexthdr_offset += prevhdr - skb_network_header(skb);
+		*fragnexthdr_offset = NEXTHDR_FRAGMENT;
+
 		/*
 		 *	Build fragment header.
 		 */
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 3bce120..bbeedff 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -692,6 +692,10 @@
 	u->link = p->link;
 	u->i_key = p->i_key;
 	u->o_key = p->o_key;
+	if (u->i_key)
+		u->i_flags |= GRE_KEY;
+	if (u->o_key)
+		u->o_flags |= GRE_KEY;
 	u->proto = p->proto;
 
 	memcpy(u->name, p->name, sizeof(u->name));
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d8e6714..01858ac 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1395,6 +1395,8 @@
 			if (ri->prefix_len == 0 &&
 			    !in6_dev->cnf.accept_ra_defrtr)
 				continue;
+			if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+				continue;
 			if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
 				continue;
 			rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 9948b5c..986d4ca 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -589,6 +589,7 @@
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
+	skb_orphan(skb);
 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
 		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq == NULL) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 37c4b38..1c3bc0a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -376,10 +376,12 @@
 	np = inet6_sk(sk);
 
 	if (type == NDISC_REDIRECT) {
-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+		if (!sock_owned_by_user(sk)) {
+			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-		if (dst)
-			dst->ops->redirect(dst, sk, skb);
+			if (dst)
+				dst->ops->redirect(dst, sk, skb);
+		}
 		goto out;
 	}
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f6fbd25..26d5718 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1037,6 +1037,7 @@
 	ipc6.hlimit = -1;
 	ipc6.tclass = -1;
 	ipc6.dontfrag = -1;
+	sockc.tsflags = sk->sk_tsflags;
 
 	/* destination address check */
 	if (sin6) {
@@ -1157,7 +1158,6 @@
 
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.flowi6_uid = sk->sk_uid;
-	sockc.tsflags = sk->sk_tsflags;
 
 	if (msg->msg_controllen) {
 		opt = &opt_space;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index c0f0750..ff750bb 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -388,7 +388,7 @@
 drop:
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
 	kfree_skb(skb);
-	return -1;
+	return 0;
 }
 
 /* Userspace will call sendmsg() on the tunnel socket to send L2TP
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 5b77377..1309e2c 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -956,7 +956,8 @@
 				/* fall through */
 			case NETDEV_CHANGE:
 				nh->nh_flags |= RTNH_F_LINKDOWN;
-				ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+				if (event != NETDEV_UNREGISTER)
+					ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
 				break;
 			}
 			if (event == NETDEV_UNREGISTER)
@@ -1696,6 +1697,7 @@
 	for (index = 0; index < platform_labels; index++) {
 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
 		RCU_INIT_POINTER(platform_label[index], NULL);
+		mpls_notify_route(net, index, rt, NULL, NULL);
 		mpls_rt_free(rt);
 	}
 	rtnl_unlock();
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 3bf0c59..0f5628a 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1814,8 +1814,11 @@
 }
 
 #ifdef DDEBUG
-/* This function is not in xt_qtaguid_print.c because of locks visibility */
-static void prdebug_full_state(int indent_level, const char *fmt, ...)
+/*
+ * This function is not in xt_qtaguid_print.c because of locks visibility.
+ * sock_tag_list_lock must be acquired before calling this function.
+ */
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...)
 {
 	va_list args;
 	char *fmt_buff;
@@ -1836,16 +1839,12 @@
 	kfree(buff);
 	va_end(args);
 
-	spin_lock_bh(&sock_tag_list_lock);
 	prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
-	spin_unlock_bh(&sock_tag_list_lock);
 
-	spin_lock_bh(&sock_tag_list_lock);
 	spin_lock_bh(&uid_tag_data_tree_lock);
 	prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
 	prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
 	spin_unlock_bh(&uid_tag_data_tree_lock);
-	spin_unlock_bh(&sock_tag_list_lock);
 
 	spin_lock_bh(&iface_stat_list_lock);
 	prdebug_iface_stat_list(indent_level, &iface_stat_list);
@@ -1854,7 +1853,7 @@
 	pr_debug("qtaguid: %s(): }\n", __func__);
 }
 #else
-static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...) {}
 #endif
 
 struct proc_ctrl_print_info {
@@ -1977,8 +1976,11 @@
 			   (u64)atomic64_read(&qtu_events.match_no_sk),
 			   (u64)atomic64_read(&qtu_events.match_no_sk_file));
 
-		/* Count the following as part of the last item_index */
-		prdebug_full_state(0, "proc ctrl");
+		/* Count the following as part of the last item_index. No need
+		 * to take sock_tag_list_lock here since it is already held
+		 * when starting the seq_file operation.
+		 */
+		prdebug_full_state_locked(0, "proc ctrl");
 	}
 
 	return 0;
@@ -2887,8 +2889,10 @@
 
 	sock_tag_tree_erase(&st_to_free_tree);
 
-	prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+	spin_lock_bh(&sock_tag_list_lock);
+	prdebug_full_state_locked(0, "%s(): pid=%u tgid=%u", __func__,
 			   current->pid, current->tgid);
+	spin_unlock_bh(&sock_tag_list_lock);
 	return 0;
 }
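
The xt_qtaguid hunks above switch prdebug_full_state() to a _locked variant whose caller owns sock_tag_list_lock. A small userspace sketch of that convention, with hypothetical names, built with -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int list_items = 3;                    /* hypothetical shared state */

/* must be called with list_lock held, mirroring the _locked suffix */
static void dump_state_locked(const char *why)
{
	printf("dump (%s): %d items\n", why, list_items);
}

int main(void)
{
	pthread_mutex_lock(&list_lock);
	dump_state_locked("caller already holds the lock");
	pthread_mutex_unlock(&list_lock);
	return 0;
}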
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index eab210b..48386bf 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,7 +367,6 @@
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
-		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
 		if (err) {
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index ae25ded..0792541 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -588,7 +588,7 @@
 			ipv4 = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
-			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
 					nla_get_in6_addr(a), is_mask);
 			ipv6 = true;
 			break;
@@ -649,6 +649,8 @@
 			tun_flags |= TUNNEL_VXLAN_OPT;
 			opts_type = type;
 			break;
+		case OVS_TUNNEL_KEY_ATTR_PAD:
+			break;
 		default:
 			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
 				  type);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 34de326..f2b04a7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3140,7 +3140,7 @@
 			    int addr_len)
 {
 	struct sock *sk = sock->sk;
-	char name[15];
+	char name[sizeof(uaddr->sa_data) + 1];
 
 	/*
 	 *	Check legality
@@ -3148,7 +3148,11 @@
 
 	if (addr_len != sizeof(struct sockaddr))
 		return -EINVAL;
-	strlcpy(name, uaddr->sa_data, sizeof(name));
+	/* uaddr->sa_data comes from userspace; it is not guaranteed to be
+	 * zero-terminated.
+	 */
+	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
+	name[sizeof(uaddr->sa_data)] = 0;
 
 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
 }
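
A minimal userspace sketch of the copy pattern the af_packet.c hunk adopts: a fixed-size name field supplied by userspace may lack a terminating NUL, so copy the whole field and terminate it explicitly instead of doing a string copy. The struct here is a hypothetical stand-in for struct sockaddr.

#include <stdio.h>
#include <string.h>

struct fake_sockaddr {                 /* stand-in for struct sockaddr */
	unsigned short family;
	char sa_data[14];
};

int main(void)
{
	struct fake_sockaddr ua;
	char name[sizeof(ua.sa_data) + 1];

	/* userspace may fill sa_data completely, with no terminating NUL */
	memset(&ua, 0, sizeof(ua));
	memcpy(ua.sa_data, "0123456789abcd", sizeof(ua.sa_data));

	/* copy every byte and terminate explicitly; a string copy would
	 * scan past the end looking for a NUL that may not exist */
	memcpy(name, ua.sa_data, sizeof(ua.sa_data));
	name[sizeof(ua.sa_data)] = '\0';

	printf("name=\"%s\"\n", name);
	return 0;
}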
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index c6c2a93..c651cfc 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -820,10 +820,8 @@
 		goto out_module_put;
 
 	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
-	if (err < 0)
+	if (err <= 0)
 		goto out_module_put;
-	if (err == 0)
-		goto noflush_out;
 
 	nla_nest_end(skb, nest);
 
@@ -840,7 +838,6 @@
 out_module_put:
 	module_put(ops->owner);
 err_out:
-noflush_out:
 	kfree_skb(skb);
 	return err;
 }
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index eae07a2..1191179 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -113,6 +113,9 @@
 	if (ret < 0)
 		return ret;
 
+	if (!tb[TCA_CONNMARK_PARMS])
+		return -EINVAL;
+
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
 	if (!tcf_hash_check(tn, parm->index, a, bind)) {
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index e7d9638..f85313d 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -228,7 +228,6 @@
 
 	return skb->len;
 nla_put_failure:
-	rcu_read_unlock();
 	nlmsg_trim(skb, b);
 	return -1;
 }
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 41adf36..b5c279b 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -504,6 +504,7 @@
 
 static void __exit strp_mod_exit(void)
 {
+	destroy_workqueue(strp_wq);
 }
 module_init(strp_mod_init);
 module_exit(strp_mod_exit);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index e2c37061..69502fa 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -486,7 +486,8 @@
 	struct ib_cq *sendcq, *recvcq;
 	int rc;
 
-	max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+			RPCRDMA_MAX_SEND_SGES);
 	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
 		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
 		return -ENOMEM;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d485..c36757e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@
 	if (s) {
 		struct unix_sock *u = unix_sk(s);
 
+		BUG_ON(!atomic_long_read(&u->inflight));
 		BUG_ON(list_empty(&u->link));
 
 		if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@
 	}
 	list_del(&cursor);
 
+	/* Now gc_candidates contains only garbage.  Restore original
+	 * inflight counters for these as well, and remove the skbuffs
+	 * which are creating the cycle(s).
+	 */
+	skb_queue_head_init(&hitlist);
+	list_for_each_entry(u, &gc_candidates, link)
+		scan_children(&u->sk, inc_inflight, &hitlist);
+
 	/* not_cycle_list contains those sockets which do not make up a
 	 * cycle.  Restore these to the inflight list.
 	 */
@@ -350,14 +359,6 @@
 		list_move_tail(&u->link, &gc_inflight_list);
 	}
 
-	/* Now gc_candidates contains only garbage.  Restore original
-	 * inflight counters for these as well, and remove the skbuffs
-	 * which are creating the cycle(s).
-	 */
-	skb_queue_head_init(&hitlist);
-	list_for_each_entry(u, &gc_candidates, link)
-	scan_children(&u->sk, inc_inflight, &hitlist);
-
 	spin_unlock(&unix_gc_lock);
 
 	/* Here we are. Hitlist is filled. Die. */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 034f70c..9ed6b0f 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -548,21 +548,17 @@
 {
 	int err;
 
-	rtnl_lock();
-
 	if (!cb->args[0]) {
 		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
 				  nl80211_fam.attrbuf, nl80211_fam.maxattr,
 				  nl80211_policy);
 		if (err)
-			goto out_unlock;
+			return err;
 
 		*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
 						   nl80211_fam.attrbuf);
-		if (IS_ERR(*wdev)) {
-			err = PTR_ERR(*wdev);
-			goto out_unlock;
-		}
+		if (IS_ERR(*wdev))
+			return PTR_ERR(*wdev);
 		*rdev = wiphy_to_rdev((*wdev)->wiphy);
 		/* 0 is the first index - add 1 to parse only once */
 		cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -572,10 +568,8 @@
 		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
 		struct wireless_dev *tmp;
 
-		if (!wiphy) {
-			err = -ENODEV;
-			goto out_unlock;
-		}
+		if (!wiphy)
+			return -ENODEV;
 		*rdev = wiphy_to_rdev(wiphy);
 		*wdev = NULL;
 
@@ -586,21 +580,11 @@
 			}
 		}
 
-		if (!*wdev) {
-			err = -ENODEV;
-			goto out_unlock;
-		}
+		if (!*wdev)
+			return -ENODEV;
 	}
 
 	return 0;
- out_unlock:
-	rtnl_unlock();
-	return err;
-}
-
-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
-{
-	rtnl_unlock();
 }
 
 /* IE validation */
@@ -2584,17 +2568,17 @@
 	int filter_wiphy = -1;
 	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
+	int ret;
 
 	rtnl_lock();
 	if (!cb->args[2]) {
 		struct nl80211_dump_wiphy_state state = {
 			.filter_wiphy = -1,
 		};
-		int ret;
 
 		ret = nl80211_dump_wiphy_parse(skb, cb, &state);
 		if (ret)
-			return ret;
+			goto out_unlock;
 
 		filter_wiphy = state.filter_wiphy;
 
@@ -2639,12 +2623,14 @@
 		wp_idx++;
 	}
  out:
-	rtnl_unlock();
-
 	cb->args[0] = wp_idx;
 	cb->args[1] = if_idx;
 
-	return skb->len;
+	ret = skb->len;
+ out_unlock:
+	rtnl_unlock();
+
+	return ret;
 }
 
 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4371,9 +4357,10 @@
 	int sta_idx = cb->args[2];
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out_err;
 
 	if (!wdev->netdev) {
 		err = -EINVAL;
@@ -4408,7 +4395,7 @@
 	cb->args[2] = sta_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 
 	return err;
 }
@@ -5179,9 +5166,10 @@
 	int path_idx = cb->args[2];
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out_err;
 
 	if (!rdev->ops->dump_mpath) {
 		err = -EOPNOTSUPP;
@@ -5214,7 +5202,7 @@
 	cb->args[2] = path_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 	return err;
 }
 
@@ -5374,9 +5362,10 @@
 	int path_idx = cb->args[2];
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out_err;
 
 	if (!rdev->ops->dump_mpp) {
 		err = -EOPNOTSUPP;
@@ -5409,7 +5398,7 @@
 	cb->args[2] = path_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 	return err;
 }
 
@@ -7559,9 +7548,12 @@
 	int start = cb->args[2], idx = 0;
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
-	if (err)
+	if (err) {
+		rtnl_unlock();
 		return err;
+	}
 
 	wdev_lock(wdev);
 	spin_lock_bh(&rdev->bss_lock);
@@ -7584,7 +7576,7 @@
 	wdev_unlock(wdev);
 
 	cb->args[2] = idx;
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 
 	return skb->len;
 }
@@ -7668,9 +7660,10 @@
 	int res;
 	bool radio_stats;
 
+	rtnl_lock();
 	res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (res)
-		return res;
+		goto out_err;
 
 	/* prepare_wdev_dump parsed the attributes */
 	radio_stats = nl80211_fam.attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7711,7 +7704,7 @@
 	cb->args[2] = survey_idx;
 	res = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 	return res;
 }
 
@@ -11302,17 +11295,13 @@
 	void *data = NULL;
 	unsigned int data_len = 0;
 
-	rtnl_lock();
-
 	if (cb->args[0]) {
 		/* subtract the 1 again here */
 		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
 		struct wireless_dev *tmp;
 
-		if (!wiphy) {
-			err = -ENODEV;
-			goto out_unlock;
-		}
+		if (!wiphy)
+			return -ENODEV;
 		*rdev = wiphy_to_rdev(wiphy);
 		*wdev = NULL;
 
@@ -11333,13 +11322,11 @@
 			  nl80211_fam.attrbuf, nl80211_fam.maxattr,
 			  nl80211_policy);
 	if (err)
-		goto out_unlock;
+		return err;
 
 	if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] ||
-	    !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
-		err = -EINVAL;
-		goto out_unlock;
-	}
+	    !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
+		return -EINVAL;
 
 	*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
 					   nl80211_fam.attrbuf);
@@ -11348,10 +11335,8 @@
 
 	*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
 					   nl80211_fam.attrbuf);
-	if (IS_ERR(*rdev)) {
-		err = PTR_ERR(*rdev);
-		goto out_unlock;
-	}
+	if (IS_ERR(*rdev))
+		return PTR_ERR(*rdev);
 
 	vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]);
 	subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11364,19 +11349,15 @@
 		if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
 			continue;
 
-		if (!vcmd->dumpit) {
-			err = -EOPNOTSUPP;
-			goto out_unlock;
-		}
+		if (!vcmd->dumpit)
+			return -EOPNOTSUPP;
 
 		vcmd_idx = i;
 		break;
 	}
 
-	if (vcmd_idx < 0) {
-		err = -EOPNOTSUPP;
-		goto out_unlock;
-	}
+	if (vcmd_idx < 0)
+		return -EOPNOTSUPP;
 
 	if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) {
 		data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11393,9 +11374,6 @@
 
 	/* keep rtnl locked in successful case */
 	return 0;
- out_unlock:
-	rtnl_unlock();
-	return err;
 }
 
 static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11410,9 +11388,10 @@
 	int err;
 	struct nlattr *vendor_data;
 
+	rtnl_lock();
 	err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out;
 
 	vcmd_idx = cb->args[2];
 	data = (void *)cb->args[3];
@@ -11421,18 +11400,26 @@
 
 	if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
 			   WIPHY_VENDOR_CMD_NEED_NETDEV)) {
-		if (!wdev)
-			return -EINVAL;
+		if (!wdev) {
+			err = -EINVAL;
+			goto out;
+		}
 		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
-		    !wdev->netdev)
-			return -EINVAL;
+		    !wdev->netdev) {
+			err = -EINVAL;
+			goto out;
+		}
 
 		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
 			if (wdev->netdev &&
-			    !netif_running(wdev->netdev))
-				return -ENETDOWN;
-			if (!wdev->netdev && !wdev->p2p_started)
-				return -ENETDOWN;
+			    !netif_running(wdev->netdev)) {
+				err = -ENETDOWN;
+				goto out;
+			}
+			if (!wdev->netdev && !wdev->p2p_started) {
+				err = -ENETDOWN;
+				goto out;
+			}
 		}
 	}
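
The nl80211.c hunks above move rtnl locking out of nl80211_prepare_wdev_dump() and into each dump caller, which then releases the lock on every exit path. A hedged userspace sketch of that structure, with invented names, built with -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;   /* plays the role of the RTNL */

/* called with big_lock held; never unlocks it itself */
static int prepare_dump(int arg)
{
	return (arg < 0) ? -22 : 0;      /* -EINVAL-style failure */
}

static int do_dump(int arg)
{
	int err;

	pthread_mutex_lock(&big_lock);
	err = prepare_dump(arg);
	if (err)
		goto out_unlock;

	printf("dumping %d\n", arg);     /* the actual dump work */
	err = 0;
out_unlock:
	pthread_mutex_unlock(&big_lock); /* one unlock covers every exit path */
	return err;
}

int main(void)
{
	printf("%d %d\n", do_dump(1), do_dump(-1));
	return 0;
}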
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index eeb23d2..bc0ebd4 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2340,6 +2340,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(regulatory_hint_user);
 
 int regulatory_hint_indoor(bool is_indoor, u32 portid)
 {
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index f6ced31..822ac90 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -28,9 +28,6 @@
 bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region);
 enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
 
-int regulatory_hint_user(const char *alpha2,
-			 enum nl80211_user_reg_hint_type user_reg_hint_type);
-
 /**
  * regulatory_hint_indoor - hint operation in indoor env. or not
  * @is_indoor: if true indicates that user space thinks that the
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5bf7e1bf..e0437a7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3062,6 +3062,11 @@
 {
 	int rv;
 
+	/* Initialize the per-net locks here */
+	spin_lock_init(&net->xfrm.xfrm_state_lock);
+	spin_lock_init(&net->xfrm.xfrm_policy_lock);
+	mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
 	rv = xfrm_statistics_init(net);
 	if (rv < 0)
 		goto out_statistics;
@@ -3078,11 +3083,6 @@
 	if (rv < 0)
 		goto out;
 
-	/* Initialize the per-net locks here */
-	spin_lock_init(&net->xfrm.xfrm_state_lock);
-	spin_lock_init(&net->xfrm.xfrm_policy_lock);
-	mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
 	return 0;
 
 out:
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 671a1d0..a7e27e1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@
 	up = nla_data(rp);
 	ulen = xfrm_replay_state_esn_len(up);
 
-	if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+	/* Check the overall length and the internal bitmap length to avoid
+	 * potential overflow. */
+	if (nla_len(rp) < ulen ||
+	    xfrm_replay_state_esn_len(replay_esn) != ulen ||
+	    replay_esn->bmp_len != up->bmp_len)
+		return -EINVAL;
+
+	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
 		return -EINVAL;
 
 	return 0;
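
An illustrative userspace sketch of the validation the xfrm_user.c hunk tightens: before accepting a userspace-supplied ESN replay state, check the attribute length, the overall structure length, the bitmap length, and that the replay window fits inside the bitmap. The structure below is a simplified stand-in for xfrm_replay_state_esn.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct esn_state {                       /* simplified stand-in for xfrm_replay_state_esn */
	uint32_t bmp_len;                /* bitmap length in 32-bit words */
	uint32_t replay_window;
	/* uint32_t bmp[]; follows in the real structure */
};

static size_t esn_len(const struct esn_state *s)
{
	return sizeof(*s) + s->bmp_len * sizeof(uint32_t);
}

static int esn_verify(const struct esn_state *cur, const struct esn_state *up,
		      size_t attr_len)
{
	if (attr_len < esn_len(up))
		return -1;               /* attribute shorter than it claims */
	if (esn_len(cur) != esn_len(up) || cur->bmp_len != up->bmp_len)
		return -1;               /* sizes disagree: a later bitmap copy could overflow */
	if (up->replay_window > up->bmp_len * 32)
		return -1;               /* window does not fit in bmp_len 32-bit words */
	return 0;
}

int main(void)
{
	struct esn_state cur = { .bmp_len = 4, .replay_window = 128 };
	struct esn_state bad = { .bmp_len = 4, .replay_window = 4096 };

	printf("good=%d bad=%d\n",
	       esn_verify(&cur, &cur, esn_len(&cur)),
	       esn_verify(&cur, &bad, esn_len(&bad)));
	return 0;
}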
diff --git a/security/security.c b/security/security.c
index f825304..1ba5274 100644
--- a/security/security.c
+++ b/security/security.c
@@ -508,6 +508,7 @@
 		return 0;
 	return call_int_hook(path_chown, 0, path, uid, gid);
 }
+EXPORT_SYMBOL(security_path_chown);
 
 int security_path_chroot(const struct path *path)
 {
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 8dd39fe..bbba7be 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -375,7 +375,8 @@
 		 * the elapsed time to detect xruns.
 		 */
 		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
-		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
+		if ((jdelta < runtime->hw_ptr_buffer_jiffies / 2) ||
+		    (runtime->hw_ptr_buffer_jiffies <= 0))
 			goto no_delta_check;
 		hdelta = jdelta - delta * HZ / runtime->rate;
 		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 4c93520..f3b1d7f 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1832,6 +1832,7 @@
 	     info->output_pool != client->pool->size)) {
 		if (snd_seq_write_pool_allocated(client)) {
 			/* remove all existing cells */
+			snd_seq_pool_mark_closing(client->pool);
 			snd_seq_queue_client_leave_cells(client->number);
 			snd_seq_pool_done(client->pool);
 		}
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 86240d0..3f4efcb 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -70,6 +70,9 @@
 		return;
 	*fifo = NULL;
 
+	if (f->pool)
+		snd_seq_pool_mark_closing(f->pool);
+
 	snd_seq_fifo_clear(f);
 
 	/* wake up clients if any */
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index dfa5156..5847c44 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -414,6 +414,18 @@
 	return 0;
 }
 
+/* refuse further insertions into the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+	unsigned long flags;
+
+	if (snd_BUG_ON(!pool))
+		return;
+	spin_lock_irqsave(&pool->lock, flags);
+	pool->closing = 1;
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 /* remove events */
 int snd_seq_pool_done(struct snd_seq_pool *pool)
 {
@@ -424,10 +436,6 @@
 		return -EINVAL;
 
 	/* wait for closing all threads */
-	spin_lock_irqsave(&pool->lock, flags);
-	pool->closing = 1;
-	spin_unlock_irqrestore(&pool->lock, flags);
-
 	if (waitqueue_active(&pool->output_sleep))
 		wake_up(&pool->output_sleep);
 
@@ -484,6 +492,7 @@
 	*ppool = NULL;
 	if (pool == NULL)
 		return 0;
+	snd_seq_pool_mark_closing(pool);
 	snd_seq_pool_done(pool);
 	kfree(pool);
 	return 0;
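
A small sketch (userspace, pthreads, hypothetical names) of the shutdown ordering the seq_memory.c hunks establish: mark the pool closing under its lock first, so producers stop inserting, and only then drain it with snd_seq_pool_done():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t lock;
	bool closing;
	int cells;
};

static void pool_mark_closing(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	p->closing = true;               /* refuse further insertions */
	pthread_mutex_unlock(&p->lock);
}

static int pool_insert(struct pool *p)
{
	int ret = 0;

	pthread_mutex_lock(&p->lock);
	if (p->closing)
		ret = -1;                /* pool is shutting down */
	else
		p->cells++;
	pthread_mutex_unlock(&p->lock);
	return ret;
}

int main(void)
{
	struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pool_insert(&p);
	pool_mark_closing(&p);           /* done before draining, as in the patch */
	printf("insert after closing: %d\n", pool_insert(&p));
	return 0;
}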
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 4a2ec77..32f959c 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -84,6 +84,7 @@
 int snd_seq_pool_init(struct snd_seq_pool *pool);
 
 /* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
 int snd_seq_pool_done(struct snd_seq_pool *pool);
 
 /* create pool */
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index ab4cdab..79edd88 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1905,7 +1905,7 @@
 		return err;
 
 	/* Set DMA transfer mask */
-	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+	if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
 		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
 	} else {
 		dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
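
The cthw20k1.c hunk fixes an inverted return-value test: dma_set_mask() returns 0 on success, so the success branch needs the negation. A tiny sketch of that convention, using a hypothetical setter:

#include <stdio.h>

/* hypothetical stand-in for dma_set_mask(): 0 on success, negative on failure */
static int set_mask(unsigned int bits)
{
	return (bits <= 32) ? 0 : -1;
}

int main(void)
{
	unsigned int bits = 64;

	if (!set_mask(bits)) {           /* branch taken only when the wide mask is accepted */
		printf("using %u-bit mask\n", bits);
	} else {
		set_mask(32);            /* fall back to the 32-bit mask */
		printf("fell back to 32-bit mask\n");
	}
	return 0;
}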
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0c62b1d..112caa2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6058,6 +6058,8 @@
 		ALC295_STANDARD_PINS,
 		{0x17, 0x21014040},
 		{0x18, 0x21a19050}),
+	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC295_STANDARD_PINS),
 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC298_STANDARD_PINS,
 		{0x17, 0x90170110}),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 9685b02..b4867ff 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -927,11 +927,18 @@
 
 config AUDIO_EXT_CLK
 	tristate
-	default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_MSM8X16_WCD=y
+	default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_SDM660_CDC=y
 
 config SND_SOC_WCD_MBHC
 	tristate
-	default y if (SND_SOC_MSM8909_WCD=y || SND_SOC_MSM8X16_WCD=y || SND_SOC_WCD9335=y) && SND_SOC_MDMCALIFORNIUM!=y
+	default y if (SND_SOC_MSM8909_WCD=y || SND_SOC_SDM660_CDC=y || SND_SOC_WCD9335=y) && SND_SOC_MDMCALIFORNIUM!=y
+	select SND_SOC_WCD_MBHC_LEGACY
+
+config SND_SOC_WCD_MBHC_LEGACY
+	tristate
+
+config SND_SOC_WCD_MBHC_ADC
+	tristate
 
 config SND_SOC_WCD_DSP_MGR
 	tristate
@@ -1159,11 +1166,12 @@
 
 config SND_SOC_MSM_HDMI_CODEC_RX
 	bool "HDMI Audio Playback"
-	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998)
+	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
 	help
 	HDMI audio drivers should be built only if the platform
         supports hdmi panel.
 
-source "sound/soc/codecs/msm8x16/Kconfig"
+source "sound/soc/codecs/sdm660_cdc/Kconfig"
+source "sound/soc/codecs/msm_sdw/Kconfig"
 
 endmenu
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 78db388..8c84460 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -173,7 +173,11 @@
 endif
 snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
 snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
-snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
+ifeq ($(CONFIG_SND_SOC_WCD_MBHC_LEGACY), y)
+	snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-legacy.o
+else ifeq ($(CONFIG_SND_SOC_WCD_MBHC_ADC), y)
+	snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-adc.o
+endif
 snd-soc-wsa881x-analog-objs := wsa881x-analog.o wsa881x-tables-analog.o
 snd-soc-wsa881x-analog-objs += wsa881x-regmap-analog.o wsa881x-irq.o
 snd-soc-wcd-dsp-utils-objs := wcd-dsp-utils.o
@@ -482,4 +486,5 @@
 obj-$(CONFIG_SND_SOC_MAX9877)	+= snd-soc-max9877.o
 obj-$(CONFIG_SND_SOC_MAX98504)	+= snd-soc-max98504.o
 obj-$(CONFIG_SND_SOC_TPA6130A2)	+= snd-soc-tpa6130a2.o
-obj-y += msm8x16/
+obj-y += sdm660_cdc/
+obj-y += msm_sdw/
diff --git a/sound/soc/codecs/msm8x16/Makefile b/sound/soc/codecs/msm8x16/Makefile
deleted file mode 100644
index 1e4522f..0000000
--- a/sound/soc/codecs/msm8x16/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-snd-soc-msm8952-wcd-objs := msm8x16-wcd.o msm8x16-wcd-tables.o msm89xx-regmap.o
-obj-$(CONFIG_SND_SOC_MSM8X16_WCD)      += snd-soc-msm8952-wcd.o msm8916-wcd-irq.o
diff --git a/sound/soc/codecs/msm8x16/msm8x16-wcd-tables.c b/sound/soc/codecs/msm8x16/msm8x16-wcd-tables.c
deleted file mode 100644
index b969639..0000000
--- a/sound/soc/codecs/msm8x16/msm8x16-wcd-tables.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "msm8x16-wcd.h"
-
-const u8 msm89xx_pmic_cdc_reg_readable[MSM89XX_PMIC_CDC_CACHE_SIZE] = {
-		[MSM89XX_PMIC_DIGITAL_REVISION1] = 1,
-		[MSM89XX_PMIC_DIGITAL_REVISION2] = 1,
-		[MSM89XX_PMIC_DIGITAL_PERPH_TYPE] = 1,
-		[MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_RT_STS] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_SET_TYPE] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_POLARITY_HIGH] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_POLARITY_LOW] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_EN_SET] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_EN_CLR] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_LATCHED_STS] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_PENDING_STS] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_MID_SEL] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_PRIORITY] = 1,
-		[MSM89XX_PMIC_DIGITAL_GPIO_MODE] = 1,
-		[MSM89XX_PMIC_DIGITAL_PIN_CTL_OE] = 1,
-		[MSM89XX_PMIC_DIGITAL_PIN_CTL_DATA] = 1,
-		[MSM89XX_PMIC_DIGITAL_PIN_STATUS] = 1,
-		[MSM89XX_PMIC_DIGITAL_HDRIVE_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_RST_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_CONN_RX1_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_CONN_RX2_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_CONN_RX3_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_CONN_RX_LB_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_RX_CTL1] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_RX_CTL2] = 1,
-		[MSM89XX_PMIC_DIGITAL_CDC_RX_CTL3] = 1,
-		[MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA0] = 1,
-		[MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA1] = 1,
-		[MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA2] = 1,
-		[MSM89XX_PMIC_DIGITAL_DEM_BYPASS_DATA3] = 1,
-		[MSM89XX_PMIC_DIGITAL_DIG_DEBUG_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_SPARE_0] = 1,
-		[MSM89XX_PMIC_DIGITAL_SPARE_1] = 1,
-		[MSM89XX_PMIC_DIGITAL_SPARE_2] = 1,
-		[MSM89XX_PMIC_ANALOG_REVISION1] = 1,
-		[MSM89XX_PMIC_ANALOG_REVISION2] = 1,
-		[MSM89XX_PMIC_ANALOG_REVISION3] = 1,
-		[MSM89XX_PMIC_ANALOG_REVISION4] = 1,
-		[MSM89XX_PMIC_ANALOG_PERPH_TYPE] = 1,
-		[MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_RT_STS] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_SET_TYPE] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_POLARITY_HIGH] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_POLARITY_LOW] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_EN_SET] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_EN_CLR] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_LATCHED_STS] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_PENDING_STS] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_MID_SEL] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_PRIORITY] = 1,
-		[MSM89XX_PMIC_ANALOG_MICB_1_EN] = 1,
-		[MSM89XX_PMIC_ANALOG_MICB_1_VAL] = 1,
-		[MSM89XX_PMIC_ANALOG_MICB_1_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS] = 1,
-		[MSM89XX_PMIC_ANALOG_MICB_2_EN] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_BTN3_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_BTN4_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT] = 1,
-		[MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_1_EN] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_2_EN] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_1] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_2] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV] = 1,
-		[MSM89XX_PMIC_ANALOG_TX_3_EN] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_EN] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_CLK] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_DEGLITCH] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_FBCTRL] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_BIAS] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_VCTRL] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_TEST] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_CLOCK_DIVIDER] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_COM_OCP_COUNT] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_LDO_OCP] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_EAR_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_ATEST] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_HPH_STATUS] = 1,
-		[MSM89XX_PMIC_ANALOG_RX_EAR_STATUS] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_DRV_CLIP_DET] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_DRV_MISC] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG] = 1,
-		[MSM89XX_PMIC_ANALOG_CURRENT_LIMIT] = 1,
-		[MSM89XX_PMIC_ANALOG_OUTPUT_VOLTAGE] = 1,
-		[MSM89XX_PMIC_ANALOG_BYPASS_MODE] = 1,
-		[MSM89XX_PMIC_ANALOG_BOOST_EN_CTL] = 1,
-		[MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO] = 1,
-		[MSM89XX_PMIC_ANALOG_RDSON_MAX_DUTY_CYCLE] = 1,
-		[MSM89XX_PMIC_ANALOG_BOOST_TEST1_1] = 1,
-		[MSM89XX_PMIC_ANALOG_BOOST_TEST_2] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_SAR_STATUS] = 1,
-		[MSM89XX_PMIC_ANALOG_SPKR_DRV_STATUS] = 1,
-		[MSM89XX_PMIC_ANALOG_PBUS_ADD_CSR] = 1,
-		[MSM89XX_PMIC_ANALOG_PBUS_ADD_SEL] = 1,
-		[MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL] = 1,
-		[MSM89XX_PMIC_DIGITAL_INT_LATCHED_CLR] = 1,
-		[MSM89XX_PMIC_ANALOG_INT_LATCHED_CLR] = 1,
-		[MSM89XX_PMIC_ANALOG_NCP_CLIM_ADDR] = 1,
-		[MSM89XX_PMIC_DIGITAL_SEC_ACCESS] = 1,
-		[MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3] = 1,
-		[MSM89XX_PMIC_ANALOG_SEC_ACCESS] = 1,
-};
-
-const u8 msm89xx_cdc_core_reg_readable[MSM89XX_CDC_CORE_CACHE_SIZE] = {
-		[MSM89XX_CDC_CORE_CLK_RX_RESET_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_RX_I2S_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_TX_I2S_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_OTHR_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_RX_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_MCLK_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_PDM_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_SD_CTL] = 1,
-		[MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_B4_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_B4_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_B4_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_B5_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_B5_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_B5_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_B6_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_B6_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_B6_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_TOP_GAIN_UPDATE] = 1,
-		[MSM89XX_CDC_CORE_TOP_CTL] = 1,
-		[MSM89XX_CDC_CORE_DEBUG_DESER1_CTL] = 1,
-		[MSM89XX_CDC_CORE_DEBUG_DESER2_CTL] = 1,
-		[MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG] = 1,
-		[MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG] = 1,
-		[MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX1_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX1_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX1_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX2_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX2_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX2_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX3_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_RX3_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_TX_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL] = 1,
-		[MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER] = 1,
-		[MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER] = 1,
-		[MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER] = 1,
-		[MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER] = 1,
-		[MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN] = 1,
-		[MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN] = 1,
-		[MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN] = 1,
-		[MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN] = 1,
-		[MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG] = 1,
-		[MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG] = 1,
-		[MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG] = 1,
-		[MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG] = 1,
-		[MSM89XX_CDC_CORE_TX1_MUX_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX2_MUX_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX3_MUX_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX4_MUX_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX1_CLK_FS_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX2_CLK_FS_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX3_CLK_FS_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX4_CLK_FS_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX1_DMIC_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX2_DMIC_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX3_DMIC_CTL] = 1,
-		[MSM89XX_CDC_CORE_TX4_DMIC_CTL] = 1,
-};
diff --git a/sound/soc/codecs/msm8x16/msm8x16-wcd.c b/sound/soc/codecs/msm8x16/msm8x16-wcd.c
deleted file mode 100644
index f76dde7..0000000
--- a/sound/soc/codecs/msm8x16/msm8x16-wcd.c
+++ /dev/null
@@ -1,6022 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/spmi.h>
-#include <linux/of_gpio.h>
-#include <linux/regulator/consumer.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/qdsp6v2/apr.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <sound/q6afe-v2.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <sound/q6core.h>
-#include <soc/qcom/subsystem_notif.h>
-#include "../../msm/msmfalcon-common.h"
-#include "../wcd-mbhc-v2.h"
-#include "msm8916-wcd-irq.h"
-#include "msm8x16-wcd.h"
-
-#define DRV_NAME "msm-codec"
-#define MSM89XX_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
-			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
-#define MSM89XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-		SNDRV_PCM_FMTBIT_S24_LE)
-
-#define NUM_INTERPOLATORS	3
-#define BITS_PER_REG		8
-#define MSM89XX_TX_PORT_NUMBER	4
-
-#define MSM89XX_I2S_MASTER_MODE_MASK	0x08
-#define MSM89XX_DIGITAL_CODEC_BASE_ADDR		0x771C000
-#define PMIC_SLAVE_ID_0		0
-#define PMIC_SLAVE_ID_1		1
-
-#define PMIC_MBG_OK		0x2C08
-#define PMIC_LDO7_EN_CTL	0x4646
-#define MASK_MSB_BIT		0x80
-
-#define CODEC_DT_MAX_PROP_SIZE			40
-#define MSM89XX_DIGITAL_CODEC_REG_SIZE		0x400
-#define MAX_ON_DEMAND_SUPPLY_NAME_LENGTH	64
-
-#define MCLK_RATE_9P6MHZ	9600000
-#define MCLK_RATE_12P288MHZ	12288000
-
-#define BUS_DOWN 1
-
-/*
- *50 Milliseconds sufficient for DSP bring up in the modem
- * after Sub System Restart
- */
-#define ADSP_STATE_READY_TIMEOUT_MS 50
-
-#define HPHL_PA_DISABLE (0x01 << 1)
-#define HPHR_PA_DISABLE (0x01 << 2)
-#define EAR_PA_DISABLE (0x01 << 3)
-#define SPKR_PA_DISABLE (0x01 << 4)
-
-enum {
-	BOOST_SWITCH = 0,
-	BOOST_ALWAYS,
-	BYPASS_ALWAYS,
-	BOOST_ON_FOREVER,
-};
-
-#define EAR_PMD 0
-#define EAR_PMU 1
-#define SPK_PMD 2
-#define SPK_PMU 3
-
-#define MICBIAS_DEFAULT_VAL 1800000
-#define MICBIAS_MIN_VAL 1600000
-#define MICBIAS_STEP_SIZE 50000
-
-#define DEFAULT_BOOST_VOLTAGE 5000
-#define MIN_BOOST_VOLTAGE 4000
-#define MAX_BOOST_VOLTAGE 5550
-#define BOOST_VOLTAGE_STEP 50
-
-#define MSM89XX_MBHC_BTN_COARSE_ADJ  100 /* in mV */
-#define MSM89XX_MBHC_BTN_FINE_ADJ 12 /* in mV */
-
-#define VOLTAGE_CONVERTER(value, min_value, step_size)\
-	((value - min_value)/step_size)
-
-enum {
-	AIF1_PB = 0,
-	AIF1_CAP,
-	AIF2_VIFEED,
-	NUM_CODEC_DAIS,
-};
-
-enum {
-	RX_MIX1_INP_SEL_ZERO = 0,
-	RX_MIX1_INP_SEL_IIR1,
-	RX_MIX1_INP_SEL_IIR2,
-	RX_MIX1_INP_SEL_RX1,
-	RX_MIX1_INP_SEL_RX2,
-	RX_MIX1_INP_SEL_RX3,
-};
-
-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
-static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
-static struct snd_soc_dai_driver msm8x16_wcd_i2s_dai[];
-/* By default enable the internal speaker boost */
-static bool spkr_boost_en = true;
-
-#define MSM89XX_ACQUIRE_LOCK(x) \
-	mutex_lock_nested(&x, SINGLE_DEPTH_NESTING)
-
-#define MSM89XX_RELEASE_LOCK(x) mutex_unlock(&x)
-
-
-/* Codec supports 2 IIR filters */
-enum {
-	IIR1 = 0,
-	IIR2,
-	IIR_MAX,
-};
-
-/* Codec supports 5 bands */
-enum {
-	BAND1 = 0,
-	BAND2,
-	BAND3,
-	BAND4,
-	BAND5,
-	BAND_MAX,
-};
-
-struct hpf_work {
-	struct msm8x16_wcd_priv *msm8x16_wcd;
-	u32 decimator;
-	u8 tx_hpf_cut_of_freq;
-	struct delayed_work dwork;
-};
-
-static struct hpf_work tx_hpf_work[NUM_DECIMATORS];
-
-static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
-	"cdc-vdd-mic-bias",
-};
-
-static unsigned long rx_digital_gain_reg[] = {
-	MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
-	MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
-	MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
-};
-
-static unsigned long tx_digital_gain_reg[] = {
-	MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
-	MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
-};
-
-enum {
-	MSM89XX_SPMI_DIGITAL = 0,
-	MSM89XX_SPMI_ANALOG,
-	MSM89XX_CODEC_CORE,
-	MAX_MSM89XX_DEVICE
-};
-
-static struct wcd_mbhc_register
-	wcd_mbhc_registers[WCD_MBHC_REG_FUNC_MAX] = {
-
-	WCD_MBHC_REGISTER("WCD_MBHC_L_DET_EN",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x80, 7, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_GND_DET_EN",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x40, 6, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_MECH_DETECTION_TYPE",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x20, 5, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_MIC_CLAMP_CTL",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x18, 3, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_DETECTION_TYPE",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x01, 0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_CTRL",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0xC0, 6, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x20, 5, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PLUG_TYPE",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x10, 4, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_GND_PLUG_TYPE",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x08, 3, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_SW_HPH_LP_100K_TO_GND",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x01, 0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_SCHMT_ISRC",
-			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x06, 1, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_FSM_EN",
-			  MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x80, 7, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_INSREM_DBNC",
-			  MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0xF0, 4, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_BTN_DBNC",
-			  MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0x0C, 2, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HS_VREF",
-			  MSM89XX_PMIC_ANALOG_MBHC_BTN3_CTL, 0x03, 0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HS_COMP_RESULT",
-			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x01,
-			  0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_MIC_SCHMT_RESULT",
-			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x02,
-			  1, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_SCHMT_RESULT",
-			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x08,
-			  3, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_SCHMT_RESULT",
-			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x04,
-			  2, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_OCP_FSM_EN",
-			  MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0x10, 4, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_BTN_RESULT",
-			  MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT, 0xFF, 0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_BTN_ISRC_CTL",
-			  MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x70, 4, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_RESULT",
-			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0xFF,
-			  0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_MICB_CTRL",
-			  MSM89XX_PMIC_ANALOG_MICB_2_EN, 0xC0, 6, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HPH_CNP_WG_TIME",
-			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFC, 2, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_PA_EN",
-			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x10, 4, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PA_EN",
-			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x20, 5, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_HPH_PA_EN",
-			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x30, 4, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_SWCH_LEVEL_REMOVE",
-			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT,
-			  0x10, 4, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
-			  MSM89XX_PMIC_ANALOG_MICB_2_EN, 0x20, 5, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN",
-			  0, 0, 0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS",
-			  0, 0, 0, 0),
-	WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL",
-			  0, 0, 0, 0),
-};
-
-struct msm8x16_wcd_spmi {
-	struct spmi_device *spmi;
-	int base;
-};
-
-/* Multiply gain_adj and offset by 1000 and 100 to avoid float arithmetic */
-static const struct wcd_imped_i_ref imped_i_ref[] = {
-	{I_h4_UA, 8, 800, 9000, 10000},
-	{I_pt5_UA, 10, 100, 990, 4600},
-	{I_14_UA, 17, 14, 1050, 700},
-	{I_l4_UA, 10, 4, 1165, 110},
-	{I_1_UA, 0, 1, 1200, 65},
-};
-
-static const struct wcd_mbhc_intr intr_ids = {
-	.mbhc_sw_intr =  MSM89XX_IRQ_MBHC_HS_DET,
-	.mbhc_btn_press_intr = MSM89XX_IRQ_MBHC_PRESS,
-	.mbhc_btn_release_intr = MSM89XX_IRQ_MBHC_RELEASE,
-	.mbhc_hs_ins_intr = MSM89XX_IRQ_MBHC_INSREM_DET1,
-	.mbhc_hs_rem_intr = MSM89XX_IRQ_MBHC_INSREM_DET,
-	.hph_left_ocp = MSM89XX_IRQ_HPHL_OCP,
-	.hph_right_ocp = MSM89XX_IRQ_HPHR_OCP,
-};
-
-static int msm_digcdc_clock_control(bool flag);
-static int msm8x16_wcd_dt_parse_vreg_info(struct device *dev,
-	struct msm8x16_wcd_regulator *vreg,
-	const char *vreg_name, bool ondemand);
-static struct msm8x16_wcd_pdata *msm8x16_wcd_populate_dt_pdata(
-	struct device *dev);
-static int msm8x16_wcd_enable_ext_mb_source(struct wcd_mbhc *mbhc,
-					    bool turn_on);
-static void msm8x16_trim_btn_reg(struct snd_soc_codec *codec);
-static void msm8x16_wcd_set_micb_v(struct snd_soc_codec *codec);
-static void msm8x16_wcd_set_boost_v(struct snd_soc_codec *codec);
-static void msm8x16_wcd_set_auto_zeroing(struct snd_soc_codec *codec,
-		bool enable);
-static void msm8x16_wcd_configure_cap(struct snd_soc_codec *codec,
-		bool micbias1, bool micbias2);
-static bool msm8x16_wcd_use_mb(struct snd_soc_codec *codec);
-
-struct msm8x16_wcd_spmi msm8x16_wcd_modules[MAX_MSM89XX_DEVICE];
-
-static void *adsp_state_notifier;
-
-static struct snd_soc_codec *registered_codec;
-static struct snd_soc_codec *registered_digcodec;
-
-static int get_codec_version(struct msm8x16_wcd_priv *msm8x16_wcd)
-{
-	if (msm8x16_wcd->codec_version == DIANGU)
-		return DIANGU;
-	else if (msm8x16_wcd->codec_version == CAJON_2_0)
-		return CAJON_2_0;
-	else if (msm8x16_wcd->codec_version == CAJON)
-		return CAJON;
-	else if (msm8x16_wcd->codec_version == CONGA)
-		return CONGA;
-	else if (msm8x16_wcd->pmic_rev == TOMBAK_2_0)
-		return TOMBAK_2_0;
-	else if (msm8x16_wcd->pmic_rev == TOMBAK_1_0)
-		return TOMBAK_1_0;
-
-	pr_err("%s: unsupported codec version\n", __func__);
-	return UNSUPPORTED;
-}
-
-static int msm_digcdc_clock_control(bool flag)
-{
-	int ret = -EINVAL;
-	struct msm_asoc_mach_data *pdata = NULL;
-
-	pdata = snd_soc_card_get_drvdata(registered_codec->component.card);
-
-	if (flag) {
-		mutex_lock(&pdata->cdc_int_mclk0_mutex);
-		if (atomic_read(&pdata->int_mclk0_enabled) == false) {
-			pdata->digital_cdc_core_clk.enable = 1;
-			ret = afe_set_lpass_clock_v2(
-					AFE_PORT_ID_INT0_MI2S_RX,
-					&pdata->digital_cdc_core_clk);
-			if (ret < 0) {
-				pr_err("failed to enable the INT_MCLK0\n");
-				goto err_mclk;
-			}
-			pr_err("enabled digital codec core clk\n");
-			atomic_set(&pdata->int_mclk0_enabled, true);
-			schedule_delayed_work(&pdata->disable_int_mclk0_work,
-					      50);
-		}
-err_mclk:
-		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
-		return ret;
-	}
-	return 0;
-}
-
-void enable_digital_callback(void *flag)
-{
-	msm_digcdc_clock_control(true);
-}
-
-void disable_digital_callback(void *flag)
-{
-	msm_digcdc_clock_control(false);
-}
-
-static int snd_soc_read_wrapper(struct snd_soc_codec *codec, u16 reg)
-{
-	int ret = -EINVAL;
-	struct msm8x16_wcd *msm8x16_wcd = codec->control_data;
-
-	pr_err("%s reg = %x\n", __func__, reg);
-	mutex_lock(&msm8x16_wcd->io_lock);
-	if (MSM89XX_IS_PMIC_CDC_REG(reg))
-		ret = snd_soc_read(codec, reg);
-	else if (MSM89XX_IS_CDC_CORE_REG(reg))
-		ret = snd_soc_read(registered_digcodec, reg);
-	mutex_unlock(&msm8x16_wcd->io_lock);
-
-	return ret;
-}
-
-static int snd_soc_write_wrapper(struct snd_soc_codec *codec, u16 reg, u8 val)
-{
-	int ret = -EINVAL;
-	struct msm8x16_wcd *msm8x16_wcd = codec->control_data;
-
-	pr_err("%s reg = %x\n", __func__, reg);
-	mutex_lock(&msm8x16_wcd->io_lock);
-	if (MSM89XX_IS_PMIC_CDC_REG(reg))
-		ret = snd_soc_write(codec, reg, val);
-	else if (MSM89XX_IS_CDC_CORE_REG(reg))
-		ret = snd_soc_write(registered_digcodec, reg, val);
-	mutex_unlock(&msm8x16_wcd->io_lock);
-
-	return ret;
-}
-
-static int snd_soc_update_bits_wrapper(struct snd_soc_codec *codec,
-			u16 reg, u8 mask, u8 val)
-{
-	int ret = -EINVAL;
-	struct msm8x16_wcd *msm8x16_wcd = codec->control_data;
-
-	pr_err("%s reg = %x\n", __func__, reg);
-	mutex_lock(&msm8x16_wcd->io_lock);
-	if (MSM89XX_IS_PMIC_CDC_REG(reg))
-		ret = snd_soc_update_bits(codec, reg, mask, val);
-	else if (MSM89XX_IS_CDC_CORE_REG(reg))
-		ret = snd_soc_update_bits(registered_digcodec, reg, mask, val);
-	mutex_unlock(&msm8x16_wcd->io_lock);
-
-	return ret;
-}
-
-static void wcd_mbhc_meas_imped(struct snd_soc_codec *codec,
-				s16 *impedance_l, s16 *impedance_r)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if ((msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_BOTH) ||
-		(msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHL)) {
-		/* Enable ZDET_L_MEAS_EN */
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-				0x08, 0x08);
-		/* Wait for 2ms for measurement to complete */
-		usleep_range(2000, 2100);
-		/* Read Left impedance value from Result1 */
-		*impedance_l = snd_soc_read_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
-		/* Enable ZDET_R_MEAS_EN */
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-				0x08, 0x00);
-	}
-	if ((msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_BOTH) ||
-		(msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)) {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x04, 0x04);
-		/* Wait for 2ms for measurement to complete */
-		usleep_range(2000, 2100);
-		/* Read Right impedance value from Result1 */
-		*impedance_r = snd_soc_read_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-				0x04, 0x00);
-	}
-}
-
-static void msm8x16_set_ref_current(struct snd_soc_codec *codec,
-				enum wcd_curr_ref curr_ref)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	pr_err("%s: curr_ref: %d\n", __func__, curr_ref);
-
-	if (get_codec_version(msm8x16_wcd) < CAJON)
-		pr_err("%s: Setting ref current not required\n", __func__);
-
-	msm8x16_wcd->imped_i_ref = imped_i_ref[curr_ref];
-
-	switch (curr_ref) {
-	case I_h4_UA:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_2_EN,
-			0x07, 0x01);
-		break;
-	case I_pt5_UA:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_2_EN,
-			0x07, 0x04);
-		break;
-	case I_14_UA:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_2_EN,
-			0x07, 0x03);
-		break;
-	case I_l4_UA:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_2_EN,
-			0x07, 0x01);
-		break;
-	case I_1_UA:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_2_EN,
-			0x07, 0x00);
-		break;
-	default:
-		pr_err("%s: No ref current set\n", __func__);
-		break;
-	}
-}
-
-static bool msm8x16_adj_ref_current(struct snd_soc_codec *codec,
-					s16 *impedance_l, s16 *impedance_r)
-{
-	int i = 2;
-	s16 compare_imp = 0;
-
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)
-		compare_imp = *impedance_r;
-	else
-		compare_imp = *impedance_l;
-
-	if (get_codec_version(msm8x16_wcd) < CAJON) {
-		pr_err("%s: Reference current adjustment not required\n",
-			 __func__);
-		return false;
-	}
-
-	while (compare_imp < imped_i_ref[i].min_val) {
-		msm8x16_set_ref_current(codec,
-					imped_i_ref[++i].curr_ref);
-		wcd_mbhc_meas_imped(codec,
-				impedance_l, impedance_r);
-		compare_imp = (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)
-				? *impedance_r : *impedance_l;
-	}
-
-	return true;
-}
-
-void msm8x16_wcd_spk_ext_pa_cb(
-		int (*codec_spk_ext_pa)(struct snd_soc_codec *codec,
-			int enable), struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	pr_err("%s: Enter\n", __func__);
-	msm8x16_wcd->codec_spk_ext_pa_cb = codec_spk_ext_pa;
-}
-
-void msm8x16_wcd_hph_comp_cb(
-	int (*codec_hph_comp_gpio)(bool enable), struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	pr_err("%s: Enter\n", __func__);
-	msm8x16_wcd->codec_hph_comp_gpio = codec_hph_comp_gpio;
-}
-
-static void msm8x16_wcd_compute_impedance(struct snd_soc_codec *codec, s16 l,
-				s16 r, uint32_t *zl, uint32_t *zr, bool high)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	uint32_t rl = 0, rr = 0;
-	struct wcd_imped_i_ref R = msm8x16_wcd->imped_i_ref;
-	int codec_ver = get_codec_version(msm8x16_wcd);
-
-	switch (codec_ver) {
-	case TOMBAK_1_0:
-	case TOMBAK_2_0:
-	case CONGA:
-		if (high) {
-			pr_err("%s: This plug has high range impedance\n",
-				 __func__);
-			rl = (uint32_t)(((100 * (l * 400 - 200))/96) - 230);
-			rr = (uint32_t)(((100 * (r * 400 - 200))/96) - 230);
-		} else {
-			pr_err("%s: This plug has low range impedance\n",
-				 __func__);
-			rl = (uint32_t)(((1000 * (l * 2 - 1))/1165) - (13/10));
-			rr = (uint32_t)(((1000 * (r * 2 - 1))/1165) - (13/10));
-		}
-		break;
-	case CAJON:
-	case CAJON_2_0:
-	case DIANGU:
-		if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHL) {
-			rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
-			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
-			rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
-			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
-		} else if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR) {
-			rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
-			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
-			rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
-			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
-		} else if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_NONE) {
-			rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
-			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
-			rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
-			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
-		} else {
-			rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
-			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
-			rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
-			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
-		}
-		break;
-	default:
-		pr_err("%s: No codec mentioned\n", __func__);
-		break;
-	}
-	*zl = rl;
-	*zr = rr;
-}
-
-static struct firmware_cal *msm8x16_wcd_get_hwdep_fw_cal(
-		struct wcd_mbhc *mbhc,
-		enum wcd_cal_type type)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd;
-	struct firmware_cal *hwdep_cal;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	if (!codec) {
-		pr_err("%s: NULL codec pointer\n", __func__);
-		return NULL;
-	}
-	msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	hwdep_cal = wcdcal_get_fw_cal(msm8x16_wcd->fw_data, type);
-	if (!hwdep_cal) {
-		dev_err(codec->dev, "%s: cal not sent by %d\n",
-				__func__, type);
-		return NULL;
-	}
-	return hwdep_cal;
-}
-
-static void wcd9xxx_spmi_irq_control(struct snd_soc_codec *codec,
-				     int irq, bool enable)
-{
-	if (enable)
-		wcd9xxx_spmi_enable_irq(irq);
-	else
-		wcd9xxx_spmi_disable_irq(irq);
-}
-
-static void msm8x16_mbhc_clk_setup(struct snd_soc_codec *codec,
-				   bool enable)
-{
-	if (enable)
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-				0x08, 0x08);
-	else
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-				0x08, 0x00);
-}
-
-static int msm8x16_mbhc_map_btn_code_to_num(struct snd_soc_codec *codec)
-{
-	int btn_code;
-	int btn;
-
-	btn_code = snd_soc_read_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
-
-	switch (btn_code) {
-	case 0:
-		btn = 0;
-		break;
-	case 1:
-		btn = 1;
-		break;
-	case 3:
-		btn = 2;
-		break;
-	case 7:
-		btn = 3;
-		break;
-	case 15:
-		btn = 4;
-		break;
-	default:
-		btn = -EINVAL;
-		break;
-	};
-
-	return btn;
-}
-
-static bool msm8x16_spmi_lock_sleep(struct wcd_mbhc *mbhc, bool lock)
-{
-	if (lock)
-		return wcd9xxx_spmi_lock_sleep();
-	wcd9xxx_spmi_unlock_sleep();
-	return 0;
-}
-
-static bool msm8x16_wcd_micb_en_status(struct wcd_mbhc *mbhc, int micb_num)
-{
-	if (micb_num == MIC_BIAS_1)
-		return (snd_soc_read_wrapper(mbhc->codec,
-				     MSM89XX_PMIC_ANALOG_MICB_1_EN) &
-			0x80);
-	if (micb_num == MIC_BIAS_2)
-		return (snd_soc_read_wrapper(mbhc->codec,
-				     MSM89XX_PMIC_ANALOG_MICB_2_EN) &
-			0x80);
-	return false;
-}
-
-static void msm8x16_wcd_enable_master_bias(struct snd_soc_codec *codec,
-					   bool enable)
-{
-	if (enable)
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
-			0x30, 0x30);
-	else
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
-			0x30, 0x00);
-}
-
-static void msm8x16_wcd_mbhc_common_micb_ctrl(struct snd_soc_codec *codec,
-					      int event, bool enable)
-{
-	u16 reg;
-	u8 mask;
-	u8 val;
-
-	switch (event) {
-	case MBHC_COMMON_MICB_PRECHARGE:
-		reg = MSM89XX_PMIC_ANALOG_MICB_1_CTL;
-		mask = 0x60;
-		val = (enable ? 0x60 : 0x00);
-		break;
-	case MBHC_COMMON_MICB_SET_VAL:
-		reg = MSM89XX_PMIC_ANALOG_MICB_1_VAL;
-		mask = 0xFF;
-		val = (enable ? 0xC0 : 0x00);
-		break;
-	case MBHC_COMMON_MICB_TAIL_CURR:
-		reg = MSM89XX_PMIC_ANALOG_MICB_1_EN;
-		mask = 0x04;
-		val = (enable ? 0x04 : 0x00);
-		break;
-	}
-	snd_soc_update_bits_wrapper(codec, reg, mask, val);
-}
-
-static void msm8x16_wcd_mbhc_internal_micbias_ctrl(struct snd_soc_codec *codec,
-						   int micbias_num, bool enable)
-{
-	if (micbias_num == 1) {
-		if (enable)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS,
-				0x10, 0x10);
-		else
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS,
-				0x10, 0x00);
-	}
-}
-
-static bool msm8x16_wcd_mbhc_hph_pa_on_status(struct snd_soc_codec *codec)
-{
-	return (snd_soc_read_wrapper(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN) &
-		0x30) ? true : false;
-}
-
-static void msm8x16_wcd_mbhc_program_btn_thr(struct snd_soc_codec *codec,
-					     s16 *btn_low, s16 *btn_high,
-					     int num_btn, bool is_micbias)
-{
-	int i;
-	u32 coarse, fine, reg_val;
-	u16 reg_addr = MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL;
-	s16 *btn_voltage;
-
-	btn_voltage = ((is_micbias) ? btn_high : btn_low);
-
-	for (i = 0; i < num_btn; i++) {
-		coarse = (btn_voltage[i] / MSM89XX_MBHC_BTN_COARSE_ADJ);
-		fine = ((btn_voltage[i] % MSM89XX_MBHC_BTN_COARSE_ADJ) /
-				MSM89XX_MBHC_BTN_FINE_ADJ);
-
-		reg_val = (coarse << 5) | (fine << 2);
-		snd_soc_update_bits_wrapper(codec, reg_addr, 0xFC, reg_val);
-		pr_err("%s: coarse: %d fine: %d reg_addr: %x reg_val: %x\n",
-			  __func__, coarse, fine, reg_addr, reg_val);
-		reg_addr++;
-	}
-}
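
As a worked example of the coarse/fine split above, assuming purely for illustration that MSM89XX_MBHC_BTN_COARSE_ADJ is 100 mV and MSM89XX_MBHC_BTN_FINE_ADJ is 12 mV (the actual macro values are defined elsewhere in the driver): a 237 mV threshold gives coarse = 237 / 100 = 2 and fine = (237 % 100) / 12 = 3, so reg_val = (2 << 5) | (3 << 2) = 0x4C, which is then masked into bits 7:2 of the button threshold register for that index.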
-
-static void msm8x16_wcd_mbhc_calc_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
-					    uint32_t *zr)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	s16 impedance_l, impedance_r;
-	s16 impedance_l_fixed;
-	s16 reg0, reg1, reg2, reg3, reg4;
-	bool high = false;
-	bool min_range_used =  false;
-
-	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
-	reg0 = snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER);
-	reg1 = snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL);
-	reg2 = snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2);
-	reg3 = snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_2_EN);
-	reg4 = snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL);
-
-	msm8x16_wcd->imped_det_pin = WCD_MBHC_DET_BOTH;
-	mbhc->hph_type = WCD_MBHC_HPH_NONE;
-
-	/* Disable FSM and micbias and enable pullup */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x80, 0x00);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_2_EN,
-			0xA5, 0x25);
-	/*
-	 * Enable legacy electrical detection current sources,
-	 * disable fast ramp, and enable manual switching of
-	 * extra capacitance.
-	 */
-	pr_err("%s: Setup for impedance det\n", __func__);
-
-	msm8x16_set_ref_current(codec, I_h4_UA);
-
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
-			0x06, 0x02);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER,
-			0x02, 0x02);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL,
-			0x02, 0x00);
-
-	pr_err("%s: Start performing impedance detection\n",
-		 __func__);
-
-	wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
-
-	if (impedance_l > 2 || impedance_r > 2) {
-		high = true;
-		if (!mbhc->mbhc_cfg->mono_stero_detection) {
-			/* Set ZDET_CHG to 0  to discharge ramp */
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-					0x02, 0x00);
-			/* wait 40ms for the discharge ramp to complete */
-			usleep_range(40000, 40100);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
-				0x03, 0x00);
-			msm8x16_wcd->imped_det_pin = (impedance_l > 2 &&
-						      impedance_r > 2) ?
-						      WCD_MBHC_DET_NONE :
-						      ((impedance_l > 2) ?
-						      WCD_MBHC_DET_HPHR :
-						      WCD_MBHC_DET_HPHL);
-			if (msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_NONE)
-				goto exit;
-		} else {
-			if (get_codec_version(msm8x16_wcd) >= CAJON) {
-				if (impedance_l == 63 && impedance_r == 63) {
-					pr_err("%s: HPHL and HPHR are floating\n",
-						 __func__);
-					msm8x16_wcd->imped_det_pin =
-							WCD_MBHC_DET_NONE;
-					mbhc->hph_type = WCD_MBHC_HPH_NONE;
-				} else if (impedance_l == 63
-					   && impedance_r < 63) {
-					pr_err("%s: Mono HS with HPHL floating\n",
-						 __func__);
-					msm8x16_wcd->imped_det_pin =
-							WCD_MBHC_DET_HPHR;
-					mbhc->hph_type = WCD_MBHC_HPH_MONO;
-				} else if (impedance_r == 63 &&
-					   impedance_l < 63) {
-					pr_err("%s: Mono HS with HPHR floating\n",
-						 __func__);
-					msm8x16_wcd->imped_det_pin =
-							WCD_MBHC_DET_HPHL;
-					mbhc->hph_type = WCD_MBHC_HPH_MONO;
-				} else if (impedance_l > 3 && impedance_r > 3 &&
-					(impedance_l == impedance_r)) {
-					snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
-					0x06, 0x06);
-					wcd_mbhc_meas_imped(codec, &impedance_l,
-							    &impedance_r);
-					if (impedance_r == impedance_l) {
-						pr_err("%s: Mono Headset\n",
-							  __func__);
-						msm8x16_wcd->imped_det_pin =
-							WCD_MBHC_DET_NONE;
-						mbhc->hph_type =
-							WCD_MBHC_HPH_MONO;
-					}
-				} else {
-					pr_err("%s: STEREO headset is found\n",
-						 __func__);
-					msm8x16_wcd->imped_det_pin =
-							WCD_MBHC_DET_BOTH;
-					mbhc->hph_type = WCD_MBHC_HPH_STEREO;
-				}
-			}
-		}
-	}
-
-	msm8x16_set_ref_current(codec, I_pt5_UA);
-	msm8x16_set_ref_current(codec, I_14_UA);
-
-	/* Enable RAMP_L, RAMP_R & ZDET_CHG*/
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
-			0x03, 0x03);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x02, 0x02);
-	/* wait for 50msec for the HW to apply ramp on HPHL and HPHR */
-	usleep_range(50000, 50100);
-	/* Enable ZDET_DISCHG_CAP_CTL  to add extra capacitance */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x01, 0x01);
-	/* wait for 5msec for the voltage to get stable */
-	usleep_range(5000, 5100);
-
-	wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
-
-	min_range_used = msm8x16_adj_ref_current(codec,
-						&impedance_l, &impedance_r);
-	if (!mbhc->mbhc_cfg->mono_stero_detection) {
-		/* Set ZDET_CHG to 0  to discharge ramp */
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-				0x02, 0x00);
-		/* wait for 40msec for the capacitor to discharge */
-		usleep_range(40000, 40100);
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
-				0x03, 0x00);
-		goto exit;
-	}
-
-	/*
-	 * The reference current is now set to the minimum range, or the
-	 * measured value is larger than the minimum value, so
-	 * min_range_used is true. If the headset is a mono headset with
-	 * either HPHL or HPHR floating, the mono/stereo detection has
-	 * already been done and there is no need to continue further.
-	 */
-
-	if (!min_range_used ||
-	    msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHL ||
-	    msm8x16_wcd->imped_det_pin == WCD_MBHC_DET_HPHR)
-		goto exit;
-
-	/* Disable Set ZDET_CONN_RAMP_L and enable ZDET_CONN_FIXED_L */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
-			0x02, 0x00);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
-			0x02, 0x02);
-	/* Set ZDET_CHG to 0  */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x02, 0x00);
-	/* wait for 40msec for the capacitor to discharge */
-	usleep_range(40000, 40100);
-
-	/* Set ZDET_CONN_RAMP_R to 0  */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
-			0x01, 0x00);
-	/* Enable ZDET_L_MEAS_EN */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x08, 0x08);
-	/* wait for 2msec for the HW to compute left impedance value */
-	usleep_range(2000, 2100);
-	/* Read Left impedance value from Result1 */
-	impedance_l_fixed = snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
-	/* Disable ZDET_L_MEAS_EN */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x08, 0x00);
-	/*
-	 * Assume impedance_l is L1, impedance_l_fixed is L2.
-	 * If the following condition is met, we can take this
-	 * headset as mono one with impedance of L2.
-	 * Otherwise, take it as stereo with impedance of L1.
-	 * Condition:
-	 * abs[(L2-0.5L1)/(L2+0.5L1)] < abs [(L2-L1)/(L2+L1)]
-	 */
-	if ((abs(impedance_l_fixed - impedance_l/2) *
-		(impedance_l_fixed + impedance_l)) >=
-		(abs(impedance_l_fixed - impedance_l) *
-		(impedance_l_fixed + impedance_l/2))) {
-		pr_err("%s: STEREO plug type detected\n",
-			 __func__);
-		mbhc->hph_type = WCD_MBHC_HPH_STEREO;
-	} else {
-		pr_err("%s: MONO plug type detected\n",
-			__func__);
-		mbhc->hph_type = WCD_MBHC_HPH_MONO;
-		impedance_l = impedance_l_fixed;
-	}
-	/* Enable ZDET_CHG  */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x02, 0x02);
-	/* wait for 10msec for the capacitor to charge */
-	usleep_range(10000, 10100);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
-			0x02, 0x02);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
-			0x02, 0x00);
-	/* Set ZDET_CHG to 0  to discharge HPHL */
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
-			0x02, 0x00);
-	/* wait for 40msec for the capacitor to discharge */
-	usleep_range(40000, 40100);
-	snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
-			0x02, 0x00);
-
-exit:
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, reg4);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MICB_2_EN, reg3);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL, reg1);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, reg0);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, reg2);
-	msm8x16_wcd_compute_impedance(codec, impedance_l, impedance_r,
-				      zl, zr, high);
-
-	pr_err("%s: RL %d ohm, RR %d ohm\n", __func__, *zl, *zr);
-	pr_err("%s: Impedance detection completed\n", __func__);
-}
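
The mono/stereo decision above compares cross-multiplied products rather than the ratios quoted in the comment. Writing L1 for impedance_l and L2 for impedance_l_fixed (both non-negative, with L1/2 computed by integer division), the two forms are equivalent because the denominators are positive; a sketch of the algebra:

\left|\frac{L_2 - L_1/2}{L_2 + L_1/2}\right| < \left|\frac{L_2 - L_1}{L_2 + L_1}\right|
\iff
\left|L_2 - L_1/2\right| \,(L_2 + L_1) < \left|L_2 - L_1\right| \,(L_2 + L_1/2)

The code treats the >= case of the cross-multiplied comparison as a stereo plug (keeping L1) and the < case as mono (reporting L2 instead).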
-
-static int msm8x16_register_notifier(struct wcd_mbhc *mbhc,
-				     struct notifier_block *nblock,
-				     bool enable)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	struct msm8x16_wcd_priv *msm8x16_wcd =
-		snd_soc_codec_get_drvdata(codec);
-
-	if (enable)
-		return blocking_notifier_chain_register(&msm8x16_wcd->notifier,
-							nblock);
-	return blocking_notifier_chain_unregister(
-			&msm8x16_wcd->notifier,	nblock);
-}
-
-static int msm8x16_wcd_request_irq(struct snd_soc_codec *codec,
-				   int irq, irq_handler_t handler,
-				   const char *name, void *data)
-{
-	return wcd9xxx_spmi_request_irq(irq, handler, name, data);
-}
-
-static int msm8x16_wcd_free_irq(struct snd_soc_codec *codec,
-				int irq, void *data)
-{
-	return wcd9xxx_spmi_free_irq(irq, data);
-}
-
-static const struct wcd_mbhc_cb mbhc_cb = {
-	.enable_mb_source = msm8x16_wcd_enable_ext_mb_source,
-	.trim_btn_reg = msm8x16_trim_btn_reg,
-	.compute_impedance = msm8x16_wcd_mbhc_calc_impedance,
-	.set_micbias_value = msm8x16_wcd_set_micb_v,
-	.set_auto_zeroing = msm8x16_wcd_set_auto_zeroing,
-	.get_hwdep_fw_cal = msm8x16_wcd_get_hwdep_fw_cal,
-	.set_cap_mode = msm8x16_wcd_configure_cap,
-	.register_notifier = msm8x16_register_notifier,
-	.request_irq = msm8x16_wcd_request_irq,
-	.irq_control = wcd9xxx_spmi_irq_control,
-	.free_irq = msm8x16_wcd_free_irq,
-	.clk_setup = msm8x16_mbhc_clk_setup,
-	.map_btn_code_to_num = msm8x16_mbhc_map_btn_code_to_num,
-	.lock_sleep = msm8x16_spmi_lock_sleep,
-	.micbias_enable_status = msm8x16_wcd_micb_en_status,
-	.mbhc_bias = msm8x16_wcd_enable_master_bias,
-	.mbhc_common_micb_ctrl = msm8x16_wcd_mbhc_common_micb_ctrl,
-	.micb_internal = msm8x16_wcd_mbhc_internal_micbias_ctrl,
-	.hph_pa_on_status = msm8x16_wcd_mbhc_hph_pa_on_status,
-	.set_btn_thr = msm8x16_wcd_mbhc_program_btn_thr,
-	.extn_use_mb = msm8x16_wcd_use_mb,
-};
-
-static const uint32_t wcd_imped_val[] = {4, 8, 12, 13, 16,
-					20, 24, 28, 32,
-					36, 40, 44, 48};
-
-void msm8x16_notifier_call(struct snd_soc_codec *codec,
-				  const enum wcd_notify_event event)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	pr_err("%s: notifier call event %d\n", __func__, event);
-	blocking_notifier_call_chain(&msm8x16_wcd->notifier, event,
-				     &msm8x16_wcd->mbhc);
-}
-
-static void msm8x16_wcd_boost_on(struct snd_soc_codec *codec)
-{
-	u8 dest = 0x00;
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if ((dest & MASK_MSB_BIT) == 0) {
-		pr_err("PMIC MBG not ON, enable codec hw_en MB bit again\n");
-		snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
-		/* Allow 1ms for PMIC MBG state to be updated */
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-	}
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3,
-		0x0F, 0x0F);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SEC_ACCESS,
-		0xA5);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3,
-		0x0F);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
-		0x30);
-	if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
-		snd_soc_write_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_CURRENT_LIMIT,
-			0x82);
-	} else {
-		snd_soc_write_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_CURRENT_LIMIT,
-			0xA2);
-	}
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-		0x69, 0x69);
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG,
-		0x01, 0x01);
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO,
-		0x88, 0x88);
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
-		0x03, 0x03);
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL,
-		0xE1, 0xE1);
-	if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-			0x20, 0x20);
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
-			0xDF, 0xDF);
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-	} else {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
-			0x40, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-			0x20, 0x20);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
-			0x80, 0x80);
-		usleep_range(500, 510);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
-			0x40, 0x40);
-		usleep_range(500, 510);
-	}
-}
-
-static void msm8x16_wcd_boost_off(struct snd_soc_codec *codec)
-{
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
-		0xDF, 0x5F);
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-		0x20, 0x00);
-}
-
-static void msm8x16_wcd_bypass_on(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
-		snd_soc_write_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SEC_ACCESS,
-			0xA5);
-		snd_soc_write_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3,
-			0x07);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x02, 0x02);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x01, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x40, 0x40);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x80, 0x80);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
-			0xDF, 0xDF);
-	} else {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-			0x20, 0x20);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x20, 0x20);
-	}
-}
-
-static void msm8x16_wcd_bypass_off(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (get_codec_version(msm8x16_wcd) < CAJON_2_0) {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
-			0x80, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x80, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x02, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x40, 0x00);
-	} else {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
-			0x20, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-			0x20, 0x00);
-	}
-}
-
-static void msm8x16_wcd_boost_mode_sequence(struct snd_soc_codec *codec,
-					int flag)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (flag == EAR_PMU) {
-		switch (msm8x16_wcd->boost_option) {
-		case BOOST_SWITCH:
-			if (msm8x16_wcd->ear_pa_boost_set) {
-				msm8x16_wcd_boost_off(codec);
-				msm8x16_wcd_bypass_on(codec);
-			}
-			break;
-		case BOOST_ALWAYS:
-			msm8x16_wcd_boost_on(codec);
-			break;
-		case BYPASS_ALWAYS:
-			msm8x16_wcd_bypass_on(codec);
-			break;
-		case BOOST_ON_FOREVER:
-			msm8x16_wcd_boost_on(codec);
-			break;
-		default:
-			pr_err("%s: invalid boost option: %d\n", __func__,
-						msm8x16_wcd->boost_option);
-			break;
-		}
-	} else if (flag == EAR_PMD) {
-		switch (msm8x16_wcd->boost_option) {
-		case BOOST_SWITCH:
-			if (msm8x16_wcd->ear_pa_boost_set)
-				msm8x16_wcd_bypass_off(codec);
-			break;
-		case BOOST_ALWAYS:
-			msm8x16_wcd_boost_off(codec);
-			/* 80ms for EAR boost to settle down */
-			msleep(80);
-			break;
-		case BYPASS_ALWAYS:
-			/* nothing to do as bypass on always */
-			break;
-		case BOOST_ON_FOREVER:
-			/* nothing to do as boost on forever */
-			break;
-		default:
-			pr_err("%s: invalid boost option: %d\n", __func__,
-						msm8x16_wcd->boost_option);
-			break;
-		}
-	} else if (flag == SPK_PMU) {
-		switch (msm8x16_wcd->boost_option) {
-		case BOOST_SWITCH:
-			if (msm8x16_wcd->spk_boost_set) {
-				msm8x16_wcd_bypass_off(codec);
-				msm8x16_wcd_boost_on(codec);
-			}
-			break;
-		case BOOST_ALWAYS:
-			msm8x16_wcd_boost_on(codec);
-			break;
-		case BYPASS_ALWAYS:
-			msm8x16_wcd_bypass_on(codec);
-			break;
-		case BOOST_ON_FOREVER:
-			msm8x16_wcd_boost_on(codec);
-			break;
-		default:
-			pr_err("%s: invalid boost option: %d\n", __func__,
-						msm8x16_wcd->boost_option);
-			break;
-		}
-	} else if (flag == SPK_PMD) {
-		switch (msm8x16_wcd->boost_option) {
-		case BOOST_SWITCH:
-			if (msm8x16_wcd->spk_boost_set) {
-				msm8x16_wcd_boost_off(codec);
-				/*
-				 * Add 40 ms sleep for the spk
-				 * boost to settle down
-				 */
-				msleep(40);
-			}
-			break;
-		case BOOST_ALWAYS:
-			msm8x16_wcd_boost_off(codec);
-			/*
-			 * Add 40 ms sleep for the spk
-			 * boost to settle down
-			 */
-			msleep(40);
-			break;
-		case BYPASS_ALWAYS:
-			/* nothing to do as bypass on always */
-			break;
-		case BOOST_ON_FOREVER:
-			/* nothing to do as boost on forever */
-			break;
-		default:
-			pr_err("%s: invalid boost option: %d\n", __func__,
-						msm8x16_wcd->boost_option);
-			break;
-		}
-	}
-}
-
-static int msm8x16_wcd_dt_parse_vreg_info(struct device *dev,
-	struct msm8x16_wcd_regulator *vreg, const char *vreg_name,
-	bool ondemand)
-{
-	int len, ret = 0;
-	const __be32 *prop;
-	char prop_name[CODEC_DT_MAX_PROP_SIZE];
-	struct device_node *regnode = NULL;
-	u32 prop_val;
-
-	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "%s-supply",
-		vreg_name);
-	regnode = of_parse_phandle(dev->of_node, prop_name, 0);
-
-	if (!regnode) {
-		dev_err(dev, "Looking up %s property in node %s failed\n",
-			prop_name, dev->of_node->full_name);
-		return -ENODEV;
-	}
-
-	dev_err(dev, "Looking up %s property in node %s\n",
-		prop_name, dev->of_node->full_name);
-
-	vreg->name = vreg_name;
-	vreg->ondemand = ondemand;
-
-	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
-		"qcom,%s-voltage", vreg_name);
-	prop = of_get_property(dev->of_node, prop_name, &len);
-
-	if (!prop || (len != (2 * sizeof(__be32)))) {
-		dev_err(dev, "%s %s property\n",
-			prop ? "invalid format" : "no", prop_name);
-		return -EINVAL;
-	}
-	vreg->min_uv = be32_to_cpup(&prop[0]);
-	vreg->max_uv = be32_to_cpup(&prop[1]);
-
-	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
-		"qcom,%s-current", vreg_name);
-
-	ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
-	if (ret) {
-		dev_err(dev, "Looking up %s property in node %s failed",
-			prop_name, dev->of_node->full_name);
-		return -EFAULT;
-	}
-	vreg->optimum_ua = prop_val;
-
-	dev_err(dev, "%s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n", vreg->name,
-		 vreg->min_uv, vreg->max_uv, vreg->optimum_ua, vreg->ondemand);
-	return 0;
-}
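
Each supply name handed to the parser above is expected to contribute three properties on the codec's device-tree node: a "<name>-supply" phandle, a two-cell "qcom,<name>-voltage" holding the minimum and maximum voltage in uV, and a "qcom,<name>-current" holding the load current in uA. A hypothetical fragment (supply name, regulator phandle and values are made up for illustration):

	cdc-vdda-cp-supply = <&pm8916_s4>;
	qcom,cdc-vdda-cp-voltage = <1800000 2100000>;
	qcom,cdc-vdda-cp-current = <770000>;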
-
-static void msm8x16_wcd_dt_parse_boost_info(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv =
-		snd_soc_codec_get_drvdata(codec);
-	const char *prop_name = "qcom,cdc-boost-voltage";
-	int boost_voltage, ret;
-
-	ret = of_property_read_u32(codec->dev->of_node, prop_name,
-			&boost_voltage);
-	if (ret) {
-		dev_err(codec->dev, "Looking up %s property in node %s failed\n",
-			prop_name, codec->dev->of_node->full_name);
-		boost_voltage = DEFAULT_BOOST_VOLTAGE;
-	}
-	if (boost_voltage < MIN_BOOST_VOLTAGE ||
-			boost_voltage > MAX_BOOST_VOLTAGE) {
-		dev_err(codec->dev,
-				"Incorrect boost voltage. Reverting to default\n");
-		boost_voltage = DEFAULT_BOOST_VOLTAGE;
-	}
-
-	msm8x16_wcd_priv->boost_voltage =
-		VOLTAGE_CONVERTER(boost_voltage, MIN_BOOST_VOLTAGE,
-				BOOST_VOLTAGE_STEP);
-	dev_err(codec->dev, "Boost voltage value is: %d\n",
-			boost_voltage);
-}
-
-static void msm8x16_wcd_dt_parse_micbias_info(struct device *dev,
-			struct wcd9xxx_micbias_setting *micbias)
-{
-	const char *prop_name = "qcom,cdc-micbias-cfilt-mv";
-	int ret;
-
-	ret = of_property_read_u32(dev->of_node, prop_name,
-			&micbias->cfilt1_mv);
-	if (ret) {
-		dev_err(dev, "Looking up %s property in node %s failed",
-			prop_name, dev->of_node->full_name);
-		micbias->cfilt1_mv = MICBIAS_DEFAULT_VAL;
-	}
-}
-
-static struct msm8x16_wcd_pdata *msm8x16_wcd_populate_dt_pdata(
-						struct device *dev)
-{
-	struct msm8x16_wcd_pdata *pdata;
-	int ret, static_cnt, ond_cnt, idx, i;
-	const char *name = NULL;
-	const char *static_prop_name = "qcom,cdc-static-supplies";
-	const char *ond_prop_name = "qcom,cdc-on-demand-supplies";
-
-	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return NULL;
-
-	static_cnt = of_property_count_strings(dev->of_node, static_prop_name);
-	if (static_cnt < 0) {
-		dev_err(dev, "%s: Failed to get static supplies %d\n", __func__,
-			static_cnt);
-		ret = -EINVAL;
-		goto err;
-	}
-
-	/* On-demand supply list is an optional property */
-	ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
-	if (ond_cnt < 0)
-		ond_cnt = 0;
-
-	WARN_ON(static_cnt <= 0 || ond_cnt < 0);
-	if ((static_cnt + ond_cnt) > ARRAY_SIZE(pdata->regulator)) {
-		dev_err(dev, "%s: Num of supplies %u > max supported %zd\n",
-				__func__, (static_cnt + ond_cnt),
-					ARRAY_SIZE(pdata->regulator));
-		ret = -EINVAL;
-		goto err;
-	}
-
-	for (idx = 0; idx < static_cnt; idx++) {
-		ret = of_property_read_string_index(dev->of_node,
-						    static_prop_name, idx,
-						    &name);
-		if (ret) {
-			dev_err(dev, "%s: of read string %s idx %d error %d\n",
-				__func__, static_prop_name, idx, ret);
-			goto err;
-		}
-
-		dev_err(dev, "%s: Found static cdc supply %s\n", __func__,
-			name);
-		ret = msm8x16_wcd_dt_parse_vreg_info(dev,
-						&pdata->regulator[idx],
-						name, false);
-		if (ret) {
-			dev_err(dev, "%s:err parsing vreg for %s idx %d\n",
-				__func__, name, idx);
-			goto err;
-		}
-	}
-
-	for (i = 0; i < ond_cnt; i++, idx++) {
-		ret = of_property_read_string_index(dev->of_node, ond_prop_name,
-						    i, &name);
-		if (ret) {
-			dev_err(dev, "%s: err parsing on_demand for %s idx %d\n",
-				__func__, ond_prop_name, i);
-			goto err;
-		}
-
-		dev_err(dev, "%s: Found on-demand cdc supply %s\n", __func__,
-			name);
-		ret = msm8x16_wcd_dt_parse_vreg_info(dev,
-						&pdata->regulator[idx],
-						name, true);
-		if (ret) {
-			dev_err(dev, "%s: err parsing vreg on_demand for %s idx %d\n",
-				__func__, name, idx);
-			goto err;
-		}
-	}
-	msm8x16_wcd_dt_parse_micbias_info(dev, &pdata->micbias);
-	return pdata;
-err:
-	devm_kfree(dev, pdata);
-	dev_err(dev, "%s: Failed to populate DT data ret = %d\n",
-		__func__, ret);
-	return NULL;
-}
-
-static int msm8x16_wcd_codec_enable_on_demand_supply(
-		struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	int ret = 0;
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	struct on_demand_supply *supply;
-
-	if (w->shift >= ON_DEMAND_SUPPLIES_MAX) {
-		dev_err(codec->dev, "%s: error, index > MAX on-demand supplies\n",
-			__func__);
-		ret = -EINVAL;
-		goto out;
-	}
-	dev_err(codec->dev, "%s: supply: %s event: %d ref: %d\n",
-		__func__, on_demand_supply_name[w->shift], event,
-		atomic_read(&msm8x16_wcd->on_demand_list[w->shift].ref));
-
-	supply = &msm8x16_wcd->on_demand_list[w->shift];
-	WARN_ONCE(!supply->supply, "%s isn't defined\n",
-		  on_demand_supply_name[w->shift]);
-	if (!supply->supply) {
-		dev_err(codec->dev, "%s: err supply not present ond for %d",
-			__func__, w->shift);
-		goto out;
-	}
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (atomic_inc_return(&supply->ref) == 1)
-			ret = regulator_enable(supply->supply);
-		if (ret)
-			dev_err(codec->dev, "%s: Failed to enable %s\n",
-				__func__,
-				on_demand_supply_name[w->shift]);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (atomic_read(&supply->ref) == 0) {
-			dev_err(codec->dev, "%s: %s supply has been disabled.\n",
-				 __func__, on_demand_supply_name[w->shift]);
-			goto out;
-		}
-		if (atomic_dec_return(&supply->ref) == 0) {
-			ret = regulator_disable(supply->supply);
-			if (ret)
-				dev_err(codec->dev, "%s: Failed to disable %s\n",
-					__func__,
-					on_demand_supply_name[w->shift]);
-		}
-		break;
-	default:
-		break;
-	}
-out:
-	return ret;
-}
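
The handler above reference-counts each on-demand supply: regulator_enable() runs only on the 0 -> 1 transition at SND_SOC_DAPM_PRE_PMU, and regulator_disable() only on the 1 -> 0 transition at SND_SOC_DAPM_POST_PMD, so several widgets can share one supply without unbalanced regulator calls. A representative DAPM supply widget wired to it might look like the sketch below (the widget name and shift are assumptions; the shift selects the on_demand_list entry the handler operates on):

	SND_SOC_DAPM_SUPPLY("MIC BIAS External", SND_SOC_NOPM,
			    ON_DEMAND_MICBIAS, 0,
			    msm8x16_wcd_codec_enable_on_demand_supply,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),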
-
-static int msm8x16_wcd_codec_enable_clock_block(struct snd_soc_codec *codec,
-						int enable)
-{
-	struct msm_asoc_mach_data *pdata = NULL;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-	if (enable) {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x01, 0x01);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x03);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30, 0x30);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x0C);
-		if (pdata->mclk_freq == MCLK_RATE_12P288MHZ)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x00);
-		else if (pdata->mclk_freq == MCLK_RATE_9P6MHZ)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x01);
-	} else {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x00);
-
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_charge_pump(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: event = %d\n", __func__, event);
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		msm8x16_wcd_codec_enable_clock_block(codec, 1);
-		if (!(strcmp(w->name, "EAR CP"))) {
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-					0x80, 0x80);
-			msm8x16_wcd_boost_mode_sequence(codec, EAR_PMU);
-		} else if (get_codec_version(msm8x16_wcd) == DIANGU) {
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-					0x80, 0x80);
-		} else {
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-					0xC0, 0xC0);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		if (!(strcmp(w->name, "EAR CP"))) {
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-					0x80, 0x00);
-			if (msm8x16_wcd->boost_option != BOOST_ALWAYS) {
-				dev_err(codec->dev,
-					"%s: boost_option:%d, tear down ear\n",
-					__func__, msm8x16_wcd->boost_option);
-				msm8x16_wcd_boost_mode_sequence(codec, EAR_PMD);
-			}
-			/*
-			 * Reset the PA select bit from EAR to HPH after the
-			 * EAR PA is disabled and the HPH DAC is disabled, to
-			 * reduce the ear turn-off pop and avoid HPH pop in
-			 * concurrency.
-			 */
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x80, 0x00);
-		} else {
-			if (get_codec_version(msm8x16_wcd) < DIANGU)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-					0x40, 0x00);
-			if (msm8x16_wcd->rx_bias_count == 0)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-					0x80, 0x00);
-			dev_err(codec->dev, "%s: rx_bias_count = %d\n",
-					__func__, msm8x16_wcd->rx_bias_count);
-		}
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_ear_pa_boost_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] =
-		(msm8x16_wcd->ear_pa_boost_set ? 1 : 0);
-	dev_err(codec->dev, "%s: msm8x16_wcd->ear_pa_boost_set = %d\n",
-			__func__, msm8x16_wcd->ear_pa_boost_set);
-	return 0;
-}
-
-static int msm8x16_wcd_ear_pa_boost_set(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd =
-			snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-	msm8x16_wcd->ear_pa_boost_set =
-		(ucontrol->value.integer.value[0] ? true : false);
-	return 0;
-}
-
-static int msm8x16_wcd_pa_gain_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	u8 ear_pa_gain;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
-	ear_pa_gain = snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_EAR_CTL);
-
-	ear_pa_gain = (ear_pa_gain >> 5) & 0x1;
-
-	if (ear_pa_gain == 0x00) {
-		ucontrol->value.integer.value[0] = 0;
-	} else if (ear_pa_gain == 0x01) {
-		ucontrol->value.integer.value[0] = 1;
-	} else  {
-		dev_err(codec->dev, "%s: ERROR: Unsupported Ear Gain = 0x%x\n",
-			__func__, ear_pa_gain);
-		return -EINVAL;
-	}
-
-	ucontrol->value.integer.value[0] = ear_pa_gain;
-	dev_err(codec->dev, "%s: ear_pa_gain = 0x%x\n",
-		__func__, ear_pa_gain);
-	return 0;
-}
-
-static int msm8x16_wcd_loopback_mode_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm_asoc_mach_data *pdata = NULL;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-	ucontrol->value.integer.value[0] = pdata->lb_mode;
-	dev_err(codec->dev, "%s: lb_mode = %d\n",
-		__func__, pdata->lb_mode);
-
-	return 0;
-}
-
-static int msm8x16_wcd_loopback_mode_put(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm_asoc_mach_data *pdata = NULL;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-
-	switch (ucontrol->value.integer.value[0]) {
-	case 0:
-		pdata->lb_mode = false;
-		break;
-	case 1:
-		pdata->lb_mode = true;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int msm8x16_wcd_pa_gain_put(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	u8 ear_pa_gain;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-
-	switch (ucontrol->value.integer.value[0]) {
-	case 0:
-		ear_pa_gain = 0x00;
-		break;
-	case 1:
-		ear_pa_gain = 0x20;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
-			    0x20, ear_pa_gain);
-	return 0;
-}
-
-static int msm8x16_wcd_hph_mode_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (msm8x16_wcd->hph_mode == NORMAL_MODE) {
-		ucontrol->value.integer.value[0] = 0;
-	} else if (msm8x16_wcd->hph_mode == HD2_MODE) {
-		ucontrol->value.integer.value[0] = 1;
-	} else  {
-		dev_err(codec->dev, "%s: ERROR: Default HPH Mode= %d\n",
-			__func__, msm8x16_wcd->hph_mode);
-	}
-
-	dev_err(codec->dev, "%s: msm8x16_wcd->hph_mode = %d\n", __func__,
-			msm8x16_wcd->hph_mode);
-	return 0;
-}
-
-static int msm8x16_wcd_hph_mode_set(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-
-	switch (ucontrol->value.integer.value[0]) {
-	case 0:
-		msm8x16_wcd->hph_mode = NORMAL_MODE;
-		break;
-	case 1:
-		if (get_codec_version(msm8x16_wcd) >= DIANGU)
-			msm8x16_wcd->hph_mode = HD2_MODE;
-		break;
-	default:
-		msm8x16_wcd->hph_mode = NORMAL_MODE;
-		break;
-	}
-	dev_err(codec->dev, "%s: msm8x16_wcd->hph_mode_set = %d\n",
-		__func__, msm8x16_wcd->hph_mode);
-	return 0;
-}
-
-static int msm8x16_wcd_boost_option_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (msm8x16_wcd->boost_option == BOOST_SWITCH) {
-		ucontrol->value.integer.value[0] = 0;
-	} else if (msm8x16_wcd->boost_option == BOOST_ALWAYS) {
-		ucontrol->value.integer.value[0] = 1;
-	} else if (msm8x16_wcd->boost_option == BYPASS_ALWAYS) {
-		ucontrol->value.integer.value[0] = 2;
-	} else if (msm8x16_wcd->boost_option == BOOST_ON_FOREVER) {
-		ucontrol->value.integer.value[0] = 3;
-	} else  {
-		dev_err(codec->dev, "%s: ERROR: Unsupported Boost option= %d\n",
-			__func__, msm8x16_wcd->boost_option);
-		return -EINVAL;
-	}
-
-	dev_err(codec->dev, "%s: msm8x16_wcd->boost_option = %d\n", __func__,
-			msm8x16_wcd->boost_option);
-	return 0;
-}
-
-static int msm8x16_wcd_boost_option_set(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-
-	switch (ucontrol->value.integer.value[0]) {
-	case 0:
-		msm8x16_wcd->boost_option = BOOST_SWITCH;
-		break;
-	case 1:
-		msm8x16_wcd->boost_option = BOOST_ALWAYS;
-		break;
-	case 2:
-		msm8x16_wcd->boost_option = BYPASS_ALWAYS;
-		msm8x16_wcd_bypass_on(codec);
-		break;
-	case 3:
-		msm8x16_wcd->boost_option = BOOST_ON_FOREVER;
-		msm8x16_wcd_boost_on(codec);
-		break;
-	default:
-		pr_err("%s: invalid boost option: %d\n", __func__,
-					msm8x16_wcd->boost_option);
-		return -EINVAL;
-	}
-	dev_err(codec->dev, "%s: msm8x16_wcd->boost_option_set = %d\n",
-		__func__, msm8x16_wcd->boost_option);
-	return 0;
-}
-
-static int msm8x16_wcd_ext_spk_boost_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (msm8x16_wcd->ext_spk_boost_set == false)
-		ucontrol->value.integer.value[0] = 0;
-	else
-		ucontrol->value.integer.value[0] = 1;
-
-	dev_err(codec->dev, "%s: msm8x16_wcd->ext_spk_boost_set = %d\n",
-				__func__, msm8x16_wcd->ext_spk_boost_set);
-	return 0;
-}
-
-static int msm8x16_wcd_ext_spk_boost_set(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-
-	switch (ucontrol->value.integer.value[0]) {
-	case 0:
-		msm8x16_wcd->ext_spk_boost_set = false;
-		break;
-	case 1:
-		msm8x16_wcd->ext_spk_boost_set = true;
-		break;
-	default:
-		return -EINVAL;
-	}
-	dev_err(codec->dev, "%s: msm8x16_wcd->ext_spk_boost_set = %d\n",
-		__func__, msm8x16_wcd->ext_spk_boost_set);
-	return 0;
-}
-
-static int msm8x16_wcd_get_iir_enable_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-
-	ucontrol->value.integer.value[0] =
-		(snd_soc_read_wrapper(codec,
-			    (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
-		(1 << band_idx)) != 0;
-
-	dev_err(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
-		iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[0]);
-	return 0;
-}
-
-static int msm8x16_wcd_put_iir_enable_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-	int value = ucontrol->value.integer.value[0];
-
-	/* Mask first 5 bits, 6-8 are reserved */
-	snd_soc_update_bits_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx),
-			    (1 << band_idx), (value << band_idx));
-
-	dev_err(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
-	  iir_idx, band_idx,
-		((snd_soc_read_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
-	  (1 << band_idx)) != 0));
-
-	return 0;
-}
-
-static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
-				   int iir_idx, int band_idx,
-				   int coeff_idx)
-{
-	uint32_t value = 0;
-
-	/* The address does not automatically update when reading */
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t)) & 0x7F);
-
-	value |= snd_soc_read_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx));
-
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t) + 1) & 0x7F);
-
-	value |= (snd_soc_read_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 8);
-
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t) + 2) & 0x7F);
-
-	value |= (snd_soc_read_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 16);
-
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t) + 3) & 0x7F);
-
-	/* Mask the top 2 bits since they are reserved */
-	value |= ((snd_soc_read_wrapper(codec,
-			(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL
-		+ 64 * iir_idx)) & 0x3f) << 24);
-
-	return value;
-}
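
Each IIR coefficient is stored as four bytes read back through the B2 data register, with the B1 register holding the byte address; the top two bits of the final byte are reserved. The unrolled sequence above is equivalent to the following loop, a sketch in which wr_addr()/rd_data() are hypothetical stand-ins for the B1 address write and B2 data read wrappers used in the driver:

static uint32_t assemble_coeff(int iir_idx, int band_idx, int coeff_idx)
{
	uint32_t base = (band_idx * BAND_MAX + coeff_idx) * sizeof(uint32_t);
	uint32_t value = 0;
	int byte;

	for (byte = 0; byte < 4; byte++) {
		wr_addr(iir_idx, (base + byte) & 0x7F);	/* select byte address */
		if (byte == 3)
			value |= (rd_data(iir_idx) & 0x3F) << 24; /* reserved bits masked */
		else
			value |= rd_data(iir_idx) << (8 * byte);
	}
	return value;
}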
-
-static int msm8x16_wcd_get_iir_band_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-
-	ucontrol->value.integer.value[0] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 0);
-	ucontrol->value.integer.value[1] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 1);
-	ucontrol->value.integer.value[2] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 2);
-	ucontrol->value.integer.value[3] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 3);
-	ucontrol->value.integer.value[4] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 4);
-
-	dev_err(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
-		"%s: IIR #%d band #%d b1 = 0x%x\n"
-		"%s: IIR #%d band #%d b2 = 0x%x\n"
-		"%s: IIR #%d band #%d a1 = 0x%x\n"
-		"%s: IIR #%d band #%d a2 = 0x%x\n",
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[0],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[1],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[2],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[3],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[4]);
-	return 0;
-}
-
-static void set_iir_band_coeff(struct snd_soc_codec *codec,
-				int iir_idx, int band_idx,
-				uint32_t value)
-{
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
-		(value & 0xFF));
-
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
-		(value >> 8) & 0xFF);
-
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
-		(value >> 16) & 0xFF);
-
-	/* Mask top 2 bits, 7-8 are reserved */
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
-		(value >> 24) & 0x3F);
-}
-
-static int msm8x16_wcd_put_iir_band_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-
-	/* Mask the top bit, it is reserved */
-	/* The address updates automatically for each B2 write */
-	snd_soc_write_wrapper(codec,
-		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
-		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
-
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-			   ucontrol->value.integer.value[0]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-			   ucontrol->value.integer.value[1]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-			   ucontrol->value.integer.value[2]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-			   ucontrol->value.integer.value[3]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-			   ucontrol->value.integer.value[4]);
-
-	dev_err(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
-		"%s: IIR #%d band #%d b1 = 0x%x\n"
-		"%s: IIR #%d band #%d b2 = 0x%x\n"
-		"%s: IIR #%d band #%d a1 = 0x%x\n"
-		"%s: IIR #%d band #%d a2 = 0x%x\n",
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 0),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 1),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 2),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 3),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 4));
-	return 0;
-}
-
-static int msm8x16_wcd_compander_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	int comp_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int rx_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-
-	dev_err(codec->dev, "%s: msm8x16_wcd->comp[%d]_enabled[%d] = %d\n",
-			__func__, comp_idx, rx_idx,
-			msm8x16_wcd->comp_enabled[rx_idx]);
-
-	ucontrol->value.integer.value[0] = msm8x16_wcd->comp_enabled[rx_idx];
-
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-
-	return 0;
-}
-
-static int msm8x16_wcd_compander_set(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	int comp_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int rx_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-	int value = ucontrol->value.integer.value[0];
-
-	dev_err(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
-		__func__, ucontrol->value.integer.value[0]);
-
-	if (get_codec_version(msm8x16_wcd) >= DIANGU) {
-		if (!value)
-			msm8x16_wcd->comp_enabled[rx_idx] = 0;
-		else
-			msm8x16_wcd->comp_enabled[rx_idx] = comp_idx;
-	}
-
-	dev_err(codec->dev, "%s: msm8x16_wcd->comp[%d]_enabled[%d] = %d\n",
-		__func__, comp_idx, rx_idx,
-		msm8x16_wcd->comp_enabled[rx_idx]);
-
-	return 0;
-}
-
-static const char * const msm8x16_wcd_loopback_mode_ctrl_text[] = {
-		"DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_loopback_mode_ctl_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_loopback_mode_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_ear_pa_boost_ctrl_text[] = {
-		"DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_ear_pa_boost_ctl_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_boost_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_ear_pa_gain_text[] = {
-		"POS_1P5_DB", "POS_6_DB"};
-static const struct soc_enum msm8x16_wcd_ear_pa_gain_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_gain_text),
-};
-
-static const char * const msm8x16_wcd_boost_option_ctrl_text[] = {
-		"BOOST_SWITCH", "BOOST_ALWAYS", "BYPASS_ALWAYS",
-		"BOOST_ON_FOREVER"};
-static const struct soc_enum msm8x16_wcd_boost_option_ctl_enum[] = {
-		SOC_ENUM_SINGLE_EXT(4, msm8x16_wcd_boost_option_ctrl_text),
-};
-static const char * const msm8x16_wcd_spk_boost_ctrl_text[] = {
-		"DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_spk_boost_ctl_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_spk_boost_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_ext_spk_boost_ctrl_text[] = {
-		"DISABLE", "ENABLE"};
-static const struct soc_enum msm8x16_wcd_ext_spk_boost_ctl_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ext_spk_boost_ctrl_text),
-};
-
-static const char * const msm8x16_wcd_hph_mode_ctrl_text[] = {
-		"NORMAL", "HD2"};
-static const struct soc_enum msm8x16_wcd_hph_mode_ctl_enum[] = {
-		SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(msm8x16_wcd_hph_mode_ctrl_text),
-			msm8x16_wcd_hph_mode_ctrl_text),
-};
-
-/* cut-off frequency for the high pass filter */
-static const char * const cf_text[] = {
-	"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
-};
-
-static const struct soc_enum cf_dec1_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX1_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec2_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX2_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_rxmix1_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX1_B4_CTL, 0, 3, cf_text);
-
-static const struct soc_enum cf_rxmix2_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX2_B4_CTL, 0, 3, cf_text);
-
-static const struct soc_enum cf_rxmix3_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX3_B4_CTL, 0, 3, cf_text);
-
-static const struct snd_kcontrol_new msm8x16_wcd_snd_controls[] = {
-
-	SOC_ENUM_EXT("RX HPH Mode", msm8x16_wcd_hph_mode_ctl_enum[0],
-		msm8x16_wcd_hph_mode_get, msm8x16_wcd_hph_mode_set),
-
-	SOC_ENUM_EXT("Boost Option", msm8x16_wcd_boost_option_ctl_enum[0],
-		msm8x16_wcd_boost_option_get, msm8x16_wcd_boost_option_set),
-
-	SOC_ENUM_EXT("EAR PA Boost", msm8x16_wcd_ear_pa_boost_ctl_enum[0],
-		msm8x16_wcd_ear_pa_boost_get, msm8x16_wcd_ear_pa_boost_set),
-
-	SOC_ENUM_EXT("EAR PA Gain", msm8x16_wcd_ear_pa_gain_enum[0],
-		msm8x16_wcd_pa_gain_get, msm8x16_wcd_pa_gain_put),
-
-	SOC_ENUM_EXT("Ext Spk Boost", msm8x16_wcd_ext_spk_boost_ctl_enum[0],
-		msm8x16_wcd_ext_spk_boost_get, msm8x16_wcd_ext_spk_boost_set),
-
-	SOC_ENUM_EXT("LOOPBACK Mode", msm8x16_wcd_loopback_mode_ctl_enum[0],
-		msm8x16_wcd_loopback_mode_get, msm8x16_wcd_loopback_mode_put),
-
-	SOC_SINGLE_TLV("ADC1 Volume", MSM89XX_PMIC_ANALOG_TX_1_EN, 3,
-					8, 0, analog_gain),
-	SOC_SINGLE_TLV("ADC2 Volume", MSM89XX_PMIC_ANALOG_TX_2_EN, 3,
-					8, 0, analog_gain),
-	SOC_SINGLE_TLV("ADC3 Volume", MSM89XX_PMIC_ANALOG_TX_3_EN, 3,
-					8, 0, analog_gain),
-
-	SOC_SINGLE_SX_TLV("RX1 Digital Volume",
-			  MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
-			0,  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX2 Digital Volume",
-			  MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
-			0,  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX3 Digital Volume",
-			  MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
-			0,  -84, 40, digital_gain),
-
-	SOC_SINGLE_SX_TLV("DEC1 Volume",
-			  MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
-			0,  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC2 Volume",
-			  MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
-			0,  -84, 40, digital_gain),
-
-	SOC_SINGLE_SX_TLV("IIR1 INP1 Volume",
-			  MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL,
-			0,  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR1 INP2 Volume",
-			  MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL,
-			0,  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR1 INP3 Volume",
-			  MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL,
-			0,  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR1 INP4 Volume",
-			  MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL,
-			0,  -84,	40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR2 INP1 Volume",
-			  MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL,
-			0,  -84, 40, digital_gain),
-
-	SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
-	SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
-
-	SOC_SINGLE("TX1 HPF Switch",
-		MSM89XX_CDC_CORE_TX1_MUX_CTL, 3, 1, 0),
-	SOC_SINGLE("TX2 HPF Switch",
-		MSM89XX_CDC_CORE_TX2_MUX_CTL, 3, 1, 0),
-
-	SOC_SINGLE("RX1 HPF Switch",
-		MSM89XX_CDC_CORE_RX1_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX2 HPF Switch",
-		MSM89XX_CDC_CORE_RX2_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX3 HPF Switch",
-		MSM89XX_CDC_CORE_RX3_B5_CTL, 2, 1, 0),
-
-	SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
-	SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
-	SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
-
-	SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0,
-	msm8x16_wcd_get_iir_enable_audio_mixer,
-	msm8x16_wcd_put_iir_enable_audio_mixer),
-
-	SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
-	msm8x16_wcd_get_iir_band_audio_mixer,
-	msm8x16_wcd_put_iir_band_audio_mixer),
-
-	SOC_SINGLE_EXT("COMP0 RX1", COMPANDER_1, MSM89XX_RX1, 1, 0,
-	msm8x16_wcd_compander_get, msm8x16_wcd_compander_set),
-
-	SOC_SINGLE_EXT("COMP0 RX2", COMPANDER_1, MSM89XX_RX2, 1, 0,
-	msm8x16_wcd_compander_get, msm8x16_wcd_compander_set),
-};
-
-static int tombak_hph_impedance_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	int ret;
-	uint32_t zl, zr;
-	bool hphr;
-	struct soc_multi_mixer_control *mc;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
-
-	hphr = mc->shift;
-	ret = wcd_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
-	if (ret)
-		pr_err("%s: Failed to get mbhc imped", __func__);
-	pr_err("%s: zl %u, zr %u\n", __func__, zl, zr);
-	ucontrol->value.integer.value[0] = hphr ? zr : zl;
-
-	return 0;
-}
-
-static const struct snd_kcontrol_new impedance_detect_controls[] = {
-	SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
-			tombak_hph_impedance_get, NULL),
-	SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
-			tombak_hph_impedance_get, NULL),
-};
-
-static int tombak_get_hph_type(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct msm8x16_wcd_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct wcd_mbhc *mbhc;
-
-	if (!priv) {
-		pr_err("%s: msm8x16-wcd private data is NULL\n",
-			 __func__);
-		return -EINVAL;
-	}
-
-	mbhc = &priv->mbhc;
-	if (!mbhc) {
-		pr_err("%s: mbhc not initialized\n", __func__);
-		return -EINVAL;
-	}
-
-	ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
-	pr_err("%s: hph_type = %u\n", __func__, mbhc->hph_type);
-
-	return 0;
-}
-
-static const struct snd_kcontrol_new hph_type_detect_controls[] = {
-	SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
-	tombak_get_hph_type, NULL),
-};
-
-static const char * const rx_mix1_text[] = {
-	"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
-};
-
-static const char * const rx_mix2_text[] = {
-	"ZERO", "IIR1", "IIR2"
-};
-
-static const char * const dec_mux_text[] = {
-	"ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
-};
-
-static const char * const dec3_mux_text[] = {
-	"ZERO", "DMIC3"
-};
-
-static const char * const dec4_mux_text[] = {
-	"ZERO", "DMIC4"
-};
-
-static const char * const adc2_mux_text[] = {
-	"ZERO", "INP2", "INP3"
-};
-
-static const char * const ext_spk_text[] = {
-	"Off", "On"
-};
-
-static const char * const wsa_spk_text[] = {
-	"ZERO", "WSA"
-};
-
-static const char * const rdac2_mux_text[] = {
-	"ZERO", "RX2", "RX1"
-};
-
-static const char * const iir_inp1_text[] = {
-	"ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
-};
-
-static const struct soc_enum adc2_enum =
-	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
-		ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
-
-static const struct soc_enum ext_spk_enum =
-	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
-		ARRAY_SIZE(ext_spk_text), ext_spk_text);
-
-static const struct soc_enum wsa_spk_enum =
-	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
-		ARRAY_SIZE(wsa_spk_text), wsa_spk_text);
-
-/* RX1 MIX1 */
-static const struct soc_enum rx_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
-		0, 6, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
-		3, 6, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp3_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B2_CTL,
-		0, 6, rx_mix1_text);
-
-/* RX1 MIX2 */
-static const struct soc_enum rx_mix2_inp1_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B3_CTL,
-		0, 3, rx_mix2_text);
-
-/* RX2 MIX1 */
-static const struct soc_enum rx2_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
-		0, 6, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
-		3, 6, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp3_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
-		0, 6, rx_mix1_text);
-
-/* RX2 MIX2 */
-static const struct soc_enum rx2_mix2_inp1_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B3_CTL,
-		0, 3, rx_mix2_text);
-
-/* RX3 MIX1 */
-static const struct soc_enum rx3_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
-		0, 6, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
-		3, 6, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp3_chain_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
-		0, 6, rx_mix1_text);
-
-/* DEC */
-static const struct soc_enum dec1_mux_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
-		0, 6, dec_mux_text);
-
-static const struct soc_enum dec2_mux_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
-		3, 6, dec_mux_text);
-
-static const struct soc_enum dec3_mux_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX3_MUX_CTL, 0,
-				ARRAY_SIZE(dec3_mux_text), dec3_mux_text);
-
-static const struct soc_enum dec4_mux_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX4_MUX_CTL, 0,
-				ARRAY_SIZE(dec4_mux_text), dec4_mux_text);
-
-static const struct soc_enum rdac2_mux_enum =
-	SOC_ENUM_SINGLE(MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
-		0, 3, rdac2_mux_text);
-
-static const struct soc_enum iir1_inp1_mux_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL,
-		0, 6, iir_inp1_text);
-
-static const struct soc_enum iir2_inp1_mux_enum =
-	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL,
-		0, 6, iir_inp1_text);
-
-static const struct snd_kcontrol_new ext_spk_mux =
-	SOC_DAPM_ENUM("Ext Spk Switch Mux", ext_spk_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp3_mux =
-	SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp3_mux =
-	SOC_DAPM_ENUM("RX2 MIX1 INP3 Mux", rx2_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp3_mux =
-	SOC_DAPM_ENUM("RX3 MIX1 INP3 Mux", rx3_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
-	SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
-	SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new tx_adc2_mux =
-	SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);
-
-static int msm8x16_wcd_put_dec_enum(struct snd_kcontrol *kcontrol,
-			      struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_dapm_widget_list *wlist =
-			dapm_kcontrol_get_wlist(kcontrol);
-	struct snd_soc_dapm_widget *w = wlist->widgets[0];
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
-	unsigned int dec_mux, decimator;
-	char *dec_name = NULL;
-	char *widget_name = NULL;
-	char *temp;
-	u16 tx_mux_ctl_reg;
-	u8 adc_dmic_sel = 0x0;
-	int ret = 0;
-	char *dec_num;
-
-	if (ucontrol->value.enumerated.item[0] >= e->items) {
-		dev_err(codec->dev, "%s: Invalid enum value: %d\n",
-			__func__, ucontrol->value.enumerated.item[0]);
-		return -EINVAL;
-	}
-	dec_mux = ucontrol->value.enumerated.item[0];
-
-	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
-	if (!widget_name) {
-		dev_err(codec->dev, "%s: failed to copy string\n",
-			__func__);
-		return -ENOMEM;
-	}
-	temp = widget_name;
-
-	dec_name = strsep(&widget_name, " ");
-	widget_name = temp;
-	if (!dec_name) {
-		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
-			__func__, w->name);
-		ret =  -EINVAL;
-		goto out;
-	}
-
-	dec_num = strpbrk(dec_name, "12");
-	if (dec_num == NULL) {
-		dev_err(codec->dev, "%s: Invalid DEC selected\n", __func__);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = kstrtouint(dec_num, 10, &decimator);
-	if (ret < 0) {
-		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
-			__func__, dec_name);
-		ret =  -EINVAL;
-		goto out;
-	}
-
-	dev_err(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n"
-		, __func__, w->name, decimator, dec_mux);
-
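-	/*
-	 * dec_mux indices 4 and 5 are DMIC1/DMIC2 in dec_mux_text, so select
-	 * the DMIC path in the TX mux control for those, the ADC path
-	 * otherwise.
-	 */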
-	switch (decimator) {
-	case 1:
-	case 2:
-		if ((dec_mux == 4) || (dec_mux == 5))
-			adc_dmic_sel = 0x1;
-		else
-			adc_dmic_sel = 0x0;
-		break;
-	default:
-		dev_err(codec->dev, "%s: Invalid Decimator = %u\n",
-			__func__, decimator);
-		ret = -EINVAL;
-		goto out;
-	}
-
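-	/* TX mux control registers are spaced 32 apart, one per decimator. */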
-	tx_mux_ctl_reg =
-		MSM89XX_CDC_CORE_TX1_MUX_CTL + 32 * (decimator - 1);
-
-	snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel);
-
-	ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
-
-out:
-	kfree(widget_name);
-	return ret;
-}
-
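-/*
- * Same as a standard DAPM enum control, but .put is overridden so the
- * ADC/DMIC select bit in the TX mux control register is updated along
- * with the mux routing (see msm8x16_wcd_put_dec_enum() above).
- */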
-#define MSM89XX_DEC_ENUM(xname, xenum) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
-	.info = snd_soc_info_enum_double, \
-	.get = snd_soc_dapm_get_enum_double, \
-	.put = msm8x16_wcd_put_dec_enum, \
-	.private_value = (unsigned long)&xenum }
-
-static const struct snd_kcontrol_new dec1_mux =
-	MSM89XX_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum);
-
-static const struct snd_kcontrol_new dec2_mux =
-	MSM89XX_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum);
-
-static const struct snd_kcontrol_new dec3_mux =
-	SOC_DAPM_ENUM("DEC3 MUX Mux", dec3_mux_enum);
-
-static const struct snd_kcontrol_new dec4_mux =
-	SOC_DAPM_ENUM("DEC4 MUX Mux", dec4_mux_enum);
-
-static const struct snd_kcontrol_new rdac2_mux =
-	SOC_DAPM_ENUM("RDAC2 MUX Mux", rdac2_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp1_mux =
-	SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
-
-static const char * const ear_text[] = {
-	"ZERO", "Switch",
-};
-
-static const struct soc_enum ear_enum =
-	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ear_text), ear_text);
-
-static const struct snd_kcontrol_new ear_pa_mux[] = {
-	SOC_DAPM_ENUM("EAR_S", ear_enum)
-};
-
-static const struct snd_kcontrol_new wsa_spk_mux[] = {
-	SOC_DAPM_ENUM("WSA Spk Switch", wsa_spk_enum)
-};
-
-static const struct snd_kcontrol_new iir2_inp1_mux =
-	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
-
-static const char * const hph_text[] = {
-	"ZERO", "Switch",
-};
-
-static const struct soc_enum hph_enum =
-	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(hph_text), hph_text);
-
-static const struct snd_kcontrol_new hphl_mux[] = {
-	SOC_DAPM_ENUM("HPHL", hph_enum)
-};
-
-static const struct snd_kcontrol_new hphr_mux[] = {
-	SOC_DAPM_ENUM("HPHR", hph_enum)
-};
-
-static const struct snd_kcontrol_new spkr_mux[] = {
-	SOC_DAPM_ENUM("SPK", hph_enum)
-};
-
-static const char * const lo_text[] = {
-	"ZERO", "Switch",
-};
-
-static const struct soc_enum lo_enum =
-	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(lo_text), lo_text);
-
-static const struct snd_kcontrol_new lo_mux[] = {
-	SOC_DAPM_ENUM("LINE_OUT", lo_enum)
-};
-
-static void msm8x16_wcd_codec_enable_adc_block(struct snd_soc_codec *codec,
-					 int enable)
-{
-	struct msm8x16_wcd_priv *wcd8x16 = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s %d\n", __func__, enable);
-
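-	/*
-	 * adc_count reference-counts active ADCs: the analog and digital ADC
-	 * clocks are enabled on every request and only gated off again once
-	 * the last ADC user is disabled.
-	 */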
-	if (enable) {
-		wcd8x16->adc_count++;
-		snd_soc_update_bits_wrapper(codec,
-				    MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
-				    0x20, 0x20);
-		snd_soc_update_bits_wrapper(codec,
-				    MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-				    0x10, 0x10);
-	} else {
-		wcd8x16->adc_count--;
-		if (!wcd8x16->adc_count) {
-			snd_soc_update_bits_wrapper(codec,
-				    MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-				    0x10, 0x00);
-			snd_soc_update_bits_wrapper(codec,
-				    MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
-					    0x20, 0x0);
-		}
-	}
-}
-
-static int msm8x16_wcd_codec_enable_adc(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	u16 adc_reg;
-	u8 init_bit_shift;
-
-	dev_err(codec->dev, "%s %d\n", __func__, event);
-
-	adc_reg = MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_2;
-
-	if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
-		init_bit_shift = 5;
-	else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
-		 (w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
-		init_bit_shift = 4;
-	else {
-		dev_err(codec->dev, "%s: Error, invalid adc register\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		msm8x16_wcd_codec_enable_adc_block(codec, 1);
-		if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
-			snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x02);
-		/*
-		 * Add delay of 10 ms to give sufficient time for the voltage
-		 * to shoot up and settle so that the txfe init does not
-		 * happen when the input voltage is changing too much.
-		 */
-		usleep_range(10000, 10010);
-		snd_soc_update_bits_wrapper(codec,
-			adc_reg, 1 << init_bit_shift,
-			1 << init_bit_shift);
-		if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
-				0x03, 0x00);
-		else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
-			(w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
-				0x03, 0x00);
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		/*
-		 * Add delay of 12 ms before deasserting the init
-		 * to reduce the tx pop
-		 */
-		usleep_range(12000, 12010);
-		snd_soc_update_bits_wrapper(codec,
-			adc_reg, 1 << init_bit_shift, 0x00);
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		msm8x16_wcd_codec_enable_adc_block(codec, 0);
-		if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
-			snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x00);
-		if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
-				0x03, 0x02);
-		else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
-			(w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
-				0x03, 0x02);
-
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
-				     struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s %d %s\n", __func__, event, w->name);
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x01);
-		switch (msm8x16_wcd->boost_option) {
-		case BOOST_SWITCH:
-			if (!msm8x16_wcd->spk_boost_set)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
-					0x10, 0x10);
-			break;
-		case BOOST_ALWAYS:
-		case BOOST_ON_FOREVER:
-			break;
-		case BYPASS_ALWAYS:
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
-				0x10, 0x10);
-			break;
-		default:
-			pr_err("%s: invalid boost option: %d\n", __func__,
-						msm8x16_wcd->boost_option);
-			break;
-		}
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0xE0);
-		if (get_codec_version(msm8x16_wcd) != TOMBAK_1_0)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x01);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		switch (msm8x16_wcd->boost_option) {
-		case BOOST_SWITCH:
-			if (msm8x16_wcd->spk_boost_set)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-					0xEF, 0xEF);
-			else
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
-					0x10, 0x00);
-			break;
-		case BOOST_ALWAYS:
-		case BOOST_ON_FOREVER:
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-				0xEF, 0xEF);
-			break;
-		case BYPASS_ALWAYS:
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
-			break;
-		default:
-			pr_err("%s: invalid boost option: %d\n", __func__,
-						msm8x16_wcd->boost_option);
-			break;
-		}
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
-		snd_soc_update_bits_wrapper(codec, w->reg, 0x80, 0x80);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x01);
-		msm8x16_wcd->mute_mask |= SPKR_PA_DISABLE;
-		/*
-		 * Add 1 ms sleep for the mute to take effect
-		 */
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x10);
-		if (get_codec_version(msm8x16_wcd) < CAJON_2_0)
-			msm8x16_wcd_boost_mode_sequence(codec, SPK_PMD);
-		snd_soc_update_bits_wrapper(codec, w->reg, 0x80, 0x00);
-		switch (msm8x16_wcd->boost_option) {
-		case BOOST_SWITCH:
-			if (msm8x16_wcd->spk_boost_set)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-					0xEF, 0x69);
-			break;
-		case BOOST_ALWAYS:
-		case BOOST_ON_FOREVER:
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-				0xEF, 0x69);
-			break;
-		case BYPASS_ALWAYS:
-			break;
-		default:
-			pr_err("%s: invalid boost option: %d\n", __func__,
-						msm8x16_wcd->boost_option);
-			break;
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0x00);
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
-		if (get_codec_version(msm8x16_wcd) != TOMBAK_1_0)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
-		if (get_codec_version(msm8x16_wcd) >= CAJON_2_0)
-			msm8x16_wcd_boost_mode_sequence(codec, SPK_PMD);
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_dig_clk(struct snd_soc_dapm_widget *w,
-				     struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	struct msm_asoc_mach_data *pdata = NULL;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-
-	dev_err(codec->dev, "%s event %d w->name %s\n", __func__,
-			event, w->name);
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		msm8x16_wcd_codec_enable_clock_block(codec, 1);
-		snd_soc_update_bits_wrapper(codec, w->reg, 0x80, 0x80);
-		msm8x16_wcd_boost_mode_sequence(codec, SPK_PMU);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (msm8x16_wcd->rx_bias_count == 0)
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-					0x80, 0x00);
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_dmic(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	u8  dmic_clk_en;
-	u16 dmic_clk_reg;
-	s32 *dmic_clk_cnt;
-	unsigned int dmic;
-	int ret;
-	char *dec_num = strpbrk(w->name, "12");
-
-	if (dec_num == NULL) {
-		dev_err(codec->dev, "%s: Invalid DMIC\n", __func__);
-		return -EINVAL;
-	}
-
-	ret = kstrtouint(dec_num, 10, &dmic);
-	if (ret < 0) {
-		dev_err(codec->dev,
-			"%s: Invalid DMIC line on the codec\n", __func__);
-		return -EINVAL;
-	}
-
-	switch (dmic) {
-	case 1:
-	case 2:
-		dmic_clk_en = 0x01;
-		dmic_clk_cnt = &(msm8x16_wcd->dmic_1_2_clk_cnt);
-		dmic_clk_reg = MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL;
-		dev_err(codec->dev,
-			"%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
-			__func__, event,  dmic, *dmic_clk_cnt);
-		break;
-	default:
-		dev_err(codec->dev, "%s: Invalid DMIC Selection\n", __func__);
-		return -EINVAL;
-	}
-
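-	/*
-	 * DMIC1 and DMIC2 share a single clock control register, so
-	 * dmic_1_2_clk_cnt tracks how many of them are active: the clock is
-	 * programmed when the first mic comes up and gated when the last one
-	 * goes down.
-	 */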
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		(*dmic_clk_cnt)++;
-		if (*dmic_clk_cnt == 1) {
-			snd_soc_update_bits_wrapper(codec, dmic_clk_reg,
-					0x0E, 0x02);
-			snd_soc_update_bits_wrapper(codec, dmic_clk_reg,
-					dmic_clk_en, dmic_clk_en);
-		}
-		if (dmic == 1)
-			snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_TX1_DMIC_CTL, 0x07, 0x01);
-		if (dmic == 2)
-			snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_TX2_DMIC_CTL, 0x07, 0x01);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		(*dmic_clk_cnt)--;
-		if (*dmic_clk_cnt  == 0)
-			snd_soc_update_bits_wrapper(codec, dmic_clk_reg,
-					dmic_clk_en, 0);
-		break;
-	}
-	return 0;
-}
-
-static bool msm8x16_wcd_use_mb(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (get_codec_version(msm8x16_wcd) < CAJON)
-		return true;
-	else
-		return false;
-}
-
-static void msm8x16_wcd_set_auto_zeroing(struct snd_soc_codec *codec,
-					bool enable)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (get_codec_version(msm8x16_wcd) < CONGA) {
-		if (enable)
-			/*
-			 * Set autozeroing for special headset detection and
-			 * buttons to work.
-			 */
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_2_EN,
-				0x18, 0x10);
-		else
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_2_EN,
-				0x18, 0x00);
-
-	} else {
-		pr_err("%s: Auto zeroing is not required from CONGA onwards\n",
-				__func__);
-	}
-}
-
-static void msm8x16_trim_btn_reg(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if (get_codec_version(msm8x16_wcd) == TOMBAK_1_0) {
-		pr_err("%s: This device needs to be trimmed\n", __func__);
-		/*
-		 * Calculate the trim value for each device used
-		 * till it comes into production by the hardware team
-		 */
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SEC_ACCESS,
-				0xA5, 0xA5);
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_TRIM_CTRL2,
-				0xFF, 0x30);
-	} else {
-		pr_err("%s: This device is trimmed at ATE\n", __func__);
-	}
-}
-
-static int msm8x16_wcd_enable_ext_mb_source(struct wcd_mbhc *mbhc,
-					    bool turn_on)
-{
-	int ret = 0;
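-	/*
-	 * Nested enable count: the MICBIAS_REGULATOR pin is forced on for
-	 * the first caller and released again only by the last one.
-	 */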
-	static int count;
-	struct snd_soc_codec *codec = mbhc->codec;
-	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
-
-	dev_err(codec->dev, "%s turn_on: %d count: %d\n", __func__, turn_on,
-			count);
-	if (turn_on) {
-		if (!count) {
-			ret = snd_soc_dapm_force_enable_pin(dapm,
-				"MICBIAS_REGULATOR");
-			snd_soc_dapm_sync(dapm);
-		}
-		count++;
-	} else {
-		if (count > 0)
-			count--;
-		if (!count) {
-			ret = snd_soc_dapm_disable_pin(dapm,
-				"MICBIAS_REGULATOR");
-			snd_soc_dapm_sync(dapm);
-		}
-	}
-
-	if (ret)
-		dev_err(codec->dev, "%s: Failed to %s external micbias source\n",
-			__func__, turn_on ? "enable" : "disable");
-	else
-		dev_err(codec->dev, "%s: %s external micbias source\n",
-			 __func__, turn_on ? "Enabled" : "Disabled");
-
-	return ret;
-}
-
-static int msm8x16_wcd_codec_enable_micbias(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd =
-				snd_soc_codec_get_drvdata(codec);
-	u16 micb_int_reg;
-	char *internal1_text = "Internal1";
-	char *internal2_text = "Internal2";
-	char *internal3_text = "Internal3";
-	char *external2_text = "External2";
-	char *external_text = "External";
-	bool micbias2;
-
-	dev_err(codec->dev, "%s %d\n", __func__, event);
-	switch (w->reg) {
-	case MSM89XX_PMIC_ANALOG_MICB_1_EN:
-	case MSM89XX_PMIC_ANALOG_MICB_2_EN:
-		micb_int_reg = MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS;
-		break;
-	default:
-		dev_err(codec->dev,
-			"%s: Error, invalid micbias register 0x%x\n",
-			__func__, w->reg);
-		return -EINVAL;
-	}
-
-	micbias2 = (snd_soc_read_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_MICB_2_EN) & 0x80);
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (strnstr(w->name, internal1_text, strlen(w->name))) {
-			if (get_codec_version(msm8x16_wcd) >= CAJON)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
-					0x02, 0x02);
-			snd_soc_update_bits_wrapper(codec,
-				micb_int_reg, 0x80, 0x80);
-		} else if (strnstr(w->name, internal2_text, strlen(w->name))) {
-			snd_soc_update_bits_wrapper(codec,
-				micb_int_reg, 0x10, 0x10);
-			snd_soc_update_bits_wrapper(codec,
-				w->reg, 0x60, 0x00);
-		} else if (strnstr(w->name, internal3_text, strlen(w->name))) {
-			snd_soc_update_bits_wrapper(codec,
-				micb_int_reg, 0x2, 0x2);
-		/*
-		 * update MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2
-		 * for external bias only, not for external2.
-		 */
-		} else if (!strnstr(w->name, external2_text, strlen(w->name)) &&
-					strnstr(w->name, external_text,
-						strlen(w->name))) {
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
-					0x02, 0x02);
-		}
-		if (!strnstr(w->name, external_text, strlen(w->name)))
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_EN, 0x05, 0x04);
-		if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
-			msm8x16_wcd_configure_cap(codec, true, micbias2);
-
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		if (get_codec_version(msm8x16_wcd) <= TOMBAK_2_0)
-			usleep_range(20000, 20100);
-		if (strnstr(w->name, internal1_text, strlen(w->name))) {
-			snd_soc_update_bits_wrapper(codec,
-				micb_int_reg, 0x40, 0x40);
-		} else if (strnstr(w->name, internal2_text,  strlen(w->name))) {
-			snd_soc_update_bits_wrapper(codec,
-				 micb_int_reg, 0x08, 0x08);
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_POST_MICBIAS_2_ON);
-		} else if (strnstr(w->name, internal3_text, 30)) {
-			snd_soc_update_bits_wrapper(codec,
-				 micb_int_reg, 0x01, 0x01);
-		} else if (strnstr(w->name, external2_text, strlen(w->name))) {
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_POST_MICBIAS_2_ON);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (strnstr(w->name, internal1_text, strlen(w->name))) {
-			snd_soc_update_bits_wrapper(codec,
-				micb_int_reg, 0xC0, 0x40);
-		} else if (strnstr(w->name, internal2_text, strlen(w->name))) {
-			msm8x16_notifier_call(codec,
-				WCD_EVENT_POST_MICBIAS_2_OFF);
-		} else if (strnstr(w->name, internal3_text, 30)) {
-			snd_soc_update_bits_wrapper(codec,
-				micb_int_reg, 0x2, 0x0);
-		} else if (strnstr(w->name, external2_text, strlen(w->name))) {
-			/*
-			 * send micbias turn off event to mbhc driver and then
-			 * break, as no need to set MICB_1_EN register.
-			 */
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_POST_MICBIAS_2_OFF);
-			break;
-		}
-		if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
-			msm8x16_wcd_configure_cap(codec, false, micbias2);
-		break;
-	}
-	return 0;
-}
-
-static void tx_hpf_corner_freq_callback(struct work_struct *work)
-{
-	struct delayed_work *hpf_delayed_work;
-	struct hpf_work *hpf_work;
-	struct msm8x16_wcd_priv *msm8x16_wcd;
-	struct snd_soc_codec *codec;
-	u16 tx_mux_ctl_reg;
-	u8 hpf_cut_of_freq;
-
-	hpf_delayed_work = to_delayed_work(work);
-	hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
-	msm8x16_wcd = hpf_work->msm8x16_wcd;
-	codec = hpf_work->msm8x16_wcd->codec;
-	hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
-
-	tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
-			(hpf_work->decimator - 1) * 32;
-
-	dev_err(codec->dev, "%s(): decimator %u hpf_cut_of_freq 0x%x\n",
-		 __func__, hpf_work->decimator, (unsigned int)hpf_cut_of_freq);
-	snd_soc_update_bits_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV, 0xFF, 0x51);
-
-	snd_soc_update_bits_wrapper(codec,
-		tx_mux_ctl_reg, 0x30, hpf_cut_of_freq << 4);
-}
-
-
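-/* TXn_MUX_CTL bits [5:4] select the decimator HPF -3 dB corner frequency. */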
-#define  TX_MUX_CTL_CUT_OFF_FREQ_MASK	0x30
-#define  CF_MIN_3DB_4HZ			0x0
-#define  CF_MIN_3DB_75HZ		0x1
-#define  CF_MIN_3DB_150HZ		0x2
-
-static int msm8x16_wcd_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	int value = 0, reg;
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		if (w->shift == 0)
-			reg = MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL;
-		else if (w->shift == 1)
-			reg = MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL;
-		else
-			break;
-		value = snd_soc_read_wrapper(codec, reg);
-		snd_soc_write_wrapper(codec, reg, value);
-		break;
-	default:
-		pr_err("%s: event = %d not expected\n", __func__, event);
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_dec(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm_asoc_mach_data *pdata = NULL;
-	unsigned int decimator;
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	char *dec_name = NULL;
-	char *widget_name = NULL;
-	char *temp;
-	int ret = 0, i;
-	u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
-	u8 dec_hpf_cut_of_freq;
-	int offset;
-	char *dec_num;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-	dev_err(codec->dev, "%s %d\n", __func__, event);
-
-	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
-	if (!widget_name)
-		return -ENOMEM;
-	temp = widget_name;
-
-	dec_name = strsep(&widget_name, " ");
-	widget_name = temp;
-	if (!dec_name) {
-		dev_err(codec->dev,
-			"%s: Invalid decimator = %s\n", __func__, w->name);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	dec_num = strpbrk(dec_name, "1234");
-	if (dec_num == NULL) {
-		dev_err(codec->dev, "%s: Invalid Decimator\n", __func__);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = kstrtouint(dec_num, 10, &decimator);
-	if (ret < 0) {
-		dev_err(codec->dev,
-			"%s: Invalid decimator = %s\n", __func__, dec_name);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	dev_err(codec->dev,
-		"%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
-		w->name, dec_name, decimator);
-
-	if (w->reg == MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL) {
-		dec_reset_reg = MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL;
-		offset = 0;
-	} else {
-		dev_err(codec->dev, "%s: Error, incorrect dec\n", __func__);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	tx_vol_ctl_reg = MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
-			 32 * (decimator - 1);
-	tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
-			  32 * (decimator - 1);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (decimator == 3 || decimator == 4) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL,
-				0xFF, 0x5);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_TX1_DMIC_CTL +
-					(decimator - 1) * 0x20, 0x7, 0x2);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_TX1_DMIC_CTL +
-					(decimator - 1) * 0x20, 0x7, 0x2);
-		}
-		/* Enable TX digital mute */
-		snd_soc_update_bits_wrapper(codec, tx_vol_ctl_reg, 0x01, 0x01);
-		for (i = 0; i < NUM_DECIMATORS; i++) {
-			if (decimator == i + 1)
-				msm8x16_wcd->dec_active[i] = true;
-		}
-
-		dec_hpf_cut_of_freq =
-			snd_soc_read_wrapper(codec, tx_mux_ctl_reg);
-
-		dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
-
-		tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
-			dec_hpf_cut_of_freq;
-
-		if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) {
-
-			/* set cut-off freq to CF_MIN_3DB_150HZ (0x2) */
-			snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x30,
-					    CF_MIN_3DB_150HZ << 4);
-		}
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV,
-				0xFF, 0x42);
-
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		/* enable HPF */
-		snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x08, 0x00);
-
-		if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
-				CF_MIN_3DB_150HZ) {
-
-			schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
-					msecs_to_jiffies(300));
-		}
-		/* apply the digital gain after the decimator is enabled */
-		if ((w->shift) < ARRAY_SIZE(tx_digital_gain_reg))
-			snd_soc_write_wrapper(codec,
-				  tx_digital_gain_reg[w->shift + offset],
-				  snd_soc_read_wrapper(codec,
-				  tx_digital_gain_reg[w->shift + offset])
-				  );
-		if (pdata->lb_mode) {
-			pr_err("%s: loopback mode, unmute the DEC\n",
-							__func__);
-			snd_soc_update_bits_wrapper(codec,
-				tx_vol_ctl_reg, 0x01, 0x00);
-		}
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits_wrapper(codec, tx_vol_ctl_reg, 0x01, 0x01);
-		msleep(20);
-		snd_soc_update_bits_wrapper(codec, tx_mux_ctl_reg, 0x08, 0x08);
-		cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits_wrapper(codec,
-			dec_reset_reg, 1 << w->shift, 1 << w->shift);
-		snd_soc_update_bits_wrapper(codec,
-			dec_reset_reg, 1 << w->shift, 0x0);
-		snd_soc_update_bits_wrapper(codec,
-			tx_mux_ctl_reg, 0x08, 0x08);
-		snd_soc_update_bits_wrapper(codec,
-			tx_mux_ctl_reg, 0x30,
-			(tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
-		snd_soc_update_bits_wrapper(codec,
-			tx_vol_ctl_reg, 0x01, 0x00);
-		for (i = 0; i < NUM_DECIMATORS; i++) {
-			if (decimator == i + 1)
-				msm8x16_wcd->dec_active[i] = false;
-		}
-		if (decimator == 3 || decimator == 4) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL,
-				0xFF, 0x0);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_TX1_DMIC_CTL +
-					(decimator - 1) * 0x20, 0x7, 0x0);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_TX1_DMIC_CTL +
-					(decimator - 1) * 0x20, 0x7, 0x0);
-		}
-		break;
-	}
-out:
-	kfree(widget_name);
-	return ret;
-}
-
-static int msm89xx_wcd_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w,
-				       struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (!msm8x16_wcd->ext_spk_boost_set) {
-		dev_err(codec->dev, "%s: ext_boost not supported/disabled\n",
-								__func__);
-		return 0;
-	}
-	dev_err(codec->dev, "%s: %s %d\n", __func__, w->name, event);
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (msm8x16_wcd->spkdrv_reg) {
-			ret = regulator_enable(msm8x16_wcd->spkdrv_reg);
-			if (ret)
-				dev_err(codec->dev,
-					"%s Failed to enable spkdrv reg %s\n",
-					__func__, MSM89XX_VDD_SPKDRV_NAME);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (msm8x16_wcd->spkdrv_reg) {
-			ret = regulator_disable(msm8x16_wcd->spkdrv_reg);
-			if (ret)
-				dev_err(codec->dev,
-					"%s: Failed to disable spkdrv_reg %s\n",
-					__func__, MSM89XX_VDD_SPKDRV_NAME);
-		}
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_config_compander(struct snd_soc_codec *codec,
-					int interp_n, int event)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: event %d shift %d, enabled %d\n",
-		__func__, event, interp_n,
-		msm8x16_wcd->comp_enabled[interp_n]);
-
-	/* compander is not enabled */
-	if (!msm8x16_wcd->comp_enabled[interp_n])
-		return 0;
-
-	switch (msm8x16_wcd->comp_enabled[interp_n]) {
-	case COMPANDER_1:
-		if (SND_SOC_DAPM_EVENT_ON(event)) {
-			/* Enable Compander Clock */
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B1_CTL,
-				1 << interp_n, 1 << interp_n);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0x50);
-			/* add sleep for compander to settle */
-			usleep_range(1000, 1100);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x28);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0xB0);
-
-			/* Enable Compander GPIO */
-			if (msm8x16_wcd->codec_hph_comp_gpio)
-				msm8x16_wcd->codec_hph_comp_gpio(1);
-		} else if (SND_SOC_DAPM_EVENT_OFF(event)) {
-			/* Disable Compander GPIO */
-			if (msm8x16_wcd->codec_hph_comp_gpio)
-				msm8x16_wcd->codec_hph_comp_gpio(0);
-
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x05);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_COMP0_B1_CTL,
-				1 << interp_n, 0);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x00);
-		}
-		break;
-	default:
-		dev_err(codec->dev, "%s: Invalid compander %d\n", __func__,
-				msm8x16_wcd->comp_enabled[interp_n]);
-		break;
-	}
-
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
-						 struct snd_kcontrol *kcontrol,
-						 int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s %d %s\n", __func__, event, w->name);
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		msm8x16_wcd_codec_config_compander(codec, w->shift, event);
-		/* apply the digital gain after the interpolator is enabled */
-		if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
-			snd_soc_write_wrapper(codec,
-				  rx_digital_gain_reg[w->shift],
-				  snd_soc_read_wrapper(codec,
-				  rx_digital_gain_reg[w->shift])
-				  );
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		msm8x16_wcd_codec_config_compander(codec, w->shift, event);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
-			1 << w->shift, 1 << w->shift);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
-			1 << w->shift, 0x0);
-		/*
-		 * disable the mute enabled during the PMD of this device
-		 */
-		if ((w->shift == 0) &&
-			(msm8x16_wcd->mute_mask & HPHL_PA_DISABLE)) {
-			pr_err("disabling HPHL mute\n");
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
-			if (get_codec_version(msm8x16_wcd) >= CAJON)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
-					0xF0, 0x20);
-			msm8x16_wcd->mute_mask &= ~(HPHL_PA_DISABLE);
-		} else if ((w->shift == 1) &&
-				(msm8x16_wcd->mute_mask & HPHR_PA_DISABLE)) {
-			pr_err("disabling HPHR mute\n");
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
-			if (get_codec_version(msm8x16_wcd) >= CAJON)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
-					0xF0, 0x20);
-			msm8x16_wcd->mute_mask &= ~(HPHR_PA_DISABLE);
-		} else if ((w->shift == 2) &&
-				(msm8x16_wcd->mute_mask & SPKR_PA_DISABLE)) {
-			pr_err("disabling SPKR mute\n");
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
-			msm8x16_wcd->mute_mask &= ~(SPKR_PA_DISABLE);
-		} else if ((w->shift == 0) &&
-				(msm8x16_wcd->mute_mask & EAR_PA_DISABLE)) {
-			pr_err("disabling EAR mute\n");
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
-			msm8x16_wcd->mute_mask &= ~(EAR_PA_DISABLE);
-		}
-	}
-	return 0;
-}
-
-
-/* The register address is the same as on other codecs, so it can use resmgr */
-static int msm8x16_wcd_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s %d\n", __func__, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		msm8x16_wcd->rx_bias_count++;
-		if (msm8x16_wcd->rx_bias_count == 1) {
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
-					0x80, 0x80);
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
-					0x01, 0x01);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		msm8x16_wcd->rx_bias_count--;
-		if (msm8x16_wcd->rx_bias_count == 0) {
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
-					0x01, 0x00);
-			snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
-					0x80, 0x00);
-		}
-		break;
-	}
-	dev_err(codec->dev, "%s rx_bias_count = %d\n",
-			__func__, msm8x16_wcd->rx_bias_count);
-	return 0;
-}
-
-static uint32_t wcd_get_impedance_value(uint32_t imped)
-{
-	int i;
-
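-	/*
-	 * Pick the wcd_imped_val[] bucket that contains the measured
-	 * impedance; anything outside the table falls through to the last
-	 * entry.
-	 */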
-	for (i = 0; i < ARRAY_SIZE(wcd_imped_val) - 1; i++) {
-		if (imped >= wcd_imped_val[i] &&
-			imped < wcd_imped_val[i + 1])
-			break;
-	}
-
-	pr_err("%s: selected impedance value = %d\n",
-		 __func__, wcd_imped_val[i]);
-	return wcd_imped_val[i];
-}
-
-void wcd_imped_config(struct snd_soc_codec *codec,
-			uint32_t imped, bool set_gain)
-{
-	uint32_t value;
-	int codec_version;
-	struct msm8x16_wcd_priv *msm8x16_wcd =
-				snd_soc_codec_get_drvdata(codec);
-
-	value = wcd_get_impedance_value(imped);
-
-	if (value < wcd_imped_val[0]) {
-		pr_err("%s, detected impedance is less than 4 Ohm\n",
-			 __func__);
-		return;
-	}
-	if (value >= wcd_imped_val[ARRAY_SIZE(wcd_imped_val) - 1]) {
-		pr_err("%s: invalid imped = %d, greater than 48 Ohm\n",
-			__func__, value);
-		return;
-	}
-
-	codec_version = get_codec_version(msm8x16_wcd);
-
-	if (set_gain) {
-		switch (codec_version) {
-		case TOMBAK_1_0:
-		case TOMBAK_2_0:
-		case CONGA:
-			/*
-			 * For 32Ohm load and higher loads, Set 0x19E
-			 * bit 5 to 1 (POS_6_DB_DI). For loads lower
-			 * than 32Ohm (such as 16Ohm load), Set 0x19E
-			 * bit 5 to 0 (POS_1P5_DB_DI)
-			 */
-			if (value >= 32)
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
-					0x20, 0x20);
-			else
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
-					0x20, 0x00);
-			break;
-		case CAJON:
-		case CAJON_2_0:
-		case DIANGU:
-			if (value >= 13) {
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
-					0x20, 0x20);
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_NCP_VCTRL,
-					0x07, 0x07);
-			} else {
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
-					0x20, 0x00);
-				snd_soc_update_bits_wrapper(codec,
-					MSM89XX_PMIC_ANALOG_NCP_VCTRL,
-					0x07, 0x04);
-			}
-			break;
-		}
-	} else {
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
-			0x20, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_NCP_VCTRL,
-			0x07, 0x04);
-	}
-
-	pr_err("%s: Exit\n", __func__);
-}
-
-static int msm8x16_wcd_hphl_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	uint32_t impedl, impedr;
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-	int ret;
-
-	dev_err(codec->dev, "%s %s %d\n", __func__, w->name, event);
-	ret = wcd_mbhc_get_impedance(&msm8x16_wcd->mbhc,
-			&impedl, &impedr);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (get_codec_version(msm8x16_wcd) > CAJON)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
-				0x08, 0x08);
-		if (get_codec_version(msm8x16_wcd) == CAJON ||
-			get_codec_version(msm8x16_wcd) == CAJON_2_0) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST,
-				0x80, 0x80);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST,
-				0x80, 0x80);
-		}
-		if (get_codec_version(msm8x16_wcd) > CAJON)
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
-				0x08, 0x00);
-		if (msm8x16_wcd->hph_mode == HD2_MODE) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x14);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0x10);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x80);
-		}
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x02);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x01);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x02);
-		if (!ret)
-			wcd_imped_config(codec, impedl, true);
-		else
-			dev_err(codec->dev, "Failed to get mbhc impedance %d\n",
-				ret);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x00);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		wcd_imped_config(codec, impedl, false);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x00);
-		if (msm8x16_wcd->hph_mode == HD2_MODE) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x00);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0xFF);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x00);
-		}
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_lo_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	dev_err(codec->dev, "%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x20);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x80);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x08);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x40);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x80);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x40);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		usleep_range(20000, 20100);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_hphr_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (msm8x16_wcd->hph_mode == HD2_MODE) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x14);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0x10);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x80);
-		}
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x02);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x02);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x01);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x00);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x00);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x00);
-		if (msm8x16_wcd->hph_mode == HD2_MODE) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x00);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0xFF);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x00);
-		}
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_hph_pa_event(struct snd_soc_dapm_widget *w,
-			      struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
-
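-	/* w->shift 5 corresponds to the HPHL PA, shift 4 to the HPHR PA. */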
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (w->shift == 5)
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_PRE_HPHL_PA_ON);
-		else if (w->shift == 4)
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_PRE_HPHR_PA_ON);
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x20, 0x20);
-		break;
-
-	case SND_SOC_DAPM_POST_PMU:
-		usleep_range(7000, 7100);
-		if (w->shift == 5) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
-		} else if (w->shift == 4) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
-		}
-		break;
-
-	case SND_SOC_DAPM_PRE_PMD:
-		if (w->shift == 5) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x01);
-			msleep(20);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x00);
-			msm8x16_wcd->mute_mask |= HPHL_PA_DISABLE;
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_PRE_HPHL_PA_OFF);
-		} else if (w->shift == 4) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x01);
-			msleep(20);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x00);
-			msm8x16_wcd->mute_mask |= HPHR_PA_DISABLE;
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_PRE_HPHR_PA_OFF);
-		}
-		if (get_codec_version(msm8x16_wcd) >= CAJON) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
-				0xF0, 0x30);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (w->shift == 5) {
-			clear_bit(WCD_MBHC_HPHL_PA_OFF_ACK,
-				&msm8x16_wcd->mbhc.hph_pa_dac_state);
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_POST_HPHL_PA_OFF);
-		} else if (w->shift == 4) {
-			clear_bit(WCD_MBHC_HPHR_PA_OFF_ACK,
-				&msm8x16_wcd->mbhc.hph_pa_dac_state);
-			msm8x16_notifier_call(codec,
-					WCD_EVENT_POST_HPHR_PA_OFF);
-		}
-		usleep_range(4000, 4100);
-		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
-
-		dev_err(codec->dev,
-			"%s: sleep 10 ms after %s PA disable.\n", __func__,
-			w->name);
-		usleep_range(10000, 10100);
-		break;
-	}
-	return 0;
-}
-
-static const struct snd_soc_dapm_route audio_map[] = {
-	{"RX_I2S_CLK", NULL, "CDC_CONN"},
-	{"I2S RX1", NULL, "RX_I2S_CLK"},
-	{"I2S RX2", NULL, "RX_I2S_CLK"},
-	{"I2S RX3", NULL, "RX_I2S_CLK"},
-
-	{"I2S TX1", NULL, "TX_I2S_CLK"},
-	{"I2S TX2", NULL, "TX_I2S_CLK"},
-	{"AIF2 VI", NULL, "TX_I2S_CLK"},
-
-	{"I2S TX1", NULL, "DEC1 MUX"},
-	{"I2S TX2", NULL, "DEC2 MUX"},
-	{"AIF2 VI", NULL, "DEC3 MUX"},
-	{"AIF2 VI", NULL, "DEC4 MUX"},
-
-	/* RDAC Connections */
-	{"HPHR DAC", NULL, "RDAC2 MUX"},
-	{"RDAC2 MUX", "RX1", "RX1 CHAIN"},
-	{"RDAC2 MUX", "RX2", "RX2 CHAIN"},
-
-	/* WSA */
-	{"WSA_SPK OUT", NULL, "WSA Spk Switch"},
-	{"WSA Spk Switch", "WSA", "EAR PA"},
-
-	/* Earpiece (RX MIX1) */
-	{"EAR", NULL, "EAR_S"},
-	{"EAR_S", "Switch", "EAR PA"},
-	{"EAR PA", NULL, "RX_BIAS"},
-	{"EAR PA", NULL, "HPHL DAC"},
-	{"EAR PA", NULL, "HPHR DAC"},
-	{"EAR PA", NULL, "EAR CP"},
-
-	/* Headset (RX MIX1 and RX MIX2) */
-	{"HEADPHONE", NULL, "HPHL PA"},
-	{"HEADPHONE", NULL, "HPHR PA"},
-
-	{"Ext Spk", NULL, "Ext Spk Switch"},
-	{"Ext Spk Switch", "On", "HPHL PA"},
-	{"Ext Spk Switch", "On", "HPHR PA"},
-
-	{"HPHL PA", NULL, "HPHL"},
-	{"HPHR PA", NULL, "HPHR"},
-	{"HPHL", "Switch", "HPHL DAC"},
-	{"HPHR", "Switch", "HPHR DAC"},
-	{"HPHL PA", NULL, "CP"},
-	{"HPHL PA", NULL, "RX_BIAS"},
-	{"HPHR PA", NULL, "CP"},
-	{"HPHR PA", NULL, "RX_BIAS"},
-	{"HPHL DAC", NULL, "RX1 CHAIN"},
-
-	{"SPK_OUT", NULL, "SPK PA"},
-	{"SPK PA", NULL, "SPK_RX_BIAS"},
-	{"SPK PA", NULL, "SPK"},
-	{"SPK", "Switch", "SPK DAC"},
-	{"SPK DAC", NULL, "RX3 CHAIN"},
-	{"SPK DAC", NULL, "VDD_SPKDRV"},
-
-	/* lineout */
-	{"LINEOUT", NULL, "LINEOUT PA"},
-	{"LINEOUT PA", NULL, "SPK_RX_BIAS"},
-	{"LINEOUT PA", NULL, "LINE_OUT"},
-	{"LINE_OUT", "Switch", "LINEOUT DAC"},
-	{"LINEOUT DAC", NULL, "RX3 CHAIN"},
-
-	/* lineout to WSA */
-	{"WSA_SPK OUT", NULL, "LINEOUT PA"},
-
-	{"RX1 CHAIN", NULL, "RX1 CLK"},
-	{"RX2 CHAIN", NULL, "RX2 CLK"},
-	{"RX3 CHAIN", NULL, "RX3 CLK"},
-	{"RX1 CHAIN", NULL, "RX1 MIX2"},
-	{"RX2 CHAIN", NULL, "RX2 MIX2"},
-	{"RX3 CHAIN", NULL, "RX3 MIX1"},
-
-	{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
-	{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
-	{"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
-	{"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
-	{"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
-	{"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
-	{"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
-	{"RX1 MIX2", NULL, "RX1 MIX1"},
-	{"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
-	{"RX2 MIX2", NULL, "RX2 MIX1"},
-	{"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
-
-	{"RX1 MIX1 INP1", "RX1", "I2S RX1"},
-	{"RX1 MIX1 INP1", "RX2", "I2S RX2"},
-	{"RX1 MIX1 INP1", "RX3", "I2S RX3"},
-	{"RX1 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX1 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX1 MIX1 INP2", "RX1", "I2S RX1"},
-	{"RX1 MIX1 INP2", "RX2", "I2S RX2"},
-	{"RX1 MIX1 INP2", "RX3", "I2S RX3"},
-	{"RX1 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX1 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX1 MIX1 INP3", "RX1", "I2S RX1"},
-	{"RX1 MIX1 INP3", "RX2", "I2S RX2"},
-	{"RX1 MIX1 INP3", "RX3", "I2S RX3"},
-
-	{"RX2 MIX1 INP1", "RX1", "I2S RX1"},
-	{"RX2 MIX1 INP1", "RX2", "I2S RX2"},
-	{"RX2 MIX1 INP1", "RX3", "I2S RX3"},
-	{"RX2 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX2 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX2 MIX1 INP2", "RX1", "I2S RX1"},
-	{"RX2 MIX1 INP2", "RX2", "I2S RX2"},
-	{"RX2 MIX1 INP2", "RX3", "I2S RX3"},
-	{"RX2 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX2 MIX1 INP2", "IIR2", "IIR2"},
-
-	{"RX3 MIX1 INP1", "RX1", "I2S RX1"},
-	{"RX3 MIX1 INP1", "RX2", "I2S RX2"},
-	{"RX3 MIX1 INP1", "RX3", "I2S RX3"},
-	{"RX3 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX3 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX3 MIX1 INP2", "RX1", "I2S RX1"},
-	{"RX3 MIX1 INP2", "RX2", "I2S RX2"},
-	{"RX3 MIX1 INP2", "RX3", "I2S RX3"},
-	{"RX3 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX3 MIX1 INP2", "IIR2", "IIR2"},
-
-	{"RX1 MIX2 INP1", "IIR1", "IIR1"},
-	{"RX2 MIX2 INP1", "IIR1", "IIR1"},
-	{"RX1 MIX2 INP1", "IIR2", "IIR2"},
-	{"RX2 MIX2 INP1", "IIR2", "IIR2"},
-
-	/* Decimator Inputs */
-	{"DEC1 MUX", "DMIC1", "DMIC1"},
-	{"DEC1 MUX", "DMIC2", "DMIC2"},
-	{"DEC1 MUX", "ADC1", "ADC1"},
-	{"DEC1 MUX", "ADC2", "ADC2"},
-	{"DEC1 MUX", "ADC3", "ADC3"},
-	{"DEC1 MUX", NULL, "CDC_CONN"},
-
-	{"DEC2 MUX", "DMIC1", "DMIC1"},
-	{"DEC2 MUX", "DMIC2", "DMIC2"},
-	{"DEC2 MUX", "ADC1", "ADC1"},
-	{"DEC2 MUX", "ADC2", "ADC2"},
-	{"DEC2 MUX", "ADC3", "ADC3"},
-	{"DEC2 MUX", NULL, "CDC_CONN"},
-
-	{"DEC3 MUX", "DMIC3", "DMIC3"},
-	{"DEC4 MUX", "DMIC4", "DMIC4"},
-	{"DEC3 MUX", NULL, "CDC_CONN"},
-	{"DEC4 MUX", NULL, "CDC_CONN"},
-	/* ADC Connections */
-	{"ADC2", NULL, "ADC2 MUX"},
-	{"ADC3", NULL, "ADC2 MUX"},
-	{"ADC2 MUX", "INP2", "ADC2_INP2"},
-	{"ADC2 MUX", "INP3", "ADC2_INP3"},
-
-	{"ADC1", NULL, "AMIC1"},
-	{"ADC2_INP2", NULL, "AMIC2"},
-	{"ADC2_INP3", NULL, "AMIC3"},
-
-	/* TODO: Fix this */
-	{"IIR1", NULL, "IIR1 INP1 MUX"},
-	{"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR2", NULL, "IIR2 INP1 MUX"},
-	{"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
-	{"MIC BIAS Internal1", NULL, "INT_LDO_H"},
-	{"MIC BIAS Internal2", NULL, "INT_LDO_H"},
-	{"MIC BIAS External", NULL, "INT_LDO_H"},
-	{"MIC BIAS External2", NULL, "INT_LDO_H"},
-	{"MIC BIAS Internal1", NULL, "MICBIAS_REGULATOR"},
-	{"MIC BIAS Internal2", NULL, "MICBIAS_REGULATOR"},
-	{"MIC BIAS External", NULL, "MICBIAS_REGULATOR"},
-	{"MIC BIAS External2", NULL, "MICBIAS_REGULATOR"},
-};
-
-static int msm8x16_wcd_startup(struct snd_pcm_substream *substream,
-		struct snd_soc_dai *dai)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd =
-		snd_soc_codec_get_drvdata(dai->codec);
-
-	dev_err(dai->codec->dev, "%s(): substream = %s  stream = %d\n",
-		__func__,
-		substream->name, substream->stream);
-	/*
-	 * If status_mask has BUS_DOWN set, SSR is not yet complete.
-	 * So return an error.
-	 */
-	if (test_bit(BUS_DOWN, &msm8x16_wcd->status_mask)) {
-		dev_err(dai->codec->dev, "Error, Device is not up post SSR\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void msm8x16_wcd_shutdown(struct snd_pcm_substream *substream,
-		struct snd_soc_dai *dai)
-{
-	dev_err(dai->codec->dev,
-		"%s(): substream = %s  stream = %d\n", __func__,
-		substream->name, substream->stream);
-}
-
-int msm8x16_wcd_mclk_enable(struct snd_soc_codec *codec,
-			    int mclk_enable, bool dapm)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
-		__func__, mclk_enable, dapm);
-	if (mclk_enable) {
-		msm8x16_wcd->int_mclk0_enabled = true;
-		msm8x16_wcd_codec_enable_clock_block(codec, 1);
-	} else {
-		if (!msm8x16_wcd->int_mclk0_enabled) {
-			dev_err(codec->dev, "Error, MCLK already disabled\n");
-			return -EINVAL;
-		}
-		msm8x16_wcd->int_mclk0_enabled = false;
-		msm8x16_wcd_codec_enable_clock_block(codec, 0);
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_set_dai_sysclk(struct snd_soc_dai *dai,
-		int clk_id, unsigned int freq, int dir)
-{
-	dev_err(dai->codec->dev, "%s\n", __func__);
-	return 0;
-}
-
-static int msm8x16_wcd_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
-	dev_err(dai->codec->dev, "%s\n", __func__);
-	return 0;
-}
-
-static int msm8x16_wcd_set_channel_map(struct snd_soc_dai *dai,
-				unsigned int tx_num, unsigned int *tx_slot,
-				unsigned int rx_num, unsigned int *rx_slot)
-{
-	dev_err(dai->codec->dev, "%s\n", __func__);
-	return 0;
-}
-
-static int msm8x16_wcd_get_channel_map(struct snd_soc_dai *dai,
-				 unsigned int *tx_num, unsigned int *tx_slot,
-				 unsigned int *rx_num, unsigned int *rx_slot)
-{
-	dev_err(dai->codec->dev, "%s\n", __func__);
-	return 0;
-}
-
-static int msm8x16_wcd_set_interpolator_rate(struct snd_soc_dai *dai,
-	u8 rx_fs_rate_reg_val, u32 sample_rate)
-{
-	snd_soc_update_bits_wrapper(dai->codec,
-			MSM89XX_CDC_CORE_RX1_B5_CTL, 0xF0, rx_fs_rate_reg_val);
-	snd_soc_update_bits_wrapper(dai->codec,
-			MSM89XX_CDC_CORE_RX2_B5_CTL, 0xF0, rx_fs_rate_reg_val);
-	return 0;
-}
-
-static int msm8x16_wcd_set_decimator_rate(struct snd_soc_dai *dai,
-	u8 tx_fs_rate_reg_val, u32 sample_rate)
-{
-	return 0;
-}
-
-static int msm8x16_wcd_hw_params(struct snd_pcm_substream *substream,
-			    struct snd_pcm_hw_params *params,
-			    struct snd_soc_dai *dai)
-{
-	u8 tx_fs_rate, rx_fs_rate, rx_clk_fs_rate;
-	int ret;
-
-	dev_err(dai->codec->dev,
-		"%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
-		__func__, dai->name, dai->id, params_rate(params),
-		params_channels(params), params_format(params));
-
-	switch (params_rate(params)) {
-	case 8000:
-		tx_fs_rate = 0x00;
-		rx_fs_rate = 0x00;
-		rx_clk_fs_rate = 0x00;
-		break;
-	case 16000:
-		tx_fs_rate = 0x20;
-		rx_fs_rate = 0x20;
-		rx_clk_fs_rate = 0x01;
-		break;
-	case 32000:
-		tx_fs_rate = 0x40;
-		rx_fs_rate = 0x40;
-		rx_clk_fs_rate = 0x02;
-		break;
-	case 48000:
-		tx_fs_rate = 0x60;
-		rx_fs_rate = 0x60;
-		rx_clk_fs_rate = 0x03;
-		break;
-	case 96000:
-		tx_fs_rate = 0x80;
-		rx_fs_rate = 0x80;
-		rx_clk_fs_rate = 0x04;
-		break;
-	case 192000:
-		tx_fs_rate = 0xA0;
-		rx_fs_rate = 0xA0;
-		rx_clk_fs_rate = 0x05;
-		break;
-	default:
-		dev_err(dai->codec->dev,
-			"%s: Invalid sampling rate %d\n", __func__,
-			params_rate(params));
-		return -EINVAL;
-	}
-
-	snd_soc_update_bits_wrapper(dai->codec,
-			MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x0F, rx_clk_fs_rate);
-
-	switch (substream->stream) {
-	case SNDRV_PCM_STREAM_CAPTURE:
-		ret = msm8x16_wcd_set_decimator_rate(dai, tx_fs_rate,
-					       params_rate(params));
-		if (ret < 0) {
-			dev_err(dai->codec->dev,
-				"%s: set decimator rate failed %d\n", __func__,
-				ret);
-			return ret;
-		}
-		break;
-	case SNDRV_PCM_STREAM_PLAYBACK:
-		ret = msm8x16_wcd_set_interpolator_rate(dai, rx_fs_rate,
-						  params_rate(params));
-		if (ret < 0) {
-			dev_err(dai->codec->dev,
-				"%s: set interpolator rate failed %d\n", __func__,
-				ret);
-			return ret;
-		}
-		break;
-	default:
-		dev_err(dai->codec->dev,
-			"%s: Invalid stream type %d\n", __func__,
-			substream->stream);
-		return -EINVAL;
-	}
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S16_LE:
-		snd_soc_update_bits_wrapper(dai->codec,
-				MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x20);
-		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
-		snd_soc_update_bits_wrapper(dai->codec,
-				MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x00);
-		break;
-	default:
-		dev_err(dai->codec->dev, "%s: wrong format selected\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int msm8x16_wcd_digital_mute(struct snd_soc_dai *dai, int mute)
-{
-	struct snd_soc_codec *codec = NULL;
-	u16 tx_vol_ctl_reg = 0;
-	u8 decimator = 0, i;
-	struct msm8x16_wcd_priv *msm8x16_wcd;
-
-	pr_err("%s: Digital Mute val = %d\n", __func__, mute);
-
-	if (!dai || !dai->codec) {
-		pr_err("%s: Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	codec = dai->codec;
-	msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	if ((dai->id != AIF1_CAP) && (dai->id != AIF2_VIFEED)) {
-		dev_err(codec->dev, "%s: Not a capture use case, skip\n",
-			__func__);
-		return 0;
-	}
-
-	mute = (mute) ? 1 : 0;
-	if (!mute) {
-		/*
-		 * 15 ms is an empirical value for the mute time,
-		 * arrived at by checking that the pop level
-		 * is inaudible.
-		 */
-		usleep_range(15000, 15010);
-	}
-
-	for (i = 0; i < NUM_DECIMATORS; i++) {
-		if (msm8x16_wcd->dec_active[i])
-			decimator = i + 1;
-		if (decimator && decimator <= NUM_DECIMATORS) {
-			pr_err("%s: Mute = %d Decimator = %d\n", __func__,
-					mute, decimator);
-			tx_vol_ctl_reg = MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
-				32 * (decimator - 1);
-			snd_soc_update_bits_wrapper(codec,
-				tx_vol_ctl_reg, 0x01, mute);
-		}
-		decimator = 0;
-	}
-	return 0;
-}
-
-static struct snd_soc_dai_ops msm8x16_wcd_dai_ops = {
-	.startup = msm8x16_wcd_startup,
-	.shutdown = msm8x16_wcd_shutdown,
-	.hw_params = msm8x16_wcd_hw_params,
-	.set_sysclk = msm8x16_wcd_set_dai_sysclk,
-	.set_fmt = msm8x16_wcd_set_dai_fmt,
-	.set_channel_map = msm8x16_wcd_set_channel_map,
-	.get_channel_map = msm8x16_wcd_get_channel_map,
-	.digital_mute = msm8x16_wcd_digital_mute,
-};
-
-static struct snd_soc_dai_driver msm8x16_wcd_i2s_dai[] = {
-	{
-		.name = "msm8x16_wcd_i2s_rx1",
-		.id = AIF1_PB,
-		.playback = {
-			.stream_name = "AIF1 Playback",
-			.rates = MSM89XX_RATES,
-			.formats = MSM89XX_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 3,
-		},
-		.ops = &msm8x16_wcd_dai_ops,
-	},
-	{
-		.name = "msm8x16_wcd_i2s_tx1",
-		.id = AIF1_CAP,
-		.capture = {
-			.stream_name = "AIF1 Capture",
-			.rates = MSM89XX_RATES,
-			.formats = MSM89XX_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 4,
-		},
-		.ops = &msm8x16_wcd_dai_ops,
-	},
-	{
-		.name = "cajon_vifeedback",
-		.id = AIF2_VIFEED,
-		.capture = {
-			.stream_name = "VIfeed",
-			.rates = MSM89XX_RATES,
-			.formats = MSM89XX_FORMATS,
-			.rate_max = 48000,
-			.rate_min = 48000,
-			.channels_min = 2,
-			.channels_max = 2,
-		},
-		.ops = &msm8x16_wcd_dai_ops,
-	},
-};
-
-static int msm8x16_wcd_codec_enable_rx_chain(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		dev_err(codec->dev,
-			"%s: PMU:Sleeping 20ms after disabling mute\n",
-			__func__);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		dev_err(codec->dev,
-			"%s: PMD:Sleeping 20ms after disabling mute\n",
-			__func__);
-		snd_soc_update_bits_wrapper(codec, w->reg,
-			    1 << w->shift, 0x00);
-		msleep(20);
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_lo_pa(struct snd_soc_dapm_widget *w,
-				     struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	dev_err(codec->dev, "%s: %d %s\n", __func__, event, w->name);
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
-		break;
-	}
-
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_spk_ext_pa(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	dev_err(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		dev_err(codec->dev,
-			"%s: enable external speaker PA\n", __func__);
-		if (msm8x16_wcd->codec_spk_ext_pa_cb)
-			msm8x16_wcd->codec_spk_ext_pa_cb(codec, 1);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		dev_err(codec->dev,
-			"%s: disable external speaker PA\n", __func__);
-		if (msm8x16_wcd->codec_spk_ext_pa_cb)
-			msm8x16_wcd->codec_spk_ext_pa_cb(codec, 0);
-		break;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		dev_err(codec->dev,
-			"%s: Sleeping 20ms after selecting EAR PA\n",
-			__func__);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x80, 0x80);
-		if (get_codec_version(msm8x16_wcd) < CONGA)
-			snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x2A);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		dev_err(codec->dev,
-			"%s: Sleeping 20ms after enabling EAR PA\n",
-			__func__);
-		snd_soc_update_bits_wrapper(codec,
-			 MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x40, 0x40);
-		usleep_range(7000, 7100);
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits_wrapper(codec,
-			MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x01);
-		msleep(20);
-		msm8x16_wcd->mute_mask |= EAR_PA_DISABLE;
-		if (msm8x16_wcd->boost_option == BOOST_ALWAYS) {
-			dev_err(codec->dev,
-				"%s: boost_option:%d, tear down ear\n",
-				__func__, msm8x16_wcd->boost_option);
-			msm8x16_wcd_boost_mode_sequence(codec, EAR_PMD);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		dev_err(codec->dev,
-			"%s: Sleeping 7ms after disabling EAR PA\n",
-			__func__);
-		snd_soc_update_bits_wrapper(codec,
-			 MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x40, 0x00);
-		usleep_range(7000, 7100);
-		if (get_codec_version(msm8x16_wcd) < CONGA)
-			snd_soc_update_bits_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x16);
-		break;
-	}
-	return 0;
-}
-
-static const struct snd_soc_dapm_widget msm8x16_wcd_dapm_widgets[] = {
-	/*RX stuff */
-	SND_SOC_DAPM_OUTPUT("EAR"),
-	SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
-
-	SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
-			0, 0, NULL, 0, msm8x16_wcd_codec_enable_ear_pa,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MUX("EAR_S", SND_SOC_NOPM, 0, 0,
-		ear_pa_mux),
-
-	SND_SOC_DAPM_MUX("WSA Spk Switch", SND_SOC_NOPM, 0, 0,
-		wsa_spk_mux),
-
-	SND_SOC_DAPM_AIF_IN("I2S RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-
-	SND_SOC_DAPM_AIF_IN("I2S RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-
-	SND_SOC_DAPM_AIF_IN("I2S RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-
-	SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
-
-	SND_SOC_DAPM_SPK("Ext Spk", msm8x16_wcd_codec_enable_spk_ext_pa),
-
-	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
-	SND_SOC_DAPM_PGA_E("HPHL PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
-		5, 0, NULL, 0,
-		msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0,
-		hphl_mux),
-
-	SND_SOC_DAPM_MIXER_E("HPHL DAC",
-		MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
-		0, msm8x16_wcd_hphl_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_PGA_E("HPHR PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
-		4, 0, NULL, 0,
-		msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0,
-		hphr_mux),
-
-	SND_SOC_DAPM_MIXER_E("HPHR DAC",
-		MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
-		0, msm8x16_wcd_hphr_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("SPK", SND_SOC_NOPM, 0, 0,
-		spkr_mux),
-
-	SND_SOC_DAPM_DAC("SPK DAC", NULL,
-		MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 7, 0),
-
-	SND_SOC_DAPM_MUX("LINE_OUT",
-		SND_SOC_NOPM, 0, 0, lo_mux),
-
-	SND_SOC_DAPM_DAC_E("LINEOUT DAC", NULL,
-		SND_SOC_NOPM, 0, 0, msm8x16_wcd_lo_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	/* Speaker */
-	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
-
-	/* Lineout */
-	SND_SOC_DAPM_OUTPUT("LINEOUT"),
-
-	SND_SOC_DAPM_PGA_E("SPK PA", MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-			6, 0, NULL, 0, msm8x16_wcd_codec_enable_spk_pa,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_PGA_E("LINEOUT PA", MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL,
-			5, 0, NULL, 0, msm8x16_wcd_codec_enable_lo_pa,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0,
-			    msm89xx_wcd_codec_enable_vdd_spkr,
-			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("Ext Spk Switch", SND_SOC_NOPM, 0, 0,
-		&ext_spk_mux),
-
-	SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-
-	SND_SOC_DAPM_MIXER_E("RX1 MIX2",
-		MSM89XX_CDC_CORE_CLK_RX_B1_CTL, MSM89XX_RX1, 0, NULL,
-		0, msm8x16_wcd_codec_enable_interpolator,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MIXER_E("RX2 MIX2",
-		MSM89XX_CDC_CORE_CLK_RX_B1_CTL, MSM89XX_RX2, 0, NULL,
-		0, msm8x16_wcd_codec_enable_interpolator,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MIXER_E("RX3 MIX1",
-		MSM89XX_CDC_CORE_CLK_RX_B1_CTL, MSM89XX_RX3, 0, NULL,
-		0, msm8x16_wcd_codec_enable_interpolator,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-		0, 0, NULL, 0),
-	SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-		1, 0, NULL, 0),
-	SND_SOC_DAPM_SUPPLY("RX3 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-		2, 0, msm8x16_wcd_codec_enable_dig_clk, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MIXER_E("RX1 CHAIN", MSM89XX_CDC_CORE_RX1_B6_CTL, 0, 0,
-		NULL, 0,
-		msm8x16_wcd_codec_enable_rx_chain,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MIXER_E("RX2 CHAIN", MSM89XX_CDC_CORE_RX2_B6_CTL, 0, 0,
-		NULL, 0,
-		msm8x16_wcd_codec_enable_rx_chain,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MIXER_E("RX3 CHAIN", MSM89XX_CDC_CORE_RX3_B6_CTL, 0, 0,
-		NULL, 0,
-		msm8x16_wcd_codec_enable_rx_chain,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
-		&rx_mix1_inp3_mux),
-
-	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx2_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx2_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
-		&rx2_mix1_inp3_mux),
-
-	SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx3_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx3_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
-		&rx3_mix1_inp3_mux),
-
-	SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
-		&rx1_mix2_inp1_mux),
-	SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
-		&rx2_mix2_inp1_mux),
-
-	SND_SOC_DAPM_SUPPLY("MICBIAS_REGULATOR", SND_SOC_NOPM,
-		ON_DEMAND_MICBIAS, 0,
-		msm8x16_wcd_codec_enable_on_demand_supply,
-		SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("CP", MSM89XX_PMIC_ANALOG_NCP_EN, 0, 0,
-		msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU |	SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("EAR CP", MSM89XX_PMIC_ANALOG_NCP_EN, 4, 0,
-		msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY_S("RX_BIAS", 1, SND_SOC_NOPM,
-		0, 0, msm8x16_wcd_codec_enable_rx_bias,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY_S("SPK_RX_BIAS", 1, SND_SOC_NOPM, 0, 0,
-		msm8x16_wcd_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	/* TX */
-
-	SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, MSM89XX_CDC_CORE_CLK_OTHR_CTL,
-		2, 0, NULL, 0),
-
-
-	SND_SOC_DAPM_INPUT("AMIC1"),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal1",
-		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
-		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal2",
-		MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
-		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal3",
-		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
-		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC1", NULL, MSM89XX_PMIC_ANALOG_TX_1_EN, 7, 0,
-		msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC2_INP2",
-		NULL, MSM89XX_PMIC_ANALOG_TX_2_EN, 7, 0,
-		msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC2_INP3",
-		NULL, MSM89XX_PMIC_ANALOG_TX_3_EN, 7, 0,
-		msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
-
-	SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0,
-		&tx_adc2_mux),
-
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS External",
-		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
-		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS External2",
-		MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
-		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-
-	SND_SOC_DAPM_INPUT("AMIC3"),
-
-	SND_SOC_DAPM_MUX_E("DEC1 MUX",
-		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 0, 0,
-		&dec1_mux, msm8x16_wcd_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC2 MUX",
-		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 1, 0,
-		&dec2_mux, msm8x16_wcd_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC3 MUX",
-		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 2, 0,
-		&dec3_mux, msm8x16_wcd_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC4 MUX",
-		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 3, 0,
-		&dec4_mux, msm8x16_wcd_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
-
-	SND_SOC_DAPM_INPUT("AMIC2"),
-
-	SND_SOC_DAPM_AIF_OUT("I2S TX1", "AIF1 Capture", 0, SND_SOC_NOPM,
-		0, 0),
-	SND_SOC_DAPM_AIF_OUT("I2S TX2", "AIF1 Capture", 0, SND_SOC_NOPM,
-		0, 0),
-	SND_SOC_DAPM_AIF_OUT("I2S TX3", "AIF1 Capture", 0, SND_SOC_NOPM,
-		0, 0),
-
-	SND_SOC_DAPM_AIF_OUT("AIF2 VI", "VIfeed", 0, SND_SOC_NOPM,
-		0, 0),
-	/* Digital Mic Inputs */
-	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
-		msm8x16_wcd_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
-		msm8x16_wcd_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_INPUT("DMIC3"),
-
-	SND_SOC_DAPM_INPUT("DMIC4"),
-
-	/* Sidetone */
-	SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
-	SND_SOC_DAPM_PGA_E("IIR1", MSM89XX_CDC_CORE_CLK_SD_CTL, 0, 0, NULL, 0,
-		msm8x16_wcd_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
-
-	SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
-	SND_SOC_DAPM_PGA_E("IIR2", MSM89XX_CDC_CORE_CLK_SD_CTL, 1, 0, NULL, 0,
-		msm8x16_wcd_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
-
-	SND_SOC_DAPM_SUPPLY("RX_I2S_CLK",
-		MSM89XX_CDC_CORE_CLK_RX_I2S_CTL,	4, 0, NULL, 0),
-	SND_SOC_DAPM_SUPPLY("TX_I2S_CLK",
-		MSM89XX_CDC_CORE_CLK_TX_I2S_CTL, 4, 0,
-		NULL, 0),
-};
-
-static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults[] = {
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
-};
-
-static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults_2_0[] = {
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BOOST_EN_CTL, 0x5F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO, 0x88),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static const struct msm8x16_wcd_reg_mask_val msm8909_wcd_reg_defaults[] = {
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE, 0x0A),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static const struct msm8x16_wcd_reg_mask_val cajon_wcd_reg_defaults[] = {
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static const struct msm8x16_wcd_reg_mask_val cajon2p0_wcd_reg_defaults[] = {
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0xA2),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_EAR_STATUS, 0x10),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BYPASS_MODE, 0x18),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
-	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
-};
-
-static void msm8x16_wcd_update_reg_defaults(struct snd_soc_codec *codec)
-{
-	u32 i, version;
-	struct msm8x16_wcd_priv *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
-
-	version = get_codec_version(msm8x16_wcd);
-	if (version == TOMBAK_1_0) {
-		for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults); i++)
-			snd_soc_write_wrapper(codec,
-				msm8x16_wcd_reg_defaults[i].reg,
-				msm8x16_wcd_reg_defaults[i].val);
-	} else if (version == TOMBAK_2_0) {
-		for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults_2_0); i++)
-			snd_soc_write_wrapper(codec,
-				msm8x16_wcd_reg_defaults_2_0[i].reg,
-				msm8x16_wcd_reg_defaults_2_0[i].val);
-	} else if (version == CONGA) {
-		for (i = 0; i < ARRAY_SIZE(msm8909_wcd_reg_defaults); i++)
-			snd_soc_write_wrapper(codec,
-				msm8909_wcd_reg_defaults[i].reg,
-				msm8909_wcd_reg_defaults[i].val);
-	} else if (version == CAJON) {
-		for (i = 0; i < ARRAY_SIZE(cajon_wcd_reg_defaults); i++)
-			snd_soc_write_wrapper(codec,
-				cajon_wcd_reg_defaults[i].reg,
-				cajon_wcd_reg_defaults[i].val);
-	} else if (version == CAJON_2_0 || version == DIANGU) {
-		for (i = 0; i < ARRAY_SIZE(cajon2p0_wcd_reg_defaults); i++)
-			snd_soc_write_wrapper(codec,
-				cajon2p0_wcd_reg_defaults[i].reg,
-				cajon2p0_wcd_reg_defaults[i].val);
-	}
-}
-
-static const struct msm8x16_wcd_reg_mask_val
-	msm8x16_wcd_codec_reg_init_val[] = {
-
-	/* Initialize the current threshold to 350 mA and the
-	 * number of wait and run cycles to 4096.
-	 */
-	{MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0xFF, 0x12},
-	{MSM89XX_PMIC_ANALOG_RX_COM_OCP_COUNT, 0xFF, 0xFF},
-};
-
-static void msm8x16_wcd_codec_init_reg(struct snd_soc_codec *codec)
-{
-	u32 i;
-
-	for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_codec_reg_init_val); i++)
-		snd_soc_update_bits_wrapper(codec,
-			    msm8x16_wcd_codec_reg_init_val[i].reg,
-			    msm8x16_wcd_codec_reg_init_val[i].mask,
-			    msm8x16_wcd_codec_reg_init_val[i].val);
-}
-
-static int msm8x16_wcd_bringup(struct snd_soc_codec *codec)
-{
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
-		0xA5);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x01);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SEC_ACCESS,
-		0xA5);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x01);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
-		0xA5);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x00);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SEC_ACCESS,
-		0xA5);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x00);
-	return 0;
-}
-
-static struct regulator *wcd8x16_wcd_codec_find_regulator(
-				const struct msm8x16_wcd *msm8x16,
-				const char *name)
-{
-	int i;
-
-	for (i = 0; i < msm8x16->num_of_supplies; i++) {
-		if (msm8x16->supplies[i].supply &&
-		    !strcmp(msm8x16->supplies[i].supply, name))
-			return msm8x16->supplies[i].consumer;
-	}
-
-	dev_err(msm8x16->dev, "Error: regulator not found: %s\n",
-				name);
-	return NULL;
-}
-
-static int msm8x16_wcd_device_down(struct snd_soc_codec *codec)
-{
-	struct msm_asoc_mach_data *pdata = NULL;
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv =
-		snd_soc_codec_get_drvdata(codec);
-	int i;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-	dev_err(codec->dev, "%s: device down!\n", __func__);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_TX_1_EN, 0x3);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_TX_2_EN, 0x3);
-	if (msm8x16_wcd_priv->boost_option == BOOST_ON_FOREVER) {
-		if ((snd_soc_read_wrapper(codec,
-			MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL)
-		& 0x80) == 0) {
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_CLK_MCLK_CTL,	0x01, 0x01);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x03);
-			snd_soc_write_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL,
-				0x0C, 0x0C);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
-				0x84, 0x84);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
-				0x10, 0x10);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
-				0x1F, 0x1F);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
-				0x90, 0x90);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
-				0xFF, 0xFF);
-			usleep_range(20, 21);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
-				0xFF, 0xFF);
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-				0xE9, 0xE9);
-		}
-	}
-	msm8x16_wcd_boost_off(codec);
-	msm8x16_wcd_priv->hph_mode = NORMAL_MODE;
-	for (i = 0; i < MSM89XX_RX_MAX; i++)
-		msm8x16_wcd_priv->comp_enabled[i] = COMPANDER_NONE;
-
-	/* 40ms to allow boost to discharge */
-	msleep(40);
-	/* Disable PA to avoid pop during codec bring up */
-	snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
-			0x30, 0x00);
-	snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
-			0x80, 0x00);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x20);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x20);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x12);
-	snd_soc_write_wrapper(codec,
-		MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
-
-	msm8x16_wcd_bringup(codec);
-	atomic_set(&pdata->int_mclk0_enabled, false);
-	set_bit(BUS_DOWN, &msm8x16_wcd_priv->status_mask);
-	snd_soc_card_change_online_state(codec->component.card, 0);
-	return 0;
-}
-
-static int msm8x16_wcd_device_up(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv =
-		snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	dev_err(codec->dev, "%s: device up!\n", __func__);
-
-	clear_bit(BUS_DOWN, &msm8x16_wcd_priv->status_mask);
-
-	snd_soc_card_change_online_state(codec->component.card, 1);
-	/* delay is required to make sure the sound card state is updated */
-	usleep_range(5000, 5100);
-
-	msm8x16_wcd_codec_init_reg(codec);
-	msm8x16_wcd_update_reg_defaults(codec);
-
-	snd_soc_write_wrapper(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
-				MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
-	snd_soc_write_wrapper(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
-				MSM89XX_PMIC_DIGITAL_INT_EN_CLR__POR);
-
-	msm8x16_wcd_set_boost_v(codec);
-
-	msm8x16_wcd_set_micb_v(codec);
-	if (msm8x16_wcd_priv->boost_option == BOOST_ON_FOREVER)
-		msm8x16_wcd_boost_on(codec);
-	else if (msm8x16_wcd_priv->boost_option == BYPASS_ALWAYS)
-		msm8x16_wcd_bypass_on(codec);
-
-	msm8x16_wcd_configure_cap(codec, false, false);
-	wcd_mbhc_stop(&msm8x16_wcd_priv->mbhc);
-	wcd_mbhc_deinit(&msm8x16_wcd_priv->mbhc);
-	ret = wcd_mbhc_init(&msm8x16_wcd_priv->mbhc, codec, &mbhc_cb, &intr_ids,
-			wcd_mbhc_registers, true);
-	if (ret)
-		dev_err(codec->dev, "%s: mbhc initialization failed\n",
-			__func__);
-	else
-		wcd_mbhc_start(&msm8x16_wcd_priv->mbhc,
-			msm8x16_wcd_priv->mbhc.mbhc_cfg);
-
-
-	return 0;
-}
-
-static int adsp_state_callback(struct notifier_block *nb, unsigned long value,
-			       void *priv)
-{
-	bool timedout;
-	unsigned long timeout;
-
-	if (value == SUBSYS_BEFORE_SHUTDOWN)
-		msm8x16_wcd_device_down(registered_codec);
-	else if (value == SUBSYS_AFTER_POWERUP) {
-
-		dev_err(registered_codec->dev,
-			"ADSP is about to power up, bring up the codec\n");
-
-		if (!q6core_is_adsp_ready()) {
-			dev_err(registered_codec->dev,
-				"ADSP isn't ready\n");
-			timeout = jiffies +
-				  msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
-			while (!(timedout = time_after(jiffies, timeout))) {
-				if (!q6core_is_adsp_ready()) {
-					dev_err(registered_codec->dev,
-						"ADSP isn't ready\n");
-				} else {
-					dev_err(registered_codec->dev,
-						"ADSP is ready\n");
-					break;
-				}
-			}
-		} else {
-			dev_err(registered_codec->dev,
-				"%s: ADSP is ready\n", __func__);
-		}
-
-		msm8x16_wcd_device_up(registered_codec);
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block adsp_state_notifier_block = {
-	.notifier_call = adsp_state_callback,
-	.priority = -INT_MAX,
-};
-
-int msm8x16_wcd_hs_detect(struct snd_soc_codec *codec,
-		    struct wcd_mbhc_config *mbhc_cfg)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv =
-		snd_soc_codec_get_drvdata(codec);
-
-	return wcd_mbhc_start(&msm8x16_wcd_priv->mbhc, mbhc_cfg);
-}
-EXPORT_SYMBOL(msm8x16_wcd_hs_detect);
-
-void msm8x16_wcd_hs_detect_exit(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv =
-		snd_soc_codec_get_drvdata(codec);
-
-	wcd_mbhc_stop(&msm8x16_wcd_priv->mbhc);
-}
-EXPORT_SYMBOL(msm8x16_wcd_hs_detect_exit);
-
-void msm8x16_update_int_spk_boost(bool enable)
-{
-	pr_err("%s: enable = %d\n", __func__, enable);
-	spkr_boost_en = enable;
-}
-EXPORT_SYMBOL(msm8x16_update_int_spk_boost);
-
-static void msm8x16_wcd_set_micb_v(struct snd_soc_codec *codec)
-{
-
-	struct msm8x16_wcd *msm8x16 = codec->control_data;
-	struct msm8x16_wcd_pdata *pdata = msm8x16->dev->platform_data;
-	u8 reg_val;
-
-	reg_val = VOLTAGE_CONVERTER(pdata->micbias.cfilt1_mv, MICBIAS_MIN_VAL,
-			MICBIAS_STEP_SIZE);
-	dev_err(codec->dev, "cfilt1_mv %d reg_val %x\n",
-			(u32)pdata->micbias.cfilt1_mv, reg_val);
-	snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_MICB_1_VAL,
-			0xF8, (reg_val << 3));
-}
-
-static void msm8x16_wcd_set_boost_v(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv =
-				snd_soc_codec_get_drvdata(codec);
-
-	snd_soc_update_bits_wrapper(codec, MSM89XX_PMIC_ANALOG_OUTPUT_VOLTAGE,
-			0x1F, msm8x16_wcd_priv->boost_voltage);
-}
-
-static void msm8x16_wcd_configure_cap(struct snd_soc_codec *codec,
-		bool micbias1, bool micbias2)
-{
-
-	struct msm_asoc_mach_data *pdata = NULL;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-
-	pr_err("%s: micbias1 = %d, micbias2 = %d\n", __func__, micbias1,
-			micbias2);
-	if (micbias1 && micbias2) {
-		if ((pdata->micbias1_cap_mode
-		     == MICBIAS_EXT_BYP_CAP) ||
-		    (pdata->micbias2_cap_mode
-		     == MICBIAS_EXT_BYP_CAP))
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_EN,
-				0x40, (MICBIAS_EXT_BYP_CAP << 6));
-		else
-			snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_EN,
-				0x40, (MICBIAS_NO_EXT_BYP_CAP << 6));
-	} else if (micbias2) {
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_EN,
-				0x40, (pdata->micbias2_cap_mode << 6));
-	} else if (micbias1) {
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_EN,
-				0x40, (pdata->micbias1_cap_mode << 6));
-	} else {
-		snd_soc_update_bits_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_MICB_1_EN,
-				0x40, 0x00);
-	}
-}
-
-static int msm89xx_digcodec_probe(struct snd_soc_codec *codec)
-{
-	registered_digcodec = codec;
-
-	return 0;
-}
-
-
-static int msm89xx_digcodec_remove(struct snd_soc_codec *codec)
-{
-	return 0;
-}
-
-static int msm8x16_wcd_codec_probe(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv;
-	int i, ret;
-
-	dev_err(codec->dev, "%s()\n", __func__);
-
-	msm8x16_wcd_priv = kzalloc(sizeof(struct msm8x16_wcd_priv), GFP_KERNEL);
-	if (!msm8x16_wcd_priv)
-		return -ENOMEM;
-
-	for (i = 0; i < NUM_DECIMATORS; i++) {
-		tx_hpf_work[i].msm8x16_wcd = msm8x16_wcd_priv;
-		tx_hpf_work[i].decimator = i + 1;
-		INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
-			tx_hpf_corner_freq_callback);
-	}
-
-	codec->control_data = dev_get_drvdata(codec->dev);
-	snd_soc_codec_set_drvdata(codec, msm8x16_wcd_priv);
-	msm8x16_wcd_priv->codec = codec;
-
-	msm8x16_wcd_priv->spkdrv_reg =
-		wcd8x16_wcd_codec_find_regulator(codec->control_data,
-					MSM89XX_VDD_SPKDRV_NAME);
-	msm8x16_wcd_priv->pmic_rev = snd_soc_read_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_REVISION1);
-	msm8x16_wcd_priv->codec_version = snd_soc_read_wrapper(codec,
-				MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE);
-	if (msm8x16_wcd_priv->codec_version == CONGA) {
-		dev_err(codec->dev, "%s: Conga REV: %d\n", __func__,
-				msm8x16_wcd_priv->codec_version);
-		msm8x16_wcd_priv->ext_spk_boost_set = true;
-	} else {
-		dev_err(codec->dev, "%s: PMIC REV: %d\n", __func__,
-					msm8x16_wcd_priv->pmic_rev);
-		if (msm8x16_wcd_priv->pmic_rev == TOMBAK_1_0 &&
-			msm8x16_wcd_priv->codec_version == CAJON_2_0) {
-			msm8x16_wcd_priv->codec_version = DIANGU;
-			dev_err(codec->dev, "%s : Diangu detected\n",
-						__func__);
-		} else if (msm8x16_wcd_priv->pmic_rev == TOMBAK_1_0 &&
-			(snd_soc_read_wrapper(codec,
-				 MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
-			 & 0x80)) {
-			msm8x16_wcd_priv->codec_version = CAJON;
-			dev_err(codec->dev, "%s : Cajon detected\n", __func__);
-		} else if (msm8x16_wcd_priv->pmic_rev == TOMBAK_2_0 &&
-			(snd_soc_read_wrapper(codec,
-				MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
-			 & 0x80)) {
-			msm8x16_wcd_priv->codec_version = CAJON_2_0;
-			dev_err(codec->dev, "%s : Cajon 2.0 detected\n",
-						__func__);
-		}
-	}
-	/*
-	 * Default the boost option to BOOST_SWITCH; the user mixer path can
-	 * change it to BOOST_ALWAYS or BOOST_BYPASS based on the solution chosen.
-	 */
-	msm8x16_wcd_priv->boost_option = BOOST_SWITCH;
-	msm8x16_wcd_priv->hph_mode = NORMAL_MODE;
-
-	for (i = 0; i < MSM89XX_RX_MAX; i++)
-		msm8x16_wcd_priv->comp_enabled[i] = COMPANDER_NONE;
-
-	msm8x16_wcd_dt_parse_boost_info(codec);
-	msm8x16_wcd_set_boost_v(codec);
-
-	snd_soc_add_codec_controls(codec, impedance_detect_controls,
-				   ARRAY_SIZE(impedance_detect_controls));
-	snd_soc_add_codec_controls(codec, hph_type_detect_controls,
-				  ARRAY_SIZE(hph_type_detect_controls));
-
-	msm8x16_wcd_bringup(codec);
-	msm8x16_wcd_codec_init_reg(codec);
-	msm8x16_wcd_update_reg_defaults(codec);
-
-	wcd9xxx_spmi_set_codec(codec);
-
-	msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].supply =
-				wcd8x16_wcd_codec_find_regulator(
-				codec->control_data,
-				on_demand_supply_name[ON_DEMAND_MICBIAS]);
-	atomic_set(&msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].ref, 0);
-
-	BLOCKING_INIT_NOTIFIER_HEAD(&msm8x16_wcd_priv->notifier);
-
-	msm8x16_wcd_priv->fw_data = kzalloc(sizeof(*(msm8x16_wcd_priv->fw_data))
-			, GFP_KERNEL);
-	if (!msm8x16_wcd_priv->fw_data) {
-		kfree(msm8x16_wcd_priv);
-		return -ENOMEM;
-	}
-
-	set_bit(WCD9XXX_MBHC_CAL, msm8x16_wcd_priv->fw_data->cal_bit);
-	ret = wcd_cal_create_hwdep(msm8x16_wcd_priv->fw_data,
-			WCD9XXX_CODEC_HWDEP_NODE, codec);
-	if (ret < 0) {
-		dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
-		kfree(msm8x16_wcd_priv->fw_data);
-		kfree(msm8x16_wcd_priv);
-		return ret;
-	}
-
-	wcd_mbhc_init(&msm8x16_wcd_priv->mbhc, codec, &mbhc_cb, &intr_ids,
-		      wcd_mbhc_registers, true);
-
-	msm8x16_wcd_priv->int_mclk0_enabled = false;
-	msm8x16_wcd_priv->clock_active = false;
-	msm8x16_wcd_priv->config_mode_active = false;
-
-	/* Update speaker boost configuration */
-	msm8x16_wcd_priv->spk_boost_set = spkr_boost_en;
-	pr_err("%s: speaker boost configured = %d\n",
-			__func__, msm8x16_wcd_priv->spk_boost_set);
-
-	/* Set initial MICBIAS voltage level */
-	msm8x16_wcd_set_micb_v(codec);
-
-	/* Set initial cap mode */
-	msm8x16_wcd_configure_cap(codec, false, false);
-	registered_codec = codec;
-	adsp_state_notifier =
-	    subsys_notif_register_notifier("adsp",
-					   &adsp_state_notifier_block);
-	if (!adsp_state_notifier) {
-		dev_err(codec->dev, "Failed to register adsp state notifier\n");
-		kfree(msm8x16_wcd_priv->fw_data);
-		kfree(msm8x16_wcd_priv);
-		registered_codec = NULL;
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static int msm8x16_wcd_codec_remove(struct snd_soc_codec *codec)
-{
-	struct msm8x16_wcd_priv *msm8x16_wcd_priv =
-					snd_soc_codec_get_drvdata(codec);
-	struct msm8x16_wcd *msm8x16_wcd;
-
-	msm8x16_wcd = codec->control_data;
-	msm8x16_wcd_priv->spkdrv_reg = NULL;
-	msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].supply = NULL;
-	atomic_set(&msm8x16_wcd_priv->on_demand_list[ON_DEMAND_MICBIAS].ref, 0);
-	kfree(msm8x16_wcd_priv->fw_data);
-	kfree(msm8x16_wcd_priv);
-
-	return 0;
-}
-
-static int msm8x16_wcd_enable_static_supplies_to_optimum(
-				struct msm8x16_wcd *msm8x16,
-				struct msm8x16_wcd_pdata *pdata)
-{
-	int i;
-	int ret = 0;
-
-	for (i = 0; i < msm8x16->num_of_supplies; i++) {
-		if (pdata->regulator[i].ondemand)
-			continue;
-		if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
-			0)
-			continue;
-
-		ret = regulator_set_voltage(msm8x16->supplies[i].consumer,
-			pdata->regulator[i].min_uv,
-			pdata->regulator[i].max_uv);
-		if (ret) {
-			dev_err(msm8x16->dev,
-				"Setting voltage failed for regulator %s err %d\n",
-				msm8x16->supplies[i].supply, ret);
-		}
-
-		ret = regulator_set_load(msm8x16->supplies[i].consumer,
-			pdata->regulator[i].optimum_ua);
-		dev_err(msm8x16->dev, "Regulator %s set optimum mode\n",
-			 msm8x16->supplies[i].supply);
-	}
-
-	return ret;
-}
-
-static int msm8x16_wcd_disable_static_supplies_to_optimum(
-			struct msm8x16_wcd *msm8x16,
-			struct msm8x16_wcd_pdata *pdata)
-{
-	int i;
-	int ret = 0;
-
-	for (i = 0; i < msm8x16->num_of_supplies; i++) {
-		if (pdata->regulator[i].ondemand)
-			continue;
-		if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
-			0)
-			continue;
-		regulator_set_voltage(msm8x16->supplies[i].consumer, 0,
-			pdata->regulator[i].max_uv);
-		regulator_set_load(msm8x16->supplies[i].consumer, 0);
-		dev_err(msm8x16->dev, "Regulator %s set optimum mode\n",
-				 msm8x16->supplies[i].supply);
-	}
-
-	return ret;
-}
-
-int msm8x16_wcd_suspend(struct snd_soc_codec *codec)
-{
-	struct msm_asoc_mach_data *pdata = NULL;
-	struct msm8x16_wcd *msm8x16 = codec->control_data;
-	struct msm8x16_wcd_pdata *msm8x16_pdata = msm8x16->dev->platform_data;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-	pr_err("%s: mclk cnt = %d, mclk_enabled = %d\n",
-			__func__, atomic_read(&pdata->int_mclk0_rsc_ref),
-			atomic_read(&pdata->int_mclk0_enabled));
-	if (atomic_read(&pdata->int_mclk0_enabled) == true) {
-		cancel_delayed_work_sync(
-				&pdata->disable_int_mclk0_work);
-		mutex_lock(&pdata->cdc_int_mclk0_mutex);
-		pdata->digital_cdc_core_clk.enable = 0;
-		afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
-				       &pdata->digital_cdc_core_clk);
-		atomic_set(&pdata->int_mclk0_enabled, false);
-		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
-	}
-	msm8x16_wcd_disable_static_supplies_to_optimum(msm8x16, msm8x16_pdata);
-	return 0;
-}
-
-int msm8x16_wcd_resume(struct snd_soc_codec *codec)
-{
-	struct msm_asoc_mach_data *pdata = NULL;
-	struct msm8x16_wcd *msm8x16 = codec->control_data;
-	struct msm8x16_wcd_pdata *msm8x16_pdata = msm8x16->dev->platform_data;
-
-	pdata = snd_soc_card_get_drvdata(codec->component.card);
-	msm8x16_wcd_enable_static_supplies_to_optimum(msm8x16, msm8x16_pdata);
-	return 0;
-}
-
-static struct regmap *msm89xx_pmic_cdc_regmap;
-static struct regmap *msm89xx_pmic_cdc_get_regmap(struct device *dev)
-{
-	return msm89xx_pmic_cdc_regmap;
-}
-
-static const struct snd_soc_codec_driver soc_codec_dev_msm8x16_wcd = {
-	.probe	= msm8x16_wcd_codec_probe,
-	.remove	= msm8x16_wcd_codec_remove,
-
-	.suspend = msm8x16_wcd_suspend,
-	.resume = msm8x16_wcd_resume,
-
-	.controls = msm8x16_wcd_snd_controls,
-	.num_controls = ARRAY_SIZE(msm8x16_wcd_snd_controls),
-	.dapm_widgets = msm8x16_wcd_dapm_widgets,
-	.num_dapm_widgets = ARRAY_SIZE(msm8x16_wcd_dapm_widgets),
-	.dapm_routes = audio_map,
-	.num_dapm_routes = ARRAY_SIZE(audio_map),
-	.get_regmap = msm89xx_pmic_cdc_get_regmap,
-};
-
-static int msm8x16_wcd_init_supplies(struct msm8x16_wcd *msm8x16,
-				struct msm8x16_wcd_pdata *pdata)
-{
-	int ret;
-	int i;
-
-	msm8x16->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
-				   ARRAY_SIZE(pdata->regulator),
-				   GFP_KERNEL);
-	if (!msm8x16->supplies) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	msm8x16->num_of_supplies = 0;
-
-	if (ARRAY_SIZE(pdata->regulator) > MAX_REGULATOR) {
-		dev_err(msm8x16->dev, "%s: Array size out of bounds\n",
-			__func__);
-		ret = -EINVAL;
-		goto err;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
-		if (pdata->regulator[i].name) {
-			msm8x16->supplies[i].supply = pdata->regulator[i].name;
-			msm8x16->num_of_supplies++;
-		}
-	}
-
-	ret = regulator_bulk_get(msm8x16->dev, msm8x16->num_of_supplies,
-				 msm8x16->supplies);
-	if (ret != 0) {
-		dev_err(msm8x16->dev, "Failed to get supplies: err = %d\n",
-							ret);
-		goto err_supplies;
-	}
-
-	for (i = 0; i < msm8x16->num_of_supplies; i++) {
-		if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
-			0)
-			continue;
-
-		ret = regulator_set_voltage(msm8x16->supplies[i].consumer,
-			pdata->regulator[i].min_uv,
-			pdata->regulator[i].max_uv);
-		if (ret) {
-			dev_err(msm8x16->dev, "Setting regulator voltage failed for regulator %s err = %d\n",
-				msm8x16->supplies[i].supply, ret);
-			goto err_get;
-		}
-
-		ret = regulator_set_load(msm8x16->supplies[i].consumer,
-			pdata->regulator[i].optimum_ua);
-		if (ret < 0) {
-			dev_err(msm8x16->dev, "Setting regulator optimum mode failed for regulator %s err = %d\n",
-				msm8x16->supplies[i].supply, ret);
-			goto err_get;
-		} else {
-			ret = 0;
-		}
-	}
-
-	return ret;
-
-err_get:
-	regulator_bulk_free(msm8x16->num_of_supplies, msm8x16->supplies);
-err_supplies:
-	kfree(msm8x16->supplies);
-err:
-	return ret;
-}
-
-static int msm8x16_wcd_enable_static_supplies(struct msm8x16_wcd *msm8x16,
-					  struct msm8x16_wcd_pdata *pdata)
-{
-	int i;
-	int ret = 0;
-
-	for (i = 0; i < msm8x16->num_of_supplies; i++) {
-		if (pdata->regulator[i].ondemand)
-			continue;
-		ret = regulator_enable(msm8x16->supplies[i].consumer);
-		if (ret) {
-			dev_err(msm8x16->dev, "Failed to enable %s\n",
-			       msm8x16->supplies[i].supply);
-			break;
-		}
-		dev_err(msm8x16->dev, "Enabled regulator %s\n",
-				 msm8x16->supplies[i].supply);
-	}
-
-	while (ret && --i)
-		if (!pdata->regulator[i].ondemand)
-			regulator_disable(msm8x16->supplies[i].consumer);
-
-	return ret;
-}
-
-
-
-static void msm8x16_wcd_disable_supplies(struct msm8x16_wcd *msm8x16,
-				     struct msm8x16_wcd_pdata *pdata)
-{
-	int i;
-
-	regulator_bulk_disable(msm8x16->num_of_supplies,
-				    msm8x16->supplies);
-	for (i = 0; i < msm8x16->num_of_supplies; i++) {
-		if (regulator_count_voltages(msm8x16->supplies[i].consumer) <=
-			0)
-			continue;
-		regulator_set_voltage(msm8x16->supplies[i].consumer, 0,
-			pdata->regulator[i].max_uv);
-		regulator_set_load(msm8x16->supplies[i].consumer, 0);
-	}
-	regulator_bulk_free(msm8x16->num_of_supplies, msm8x16->supplies);
-	kfree(msm8x16->supplies);
-}
-
-static struct snd_soc_dai_driver msm_codec_dais[] = {
-	{
-		.name = "msm-codec-rx",
-		.playback = { /* Support maximum range */
-			.stream_name = "Playback",
-			.channels_min = 1,
-			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		},
-	},
-	{
-		.name = "msm-codec-tx",
-		.capture = { /* Support maximum range */
-			.stream_name = "Record",
-			.channels_min = 1,
-			.channels_max = 4,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		},
-	},
-};
-
-static struct regmap *msm89xx_codec_regmap;
-static struct regmap *msm89xx_codec_get_regmap(struct device *dev)
-{
-	return msm89xx_codec_regmap;
-}
-
-static struct snd_soc_codec_driver soc_msm89xx_codec = {
-	.probe	= msm89xx_digcodec_probe,
-	.remove	= msm89xx_digcodec_remove,
-	.get_regmap = msm89xx_codec_get_regmap,
-};
-
-static const struct of_device_id msm89xx_codec_of_match[] = {
-	{ .compatible = "qcom,msm-codec-core",
-	  .data = "msm_codec"},
-	{ .compatible = "qcom,pmic-codec-digital",
-	  .data = "pmic-digital-codec"},
-	{ .compatible = "qcom,pmic-codec-analog",
-	  .data = "pmic-analog-codec"},
-	{},
-};
-MODULE_DEVICE_TABLE(of, msm89xx_codec_of_match);
-
-static struct msm8x16_wcd *temp_89xx;
-static int msm89xx_codec_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct msm8x16_wcd *msm8x16 = NULL;
-	struct msm8x16_wcd_pdata *pdata;
-	int adsp_state;
-	static int dev_registered_cnt;
-	const struct of_device_id *match;
-	const char *addr_prop_name = "qcom,dig-cdc-base-addr";
-	u32 dig_cdc_addr;
-	char __iomem *dig_base;
-
-	adsp_state = apr_get_subsys_state();
-	if (adsp_state != APR_SUBSYS_LOADED) {
-		dev_err(&pdev->dev, "Adsp is not loaded yet %d\n",
-				adsp_state);
-		return -EPROBE_DEFER;
-	}
-
-	match = of_match_node(msm89xx_codec_of_match,
-			pdev->dev.of_node);
-
-	dev_dbg(&pdev->dev, "%s(%d):%s\n",
-		__func__, __LINE__, (char *)match->data);
-
-	if (!strcmp(match->data, "pmic-digital-codec")) {
-		device_init_wakeup(&pdev->dev, true);
-
-		if (pdev->dev.of_node) {
-			dev_err(&pdev->dev, "%s:Platform data from device tree\n",
-				__func__);
-			pdata = msm8x16_wcd_populate_dt_pdata(&pdev->dev);
-			pdev->dev.platform_data = pdata;
-		} else {
-			dev_err(&pdev->dev, "%s:Platform data from board file\n",
-				__func__);
-			pdata = pdev->dev.platform_data;
-		}
-		if (pdata == NULL) {
-			dev_err(&pdev->dev, "%s:Platform data failed to populate\n",
-				__func__);
-			goto rtn;
-		}
-		msm8x16 = kzalloc(sizeof(struct msm8x16_wcd), GFP_KERNEL);
-		if (msm8x16 == NULL) {
-			ret = -ENOMEM;
-			goto rtn;
-		}
-
-		msm8x16->dev = &pdev->dev;
-		ret = msm8x16_wcd_init_supplies(msm8x16, pdata);
-		if (ret) {
-			dev_err(&pdev->dev, "%s: Failed to initialize codec supplies\n",
-				__func__);
-			goto err_codec;
-		}
-
-		ret = msm8x16_wcd_enable_static_supplies(msm8x16, pdata);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"%s: Failed to enable codec pre-reset supplies\n",
-				__func__);
-			goto err_codec;
-		}
-		usleep_range(5, 6);
-
-		mutex_init(&msm8x16->io_lock);
-		dev_set_drvdata(&pdev->dev, msm8x16);
-		temp_89xx = msm8x16;
-		dev_registered_cnt++;
-	} else if (!strcmp(match->data, "pmic-analog-codec")) {
-		if (wcd9xxx_spmi_irq_init()) {
-			dev_err(&pdev->dev,
-				"%s: irq initialization failed\n", __func__);
-		} else {
-			dev_err(&pdev->dev,
-				"%s: irq initialization passed\n", __func__);
-		}
-		dev_registered_cnt++;
-	} else if (!strcmp(match->data, "msm_codec")) {
-		ret = of_property_read_u32(pdev->dev.of_node, addr_prop_name,
-							&dig_cdc_addr);
-		if (ret) {
-			dev_err(&pdev->dev, "%s: could not find %s entry in dt\n",
-					__func__, addr_prop_name);
-			dig_cdc_addr = MSM89XX_DIGITAL_CODEC_BASE_ADDR;
-		}
-		dig_base = ioremap(dig_cdc_addr,
-				 MSM89XX_DIGITAL_CODEC_REG_SIZE);
-		if (dig_base == NULL) {
-			dev_err(&pdev->dev, "%s ioremap failed\n", __func__);
-			return -ENOMEM;
-		}
-		msm89xx_codec_regmap =
-			devm_regmap_init_mmio_clk(&pdev->dev, NULL,
-				dig_base, &msm89xx_cdc_core_regmap_config);
-		snd_soc_register_codec(&pdev->dev, &soc_msm89xx_codec,
-				msm_codec_dais, ARRAY_SIZE(msm_codec_dais));
-		dev_registered_cnt++;
-	}
-
-	if ((dev_registered_cnt == MAX_MSM89XX_DEVICE) && (!ret)) {
-		msm89xx_pmic_cdc_regmap =
-			devm_regmap_init_spmi_ext(
-				(struct spmi_device *) &pdev->dev.parent,
-				&msm89xx_pmic_cdc_regmap_config);
-		ret = snd_soc_register_codec(temp_89xx->dev,
-				&soc_codec_dev_msm8x16_wcd,
-				msm8x16_wcd_i2s_dai,
-				ARRAY_SIZE(msm8x16_wcd_i2s_dai));
-		if (ret) {
-			dev_err(&pdev->dev,
-			"%s:snd_soc_register_codec failed with error %d\n",
-			__func__, ret);
-			goto err_supplies;
-		}
-	}
-	return ret;
-err_supplies:
-	msm8x16_wcd_disable_supplies(msm8x16, pdata);
-err_codec:
-	kfree(msm8x16);
-rtn:
-	return ret;
-}
-
-static int msm89xx_codec_remove(struct platform_device *pdev)
-{
-	struct msm8x16_wcd *msm8x16 = dev_get_drvdata(&pdev->dev);
-
-	mutex_destroy(&msm8x16->io_lock);
-	kfree(msm8x16);
-
-	return 0;
-}
-
-static struct platform_driver msm_codec_driver = {
-	.driver                 = {
-		.owner          = THIS_MODULE,
-		.name           = DRV_NAME,
-		.of_match_table = of_match_ptr(msm89xx_codec_of_match)
-	},
-	.probe                  = msm89xx_codec_probe,
-	.remove                 = msm89xx_codec_remove,
-};
-module_platform_driver(msm_codec_driver);
-
-MODULE_DESCRIPTION("MSM89xx Audio codec driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm8x16/msm8x16-wcd.h b/sound/soc/codecs/msm8x16/msm8x16-wcd.h
deleted file mode 100644
index 45ebab2..0000000
--- a/sound/soc/codecs/msm8x16/msm8x16-wcd.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef MSM8X16_WCD_H
-#define MSM8X16_WCD_H
-
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include <sound/q6afe-v2.h>
-#include "../wcd-mbhc-v2.h"
-#include "../wcdcal-hwdep.h"
-#include "msm8x16_wcd_registers.h"
-
-#define MICBIAS_EXT_BYP_CAP 0x00
-#define MICBIAS_NO_EXT_BYP_CAP 0x01
-
-#define MSM89XX_NUM_IRQ_REGS	2
-#define MAX_REGULATOR				7
-#define MSM89XX_REG_VAL(reg, val)		{reg, 0, val}
-#define MSM8X16_TOMBAK_LPASS_AUDIO_CORE_DIG_CODEC_CLK_SEL	0xFE03B004
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CMD_RCGR			0x0181C09C
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CFG_RCGR			0x0181C0A0
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_M				0x0181C0A4
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_N				0x0181C0A8
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_D				0x0181C0AC
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CBCR			0x0181C0B0
-#define MSM8X16_TOMBAK_LPASS_DIGCODEC_AHB_CBCR			0x0181C0B4
-
-#define MSM8X16_CODEC_NAME "msm8x16_wcd_codec"
-
-#define MSM89XX_IS_CDC_CORE_REG(reg) \
-	(((reg >= 0x00) && (reg <= 0x3FF)) ? 1 : 0)
-#define MSM89XX_IS_PMIC_CDC_REG(reg) \
-	(((reg >= 0xF000) && (reg <= 0xF1FF)) ? 1 : 0)
-/*
- * MCLK activity indicators during suspend and resume calls
- */
-#define MCLK_SUS_DIS	1
-#define MCLK_SUS_RSC	2
-#define MCLK_SUS_NO_ACT	3
-
-#define NUM_DECIMATORS	4
-#define MSM89XX_VDD_SPKDRV_NAME "cdc-vdd-spkdrv"
-
-#define DEFAULT_MULTIPLIER 800
-#define DEFAULT_GAIN 9
-#define DEFAULT_OFFSET 100
-
-extern const u8 msm89xx_pmic_cdc_reg_readable[MSM89XX_PMIC_CDC_CACHE_SIZE];
-extern const u8 msm89xx_cdc_core_reg_readable[MSM89XX_CDC_CORE_CACHE_SIZE];
-extern struct regmap_config msm89xx_cdc_core_regmap_config;
-extern struct regmap_config msm89xx_pmic_cdc_regmap_config;
-
-enum codec_versions {
-	TOMBAK_1_0,
-	TOMBAK_2_0,
-	CONGA,
-	CAJON,
-	CAJON_2_0,
-	DIANGU,
-	UNSUPPORTED,
-};
-
-/* Support different hph modes */
-enum {
-	NORMAL_MODE = 0,
-	HD2_MODE,
-};
-
-/* Codec supports 1 compander */
-enum {
-	COMPANDER_NONE = 0,
-	COMPANDER_1, /* HPHL/R */
-	COMPANDER_MAX,
-};
-
-enum wcd_curr_ref {
-	I_h4_UA = 0,
-	I_pt5_UA,
-	I_14_UA,
-	I_l4_UA,
-	I_1_UA,
-};
-
-enum wcd_mbhc_imp_det_pin {
-	WCD_MBHC_DET_NONE = 0,
-	WCD_MBHC_DET_HPHL,
-	WCD_MBHC_DET_HPHR,
-	WCD_MBHC_DET_BOTH,
-};
-
-
-/* Each micbias can be assigned to one of three cfilters
- * Vbatt_min >= .15V + ldoh_v
- * ldoh_v >= .15v + cfiltx_mv
- * If ldoh_v = 1.95 160 mv < cfiltx_mv < 1800 mv
- * If ldoh_v = 2.35 200 mv < cfiltx_mv < 2200 mv
- * If ldoh_v = 2.75 240 mv < cfiltx_mv < 2600 mv
- * If ldoh_v = 2.85 250 mv < cfiltx_mv < 2700 mv
- */
-
-struct wcd9xxx_micbias_setting {
-	u8 ldoh_v;
-	u32 cfilt1_mv; /* in mv */
-	u32 cfilt2_mv; /* in mv */
-	u32 cfilt3_mv; /* in mv */
-	/* Different WCD9xxx series codecs may not
-	 * have 4 mic biases. If a codec has fewer
-	 * mic biases, some of these properties will
-	 * not be used.
-	 */
-	u8 bias1_cfilt_sel;
-	u8 bias2_cfilt_sel;
-	u8 bias3_cfilt_sel;
-	u8 bias4_cfilt_sel;
-	u8 bias1_cap_mode;
-	u8 bias2_cap_mode;
-	u8 bias3_cap_mode;
-	u8 bias4_cap_mode;
-	bool bias2_is_headset_only;
-};
-
-enum msm8x16_wcd_pid_current {
-	MSM89XX_PID_MIC_2P5_UA,
-	MSM89XX_PID_MIC_5_UA,
-	MSM89XX_PID_MIC_10_UA,
-	MSM89XX_PID_MIC_20_UA,
-};
-
-struct msm8x16_wcd_reg_mask_val {
-	u16	reg;
-	u8	mask;
-	u8	val;
-};
-
-enum msm8x16_wcd_mbhc_analog_pwr_cfg {
-	MSM89XX_ANALOG_PWR_COLLAPSED = 0,
-	MSM89XX_ANALOG_PWR_ON,
-	MSM89XX_NUM_ANALOG_PWR_CONFIGS,
-};
-
-/* Number of input and output I2S port */
-enum {
-	MSM89XX_RX1 = 0,
-	MSM89XX_RX2,
-	MSM89XX_RX3,
-	MSM89XX_RX_MAX,
-};
-
-enum {
-	MSM89XX_TX1 = 0,
-	MSM89XX_TX2,
-	MSM89XX_TX3,
-	MSM89XX_TX4,
-	MSM89XX_TX_MAX,
-};
-
-enum {
-	/* INTR_REG 0 - Digital Periph */
-	MSM89XX_IRQ_SPKR_CNP = 0,
-	MSM89XX_IRQ_SPKR_CLIP,
-	MSM89XX_IRQ_SPKR_OCP,
-	MSM89XX_IRQ_MBHC_INSREM_DET1,
-	MSM89XX_IRQ_MBHC_RELEASE,
-	MSM89XX_IRQ_MBHC_PRESS,
-	MSM89XX_IRQ_MBHC_INSREM_DET,
-	MSM89XX_IRQ_MBHC_HS_DET,
-	/* INTR_REG 1 - Analog Periph */
-	MSM89XX_IRQ_EAR_OCP,
-	MSM89XX_IRQ_HPHR_OCP,
-	MSM89XX_IRQ_HPHL_OCP,
-	MSM89XX_IRQ_EAR_CNP,
-	MSM89XX_IRQ_HPHR_CNP,
-	MSM89XX_IRQ_HPHL_CNP,
-	MSM89XX_NUM_IRQS,
-};
-
-enum {
-	ON_DEMAND_MICBIAS = 0,
-	ON_DEMAND_SPKDRV,
-	ON_DEMAND_SUPPLIES_MAX,
-};
-
-/*
- * The delay list follows the codec HW specification.
- * Please add new delays to this list instead of
- * using magic numbers.
- */
-enum {
-	CODEC_DELAY_1_MS = 1000,
-	CODEC_DELAY_1_1_MS  = 1100,
-};
-
-struct msm8x16_wcd_regulator {
-	const char *name;
-	int min_uv;
-	int max_uv;
-	int optimum_ua;
-	bool ondemand;
-	struct regulator *regulator;
-};
-
-struct on_demand_supply {
-	struct regulator *supply;
-	atomic_t ref;
-};
-
-struct wcd_imped_i_ref {
-	enum wcd_curr_ref curr_ref;
-	int min_val;
-	int multiplier;
-	int gain_adj;
-	int offset;
-};
-
-struct msm8x16_wcd_pdata {
-	int irq;
-	int irq_base;
-	int num_irqs;
-	int reset_gpio;
-	void *msm8x16_wcd_ahb_base_vaddr;
-	struct wcd9xxx_micbias_setting micbias;
-	struct msm8x16_wcd_regulator regulator[MAX_REGULATOR];
-	u32 mclk_rate;
-	u32 is_lpass;
-};
-
-enum msm8x16_wcd_micbias_num {
-	MSM89XX_MICBIAS1 = 0,
-};
-
-struct msm8x16_wcd {
-	struct device *dev;
-	struct mutex io_lock;
-	u8 version;
-
-	int reset_gpio;
-	int (*read_dev)(struct snd_soc_codec *codec,
-			unsigned short reg);
-	int (*write_dev)(struct snd_soc_codec *codec,
-			 unsigned short reg, u8 val);
-
-	u32 num_of_supplies;
-	struct regulator_bulk_data *supplies;
-
-	u8 idbyte[4];
-
-	int num_irqs;
-	u32 mclk_rate;
-};
-
-struct msm8x16_wcd_priv {
-	struct snd_soc_codec *codec;
-	u16 pmic_rev;
-	u16 codec_version;
-	u32 boost_voltage;
-	u32 adc_count;
-	u32 rx_bias_count;
-	s32 dmic_1_2_clk_cnt;
-	u32 mute_mask;
-	bool int_mclk0_enabled;
-	bool clock_active;
-	bool config_mode_active;
-	u16 boost_option;
-	/* mode to select hd2 */
-	u32 hph_mode;
-	/* compander used for each rx chain */
-	u32 comp_enabled[MSM89XX_RX_MAX];
-	bool spk_boost_set;
-	bool ear_pa_boost_set;
-	bool ext_spk_boost_set;
-	bool dec_active[NUM_DECIMATORS];
-	struct on_demand_supply on_demand_list[ON_DEMAND_SUPPLIES_MAX];
-	struct regulator *spkdrv_reg;
-	/* mbhc module */
-	struct wcd_mbhc mbhc;
-	/* cal info for codec */
-	struct fw_info *fw_data;
-	struct blocking_notifier_head notifier;
-	int (*codec_spk_ext_pa_cb)(struct snd_soc_codec *codec, int enable);
-	int (*codec_hph_comp_gpio)(bool enable);
-	unsigned long status_mask;
-	struct wcd_imped_i_ref imped_i_ref;
-	enum wcd_mbhc_imp_det_pin imped_det_pin;
-};
-
-extern int msm8x16_wcd_mclk_enable(struct snd_soc_codec *codec, int mclk_enable,
-			     bool dapm);
-
-extern int msm8x16_wcd_hs_detect(struct snd_soc_codec *codec,
-		    struct wcd_mbhc_config *mbhc_cfg);
-
-extern void msm8x16_wcd_hs_detect_exit(struct snd_soc_codec *codec);
-
-extern void msm8x16_update_int_spk_boost(bool enable);
-
-extern void msm8x16_wcd_spk_ext_pa_cb(
-		int (*codec_spk_ext_pa)(struct snd_soc_codec *codec,
-		int enable), struct snd_soc_codec *codec);
-
-extern void msm8x16_wcd_hph_comp_cb(
-		int (*codec_hph_comp_gpio)(bool enable),
-		struct snd_soc_codec *codec);
-void enable_digital_callback(void *flag);
-void disable_digital_callback(void *flag);
-
-#endif
diff --git a/sound/soc/codecs/msm_hdmi_codec_rx.c b/sound/soc/codecs/msm_hdmi_codec_rx.c
index cd5e707..46cfe7d 100644
--- a/sound/soc/codecs/msm_hdmi_codec_rx.c
+++ b/sound/soc/codecs/msm_hdmi_codec_rx.c
@@ -20,10 +20,17 @@
 #include <linux/msm_ext_display.h>
 
 #define MSM_EXT_DISP_PCM_RATES	SNDRV_PCM_RATE_48000
+#define AUD_EXT_DISP_ACK_DISCONNECT (AUDIO_ACK_CONNECT ^ AUDIO_ACK_CONNECT)
+#define AUD_EXT_DISP_ACK_CONNECT    (AUDIO_ACK_CONNECT)
+#define AUD_EXT_DISP_ACK_ENABLE     (AUDIO_ACK_SET_ENABLE | AUDIO_ACK_ENABLE)
 
 static const char *const ext_disp_audio_type_text[] = {"None", "HDMI", "DP"};
+static const char *const ext_disp_audio_ack_text[] = {"Disconnect",  "Connect",
+						      "Ack_Enable"};
 
 static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_audio_type, ext_disp_audio_type_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_audio_ack_state,
+				ext_disp_audio_ack_text);
 
 struct msm_ext_disp_audio_codec_rx_data {
 	struct platform_device *ext_disp_core_pdev;
@@ -84,6 +91,15 @@
 	rc = codec_data->ext_disp_ops.get_audio_edid_blk(
 			codec_data->ext_disp_core_pdev, &edid_blk);
 	if (rc >= 0) {
+		if (sizeof(ucontrol->value.bytes.data) <
+			  (edid_blk.audio_data_blk_size +
+			   edid_blk.spk_alloc_data_blk_size)) {
+			dev_err(codec->dev,
+				"%s: Not enough memory to copy EDID data\n",
+				__func__);
+			return -ENOMEM;
+		}
+
 		memcpy(ucontrol->value.bytes.data,
 		       edid_blk.audio_data_blk,
 		       edid_blk.audio_data_blk_size);
@@ -166,6 +182,55 @@
 	return rc;
 }
 
+static int msm_ext_disp_audio_ack_set(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_ext_disp_audio_codec_rx_data *codec_data;
+	u32 ack_state = 0;
+	int rc;
+
+	codec_data = snd_soc_codec_get_drvdata(codec);
+	if (!codec_data ||
+	    !codec_data->ext_disp_ops.acknowledge) {
+		dev_err(codec->dev,
+			"%s: codec_data or ops acknowledge() is NULL\n",
+			__func__);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	switch (ucontrol->value.enumerated.item[0]) {
+	case 0:
+		ack_state = AUD_EXT_DISP_ACK_DISCONNECT;
+		break;
+	case 1:
+		ack_state = AUD_EXT_DISP_ACK_CONNECT;
+		break;
+	case 2:
+		ack_state = AUD_EXT_DISP_ACK_ENABLE;
+		break;
+	default:
+		rc = -EINVAL;
+		dev_err(codec->dev,
+			"%s: invalid value %d for mixer ctl\n",
+			__func__, ucontrol->value.enumerated.item[0]);
+		goto done;
+	}
+	dev_dbg(codec->dev, "%s: control %d, ack set value 0x%x\n",
+		__func__, ucontrol->value.enumerated.item[0], ack_state);
+
+	rc = codec_data->ext_disp_ops.acknowledge(
+			 codec_data->ext_disp_core_pdev, ack_state);
+	if (rc < 0) {
+		dev_err(codec->dev, "%s: error from acknowledge(), err:%d\n",
+			__func__, rc);
+	}
+
+done:
+	return rc;
+}
+
 static const struct snd_kcontrol_new msm_ext_disp_codec_rx_controls[] = {
 	{
 		.access = SNDRV_CTL_ELEM_ACCESS_READ |
@@ -185,6 +250,8 @@
 	},
 	SOC_ENUM_EXT("External Display Type", ext_disp_audio_type,
 		     msm_ext_disp_audio_type_get, NULL),
+	SOC_ENUM_EXT("External Display Audio Ack", ext_disp_audio_ack_state,
+		     NULL, msm_ext_disp_audio_ack_set),
 };
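+
+/*
+ * Illustrative note (an assumption about usage, not required by the code
+ * above): the new "External Display Audio Ack" enum control is expected
+ * to be driven from userspace, for example with a standard ALSA mixer
+ * command such as
+ *
+ *	amixer -c <card> cset name='External Display Audio Ack' Connect
+ *
+ * which reaches msm_ext_disp_audio_ack_set() and is translated into the
+ * corresponding AUD_EXT_DISP_ACK_* value passed to
+ * ext_disp_ops.acknowledge().
+ */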
 
 static int msm_ext_disp_audio_codec_rx_dai_startup(
diff --git a/sound/soc/codecs/msm_sdw/Kconfig b/sound/soc/codecs/msm_sdw/Kconfig
new file mode 100644
index 0000000..abd7c8c
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/Kconfig
@@ -0,0 +1,6 @@
+config SND_SOC_MSM_SDW
+	tristate "MSM Internal soundwire codec"
+	help
+	  MSM-based SoundWire codec core driver,
+	  supported along with the internal digital
+	  codec core.
diff --git a/sound/soc/codecs/msm_sdw/Makefile b/sound/soc/codecs/msm_sdw/Makefile
new file mode 100644
index 0000000..64e932b
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/Makefile
@@ -0,0 +1,3 @@
+snd-soc-msm-sdw-objs := msm_sdw_cdc.o msm_sdw_regmap.o msm-sdw-tables.o msm_sdw_cdc_utils.o
+obj-$(CONFIG_SND_SOC_MSM_SDW)	+= snd-soc-msm-sdw.o
+ccflags-y += -I$(srctree)/sound/soc/msm
diff --git a/sound/soc/codecs/msm_sdw/msm-sdw-tables.c b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
new file mode 100644
index 0000000..4cbdb72
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
@@ -0,0 +1,222 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include "msm_sdw.h"
+
+const u8 msm_sdw_page_map[MSM_SDW_MAX_REGISTER] = {
+	[MSM_SDW_TX9_SPKR_PROT_PATH_CTL] = 0xa,
+	[MSM_SDW_TX9_SPKR_PROT_PATH_CFG0] = 0xa,
+	[MSM_SDW_TX10_SPKR_PROT_PATH_CTL] = 0xa,
+	[MSM_SDW_TX10_SPKR_PROT_PATH_CFG0] = 0xa,
+	[MSM_SDW_TX11_SPKR_PROT_PATH_CTL] = 0xa,
+	[MSM_SDW_TX11_SPKR_PROT_PATH_CFG0] = 0xa,
+	[MSM_SDW_TX12_SPKR_PROT_PATH_CTL] = 0xa,
+	[MSM_SDW_TX12_SPKR_PROT_PATH_CFG0] = 0xa,
+	[MSM_SDW_COMPANDER7_CTL0] = 0xb,
+	[MSM_SDW_COMPANDER7_CTL1] = 0xb,
+	[MSM_SDW_COMPANDER7_CTL2] = 0xb,
+	[MSM_SDW_COMPANDER7_CTL3] = 0xb,
+	[MSM_SDW_COMPANDER7_CTL4] = 0xb,
+	[MSM_SDW_COMPANDER7_CTL5] = 0xb,
+	[MSM_SDW_COMPANDER7_CTL6] = 0xb,
+	[MSM_SDW_COMPANDER7_CTL7] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL0] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL1] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL2] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL3] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL4] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL5] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL6] = 0xb,
+	[MSM_SDW_COMPANDER8_CTL7] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_CTL] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_CFG0] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_CFG1] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_CFG2] = 0xb,
+	[MSM_SDW_RX7_RX_VOL_CTL] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_MIX_CTL] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_MIX_CFG] = 0xb,
+	[MSM_SDW_RX7_RX_VOL_MIX_CTL] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_SEC0] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_SEC1] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_SEC2] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_SEC3] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_SEC5] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_SEC6] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_SEC7] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_MIX_SEC0] = 0xb,
+	[MSM_SDW_RX7_RX_PATH_MIX_SEC1] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_CTL] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_CFG0] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_CFG1] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_CFG2] = 0xb,
+	[MSM_SDW_RX8_RX_VOL_CTL] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_MIX_CTL] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_MIX_CFG] = 0xb,
+	[MSM_SDW_RX8_RX_VOL_MIX_CTL] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_SEC0] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_SEC1] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_SEC2] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_SEC3] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_SEC5] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_SEC6] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_SEC7] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_MIX_SEC0] = 0xb,
+	[MSM_SDW_RX8_RX_PATH_MIX_SEC1] = 0xb,
+	[MSM_SDW_BOOST0_BOOST_PATH_CTL] = 0xc,
+	[MSM_SDW_BOOST0_BOOST_CTL] = 0xc,
+	[MSM_SDW_BOOST0_BOOST_CFG1] = 0xc,
+	[MSM_SDW_BOOST0_BOOST_CFG2] = 0xc,
+	[MSM_SDW_BOOST1_BOOST_PATH_CTL] = 0xc,
+	[MSM_SDW_BOOST1_BOOST_CTL] = 0xc,
+	[MSM_SDW_BOOST1_BOOST_CFG1] = 0xc,
+	[MSM_SDW_BOOST1_BOOST_CFG2] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_0] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_1] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_2] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_3] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_0] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_1] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_2] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_3] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_0] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_1] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_2] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_3] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_0] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_1] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_2] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_3] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_ACCESS_CFG] = 0xc,
+	[MSM_SDW_AHB_BRIDGE_ACCESS_STATUS] = 0xc,
+	[MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL] = 0xd,
+	[MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL] = 0xd,
+	[MSM_SDW_CLK_RST_CTRL_SWR_CONTROL] = 0xd,
+	[MSM_SDW_TOP_TOP_CFG0] = 0xd,
+	[MSM_SDW_TOP_TOP_CFG1] = 0xd,
+	[MSM_SDW_TOP_RX_I2S_CTL] = 0xd,
+	[MSM_SDW_TOP_TX_I2S_CTL] = 0xd,
+	[MSM_SDW_TOP_I2S_CLK] = 0xd,
+	[MSM_SDW_TOP_RX7_PATH_INPUT0_MUX] = 0xd,
+	[MSM_SDW_TOP_RX7_PATH_INPUT1_MUX] = 0xd,
+	[MSM_SDW_TOP_RX8_PATH_INPUT0_MUX] = 0xd,
+	[MSM_SDW_TOP_RX8_PATH_INPUT1_MUX] = 0xd,
+	[MSM_SDW_TOP_FREQ_MCLK] = 0xd,
+	[MSM_SDW_TOP_DEBUG_BUS_SEL] = 0xd,
+	[MSM_SDW_TOP_DEBUG_EN] = 0xd,
+	[MSM_SDW_TOP_I2S_RESET] = 0xd,
+	[MSM_SDW_TOP_BLOCKS_RESET] = 0xd,
+};
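+
+/*
+ * Minimal sketch of how the page map above is intended to be consulted
+ * before a paged register access (paging is presumably handled by the
+ * regmap bus code in msm_sdw_regmap.c; the snippet below is illustrative
+ * only and reuses fields/helpers defined elsewhere in this driver):
+ *
+ *	u8 page = msm_sdw_page_map[reg];
+ *
+ *	if (!msm_sdw->prev_pg_valid || msm_sdw->prev_pg != page) {
+ *		__msm_sdw_reg_write(msm_sdw, MSM_SDW_PAGE_REGISTER,
+ *				    1, &page);
+ *		msm_sdw->prev_pg = page;
+ *		msm_sdw->prev_pg_valid = true;
+ *	}
+ */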
+
+const u8 msm_sdw_reg_readable[MSM_SDW_MAX_REGISTER] = {
+	[MSM_SDW_PAGE_REGISTER] = 1,
+	[MSM_SDW_TX9_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX9_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_TX10_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX10_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_TX11_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX11_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_TX12_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX12_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_COMPANDER7_CTL0] = 1,
+	[MSM_SDW_COMPANDER7_CTL1] = 1,
+	[MSM_SDW_COMPANDER7_CTL2] = 1,
+	[MSM_SDW_COMPANDER7_CTL3] = 1,
+	[MSM_SDW_COMPANDER7_CTL4] = 1,
+	[MSM_SDW_COMPANDER7_CTL5] = 1,
+	[MSM_SDW_COMPANDER7_CTL6] = 1,
+	[MSM_SDW_COMPANDER7_CTL7] = 1,
+	[MSM_SDW_COMPANDER8_CTL0] = 1,
+	[MSM_SDW_COMPANDER8_CTL1] = 1,
+	[MSM_SDW_COMPANDER8_CTL2] = 1,
+	[MSM_SDW_COMPANDER8_CTL3] = 1,
+	[MSM_SDW_COMPANDER8_CTL4] = 1,
+	[MSM_SDW_COMPANDER8_CTL5] = 1,
+	[MSM_SDW_COMPANDER8_CTL6] = 1,
+	[MSM_SDW_COMPANDER8_CTL7] = 1,
+	[MSM_SDW_RX7_RX_PATH_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_CFG0] = 1,
+	[MSM_SDW_RX7_RX_PATH_CFG1] = 1,
+	[MSM_SDW_RX7_RX_PATH_CFG2] = 1,
+	[MSM_SDW_RX7_RX_VOL_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_CFG] = 1,
+	[MSM_SDW_RX7_RX_VOL_MIX_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC0] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC1] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC2] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC3] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC5] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC6] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC7] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_SEC0] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_SEC1] = 1,
+	[MSM_SDW_RX8_RX_PATH_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_CFG0] = 1,
+	[MSM_SDW_RX8_RX_PATH_CFG1] = 1,
+	[MSM_SDW_RX8_RX_PATH_CFG2] = 1,
+	[MSM_SDW_RX8_RX_VOL_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_CFG] = 1,
+	[MSM_SDW_RX8_RX_VOL_MIX_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC0] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC1] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC2] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC3] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC5] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC6] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC7] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_SEC0] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_SEC1] = 1,
+	[MSM_SDW_BOOST0_BOOST_PATH_CTL] = 1,
+	[MSM_SDW_BOOST0_BOOST_CTL] = 1,
+	[MSM_SDW_BOOST0_BOOST_CFG1] = 1,
+	[MSM_SDW_BOOST0_BOOST_CFG2] = 1,
+	[MSM_SDW_BOOST1_BOOST_PATH_CTL] = 1,
+	[MSM_SDW_BOOST1_BOOST_CTL] = 1,
+	[MSM_SDW_BOOST1_BOOST_CFG1] = 1,
+	[MSM_SDW_BOOST1_BOOST_CFG2] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_0] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_1] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_2] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_3] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_0] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_1] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_2] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_3] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_0] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_1] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_2] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_3] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_0] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_1] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_2] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_DATA_3] = 1,
+	[MSM_SDW_AHB_BRIDGE_ACCESS_CFG] = 1,
+	[MSM_SDW_AHB_BRIDGE_ACCESS_STATUS] = 1,
+	[MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL] = 1,
+	[MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL] = 1,
+	[MSM_SDW_CLK_RST_CTRL_SWR_CONTROL] = 1,
+	[MSM_SDW_TOP_TOP_CFG0] = 1,
+	[MSM_SDW_TOP_TOP_CFG1] = 1,
+	[MSM_SDW_TOP_RX_I2S_CTL] = 1,
+	[MSM_SDW_TOP_TX_I2S_CTL] = 1,
+	[MSM_SDW_TOP_RX7_PATH_INPUT0_MUX] = 1,
+	[MSM_SDW_TOP_RX7_PATH_INPUT1_MUX] = 1,
+	[MSM_SDW_TOP_RX8_PATH_INPUT0_MUX] = 1,
+	[MSM_SDW_TOP_RX8_PATH_INPUT1_MUX] = 1,
+	[MSM_SDW_TOP_FREQ_MCLK] = 1,
+	[MSM_SDW_TOP_DEBUG_BUS_SEL] = 1,
+	[MSM_SDW_TOP_DEBUG_EN] = 1,
+	[MSM_SDW_TOP_I2S_RESET] = 1,
+	[MSM_SDW_TOP_BLOCKS_RESET] = 1,
+};
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw.h b/sound/soc/codecs/msm_sdw/msm_sdw.h
new file mode 100644
index 0000000..3691e84
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_SDW_H
+#define MSM_SDW_H
+
+#include <sound/soc.h>
+#include <sound/q6afe-v2.h>
+#include "msm_sdw_registers.h"
+
+#define MSM_SDW_MAX_REGISTER 0x400
+
+extern const struct regmap_config msm_sdw_regmap_config;
+extern const u8 msm_sdw_page_map[MSM_SDW_MAX_REGISTER];
+extern const u8 msm_sdw_reg_readable[MSM_SDW_MAX_REGISTER];
+
+enum {
+	MSM_SDW_RX4 = 0,
+	MSM_SDW_RX5,
+	MSM_SDW_RX_MAX,
+};
+
+enum {
+	MSM_SDW_TX0 = 0,
+	MSM_SDW_TX1,
+	MSM_SDW_TX_MAX,
+};
+
+enum {
+	COMP1, /* SPK_L */
+	COMP2, /* SPK_R */
+	COMP_MAX
+};
+
+/*
+ * Structure used to update codec
+ * register defaults after reset
+ */
+struct msm_sdw_reg_mask_val {
+	u16 reg;
+	u8 mask;
+	u8 val;
+};
+
+/*
+ * Selects compander and smart boost settings
+ * for a given speaker mode
+ */
+enum {
+	SPKR_MODE_DEFAULT,
+	SPKR_MODE_1,          /* COMP Gain = 12dB, Smartboost Max = 5.5V */
+};
+
+/* Rx path gain offsets */
+enum {
+	RX_GAIN_OFFSET_M1P5_DB,
+	RX_GAIN_OFFSET_0_DB,
+};
+
+struct msm_sdw_reg_val {
+	unsigned short reg; /* register address */
+	u8 *buf;            /* buffer to be written to reg. addr */
+	int bytes;          /* number of bytes to be written */
+};
+
+/* Holds the SoundWire platform device instance */
+struct msm_sdw_ctrl_data {
+	struct platform_device *sdw_pdev;
+};
+
+struct wcd_sdw_ctrl_platform_data {
+	void *handle; /* holds codec private data */
+	int (*read)(void *handle, int reg);
+	int (*write)(void *handle, int reg, int val);
+	int (*bulk_write)(void *handle, u32 *reg, u32 *val, size_t len);
+	int (*clk)(void *handle, bool enable);
+	int (*handle_irq)(void *handle,
+			  irqreturn_t (*swrm_irq_handler)(int irq,
+							  void *data),
+			  void *swrm_handle,
+			  int action);
+};
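+
+/*
+ * Illustrative wiring only: the codec driver is expected to populate the
+ * callbacks above before handing this structure to the soundwire master.
+ * The handler names below match the functions defined in msm_sdw_cdc.c;
+ * the exact probe-time call site is not shown here.
+ *
+ *	msm_sdw->sdw_plat_data.handle = (void *)msm_sdw;
+ *	msm_sdw->sdw_plat_data.read = msm_sdw_swrm_read;
+ *	msm_sdw->sdw_plat_data.write = msm_sdw_swrm_write;
+ *	msm_sdw->sdw_plat_data.bulk_write = msm_sdw_swrm_bulk_write;
+ *	msm_sdw->sdw_plat_data.clk = msm_sdw_swrm_clock;
+ *	msm_sdw->sdw_plat_data.handle_irq = msm_sdwm_handle_irq;
+ */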
+
+struct msm_sdw_priv {
+	struct device *dev;
+	struct mutex io_lock;
+
+	int (*read_dev)(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+			int bytes, void *dest);
+	int (*write_dev)(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+			 int bytes, void *src);
+	int (*multi_reg_write)(struct msm_sdw_priv *msm_sdw, const void *data,
+			       size_t count);
+	struct snd_soc_codec *codec;
+	struct device_node *sdw_gpio_p; /* used by pinctrl API */
+	/* SoundWire data structure */
+	struct msm_sdw_ctrl_data *sdw_ctrl_data;
+	int nr;
+
+	/* compander */
+	int comp_enabled[COMP_MAX];
+	int ear_spkr_gain;
+
+	/* to track the status */
+	unsigned long status_mask;
+
+	struct work_struct msm_sdw_add_child_devices_work;
+	struct wcd_sdw_ctrl_platform_data sdw_plat_data;
+
+	unsigned int vi_feed_value;
+
+	struct mutex sdw_read_lock;
+	struct mutex sdw_write_lock;
+	struct mutex sdw_clk_lock;
+	int sdw_clk_users;
+	int sdw_mclk_users;
+
+	int sdw_irq;
+	int int_mclk1_rsc_ref;
+	bool int_mclk1_enabled;
+	bool sdw_npl_clk_enabled;
+	struct mutex cdc_int_mclk1_mutex;
+	struct mutex sdw_npl_clk_mutex;
+	struct delayed_work disable_int_mclk1_work;
+	struct afe_clk_set sdw_cdc_core_clk;
+	struct afe_clk_set sdw_npl_clk;
+	struct notifier_block service_nb;
+	int (*sdw_cdc_gpio_fn)(bool enable, struct snd_soc_codec *codec);
+	bool dev_up;
+
+	int spkr_gain_offset;
+	int spkr_mode;
+	struct mutex codec_mutex;
+	int rx_4_count;
+	int rx_5_count;
+	u32 mclk_rate;
+	struct regmap *regmap;
+
+	bool prev_pg_valid;
+	u8 prev_pg;
+	u32 sdw_base_addr;
+	char __iomem *sdw_base;
+	u32 version;
+
+	/* Entry for version info */
+	struct snd_info_entry *entry;
+	struct snd_info_entry *version_entry;
+};
+
+extern int msm_sdw_set_spkr_mode(struct snd_soc_codec *codec, int mode);
+extern int msm_sdw_set_spkr_gain_offset(struct snd_soc_codec *codec,
+					int offset);
+extern void msm_sdw_gpio_cb(
+	int (*sdw_cdc_gpio_fn)(bool enable, struct snd_soc_codec *codec),
+	struct snd_soc_codec *codec);
+extern struct regmap *msm_sdw_regmap_init(struct device *dev,
+					  const struct regmap_config *config);
+extern int msm_sdw_codec_info_create_codec_entry(
+	struct snd_info_entry *codec_root,
+	struct snd_soc_codec *codec);
+#endif
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
new file mode 100644
index 0000000..502aa4f
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -0,0 +1,1991 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/soundwire/swr-wcd.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/q6core.h>
+#include <sound/tlv.h>
+#include "msm_sdw.h"
+#include "msm_sdw_registers.h"
+
+#define MSM_SDW_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
+#define MSM_SDW_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+		SNDRV_PCM_FMTBIT_S24_LE |\
+		SNDRV_PCM_FMTBIT_S24_3LE)
+
+#define MSM_SDW_STRING_LEN 80
+
+#define INT_MCLK1_FREQ 9600000
+#define SDW_NPL_FREQ 153600000
+
+#define MSM_SDW_VERSION_1_0 0x0001
+#define MSM_SDW_VERSION_ENTRY_SIZE 32
+
+/*
+ * 200 milliseconds is sufficient for DSP bring-up in the modem
+ * after a subsystem restart (SSR).
+ */
+#define ADSP_STATE_READY_TIMEOUT_MS 200
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+static struct snd_soc_dai_driver msm_sdw_dai[];
+static bool skip_irq = true;
+
+static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
+					int event, int gain_reg);
+static int msm_sdw_config_compander(struct snd_soc_codec *, int, int);
+static int msm_sdw_mclk_enable(struct msm_sdw_priv *msm_sdw,
+			       int mclk_enable, bool dapm);
+static int msm_int_enable_sdw_cdc_clk(struct msm_sdw_priv *msm_sdw,
+				      int enable, bool dapm);
+
+enum {
+	VI_SENSE_1,
+	VI_SENSE_2,
+};
+
+enum {
+	AIF1_SDW_PB = 0,
+	AIF1_SDW_VIFEED,
+	NUM_CODEC_DAIS,
+};
+
+static const struct msm_sdw_reg_mask_val msm_sdw_spkr_default[] = {
+	{MSM_SDW_COMPANDER7_CTL3, 0x80, 0x80},
+	{MSM_SDW_COMPANDER8_CTL3, 0x80, 0x80},
+	{MSM_SDW_COMPANDER7_CTL7, 0x01, 0x01},
+	{MSM_SDW_COMPANDER8_CTL7, 0x01, 0x01},
+	{MSM_SDW_BOOST0_BOOST_CTL, 0x7C, 0x50},
+	{MSM_SDW_BOOST1_BOOST_CTL, 0x7C, 0x50},
+};
+
+static const struct msm_sdw_reg_mask_val msm_sdw_spkr_mode1[] = {
+	{MSM_SDW_COMPANDER7_CTL3, 0x80, 0x00},
+	{MSM_SDW_COMPANDER8_CTL3, 0x80, 0x00},
+	{MSM_SDW_COMPANDER7_CTL7, 0x01, 0x00},
+	{MSM_SDW_COMPANDER8_CTL7, 0x01, 0x00},
+	{MSM_SDW_BOOST0_BOOST_CTL, 0x7C, 0x44},
+	{MSM_SDW_BOOST1_BOOST_CTL, 0x7C, 0x44},
+};
+
+/**
+ * msm_sdw_set_spkr_gain_offset - apply the given offset to the
+ * speaker path gain.
+ *
+ * @codec: codec instance
+ * @offset: Indicates speaker path gain offset value.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int msm_sdw_set_spkr_gain_offset(struct snd_soc_codec *codec, int offset)
+{
+	struct msm_sdw_priv *priv;
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer!\n", __func__);
+		return -EINVAL;
+	}
+
+	priv = snd_soc_codec_get_drvdata(codec);
+	if (!priv)
+		return -EINVAL;
+
+	priv->spkr_gain_offset = offset;
+	return 0;
+}
+EXPORT_SYMBOL(msm_sdw_set_spkr_gain_offset);
+
+/**
+ * msm_sdw_set_spkr_mode - Configures speaker compander and smartboost
+ * settings based on speaker mode.
+ *
+ * @codec: codec instance
+ * @mode: Indicates speaker configuration mode.
+ *
+ * Returns 0 on success or -EINVAL on error.
+ */
+int msm_sdw_set_spkr_mode(struct snd_soc_codec *codec, int mode)
+{
+	struct msm_sdw_priv *priv;
+	int i;
+	const struct msm_sdw_reg_mask_val *regs;
+	int size;
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer!\n", __func__);
+		return -EINVAL;
+	}
+
+	priv = snd_soc_codec_get_drvdata(codec);
+	if (!priv)
+		return -EINVAL;
+
+	switch (mode) {
+	case SPKR_MODE_1:
+		regs = msm_sdw_spkr_mode1;
+		size = ARRAY_SIZE(msm_sdw_spkr_mode1);
+		break;
+	default:
+		regs = msm_sdw_spkr_default;
+		size = ARRAY_SIZE(msm_sdw_spkr_default);
+		break;
+	}
+
+	priv->spkr_mode = mode;
+	for (i = 0; i < size; i++)
+		snd_soc_update_bits(codec, regs[i].reg,
+				    regs[i].mask, regs[i].val);
+	return 0;
+}
+EXPORT_SYMBOL(msm_sdw_set_spkr_mode);
+
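+/*
+ * Usage sketch (illustrative; neither call is made from this file): a
+ * machine driver holding the snd_soc_codec handle would typically pick
+ * the speaker tuning once at init time, e.g.
+ *
+ *	msm_sdw_set_spkr_mode(codec, SPKR_MODE_1);
+ *	msm_sdw_set_spkr_gain_offset(codec, RX_GAIN_OFFSET_M1P5_DB);
+ *
+ * Both helpers return 0 on success or -EINVAL when the codec pointer or
+ * its driver data is missing.
+ */
+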
+static int msm_enable_sdw_npl_clk(struct msm_sdw_priv *msm_sdw, int enable)
+{
+	int ret = 0;
+
+	dev_dbg(msm_sdw->dev, "%s: enable %d\n", __func__, enable);
+
+	mutex_lock(&msm_sdw->sdw_npl_clk_mutex);
+	if (enable) {
+		if (msm_sdw->sdw_npl_clk_enabled == false) {
+			msm_sdw->sdw_npl_clk.enable = 1;
+			ret = afe_set_lpass_clock_v2(
+				AFE_PORT_ID_INT4_MI2S_RX,
+				&msm_sdw->sdw_npl_clk);
+			if (ret < 0) {
+				dev_err(msm_sdw->dev,
+					"%s: failed to enable SDW NPL CLK\n",
+					__func__);
+				mutex_unlock(&msm_sdw->sdw_npl_clk_mutex);
+				return ret;
+			}
+			dev_dbg(msm_sdw->dev, "enabled sdw npl clk\n");
+			msm_sdw->sdw_npl_clk_enabled = true;
+		}
+	} else {
+		if (msm_sdw->sdw_npl_clk_enabled == true) {
+			msm_sdw->sdw_npl_clk.enable = 0;
+			ret = afe_set_lpass_clock_v2(
+				AFE_PORT_ID_INT4_MI2S_RX,
+				&msm_sdw->sdw_npl_clk);
+			if (ret < 0)
+				dev_err(msm_sdw->dev,
+					"%s: failed to disable SDW NPL CLK\n",
+					__func__);
+			msm_sdw->sdw_npl_clk_enabled = false;
+		}
+	}
+	mutex_unlock(&msm_sdw->sdw_npl_clk_mutex);
+	return ret;
+}
+
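+/*
+ * msm_int_enable_sdw_cdc_clk - reference-counted control of the SDW codec
+ * core clock (INT_MCLK1) via the AFE.
+ *
+ * @msm_sdw: SDW codec private data
+ * @enable: non-zero to take a reference on MCLK1, zero to drop one
+ * @dapm: true when invoked from a DAPM event path (informational here)
+ *
+ * The clock is turned on through afe_set_lpass_clock_v2() when the first
+ * reference is taken and turned off again when the last reference is
+ * dropped; pending delayed-disable work is cancelled before the state is
+ * updated.
+ */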
+static int msm_int_enable_sdw_cdc_clk(struct msm_sdw_priv *msm_sdw,
+				      int enable, bool dapm)
+{
+	int ret = 0;
+
+	mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+	dev_dbg(msm_sdw->dev, "%s: enable %d mclk1 ref counter %d\n",
+		__func__, enable, msm_sdw->int_mclk1_rsc_ref);
+	if (enable) {
+		if (msm_sdw->int_mclk1_rsc_ref == 0) {
+			cancel_delayed_work_sync(
+					&msm_sdw->disable_int_mclk1_work);
+			if (msm_sdw->int_mclk1_enabled == false) {
+				msm_sdw->sdw_cdc_core_clk.enable = 1;
+				ret = afe_set_lpass_clock_v2(
+					AFE_PORT_ID_INT4_MI2S_RX,
+					&msm_sdw->sdw_cdc_core_clk);
+				if (ret < 0) {
+					dev_err(msm_sdw->dev,
+						"%s: failed to enable SDW MCLK\n",
+						__func__);
+					goto rtn;
+				}
+				dev_dbg(msm_sdw->dev,
+					"enabled sdw codec core mclk\n");
+				msm_sdw->int_mclk1_enabled = true;
+			}
+		}
+		msm_sdw->int_mclk1_rsc_ref++;
+	} else {
+		cancel_delayed_work_sync(&msm_sdw->disable_int_mclk1_work);
+		if (msm_sdw->int_mclk1_rsc_ref > 0) {
+			msm_sdw->int_mclk1_rsc_ref--;
+			dev_dbg(msm_sdw->dev,
+				"%s: decrementing mclk_res_ref %d\n",
+				 __func__, msm_sdw->int_mclk1_rsc_ref);
+		}
+		if (msm_sdw->int_mclk1_enabled == true &&
+			msm_sdw->int_mclk1_rsc_ref == 0) {
+			msm_sdw->sdw_cdc_core_clk.enable = 0;
+			ret = afe_set_lpass_clock_v2(
+				AFE_PORT_ID_INT4_MI2S_RX,
+				&msm_sdw->sdw_cdc_core_clk);
+			if (ret < 0)
+				dev_err(msm_sdw->dev,
+					"%s: failed to disable SDW MCLK\n",
+					__func__);
+			msm_sdw->int_mclk1_enabled = false;
+		}
+	}
+rtn:
+	mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(msm_int_enable_sdw_cdc_clk);
+
+static void msm_disable_int_mclk1(struct work_struct *work)
+{
+	struct msm_sdw_priv *msm_sdw = NULL;
+	struct delayed_work *dwork;
+	int ret = 0;
+
+	dwork = to_delayed_work(work);
+	msm_sdw = container_of(dwork, struct msm_sdw_priv,
+			disable_int_mclk1_work);
+	mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+	dev_dbg(msm_sdw->dev, "%s: mclk1_enabled %d mclk1_rsc_ref %d\n",
+		__func__, msm_sdw->int_mclk1_enabled,
+		msm_sdw->int_mclk1_rsc_ref);
+	if (msm_sdw->int_mclk1_enabled == true
+			&& msm_sdw->int_mclk1_rsc_ref == 0) {
+		dev_dbg(msm_sdw->dev, "Disable the mclk1\n");
+		msm_sdw->sdw_cdc_core_clk.enable = 0;
+		ret = afe_set_lpass_clock_v2(
+			AFE_PORT_ID_INT4_MI2S_RX,
+			&msm_sdw->sdw_cdc_core_clk);
+		if (ret < 0)
+			dev_err(msm_sdw->dev,
+				"%s failed to disable the MCLK1\n",
+				__func__);
+		msm_sdw->int_mclk1_enabled = false;
+	}
+	mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+}
+
+static int msm_int_mclk1_event(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(msm_sdw->dev, "%s: event = %d\n", __func__, event);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* enable the codec mclk config */
+		msm_int_enable_sdw_cdc_clk(msm_sdw, 1, true);
+		msm_sdw_mclk_enable(msm_sdw, 1, true);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* disable the codec mclk config */
+		msm_sdw_mclk_enable(msm_sdw, 0, true);
+		msm_int_enable_sdw_cdc_clk(msm_sdw, 0, true);
+		break;
+	default:
+		dev_err(msm_sdw->dev,
+			"%s: invalid DAPM event %d\n", __func__, event);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int msm_sdw_ahb_write_device(struct msm_sdw_priv *msm_sdw,
+					u16 reg, u8 *value)
+{
+	u32 temp = (u32)(*value) & 0x000000FF;
+
+	if (!msm_sdw->dev_up) {
+		dev_err_ratelimited(msm_sdw->dev, "%s: q6 not ready\n",
+				    __func__);
+		return 0;
+	}
+
+	iowrite32(temp, msm_sdw->sdw_base + reg);
+	return 0;
+}
+
+static int msm_sdw_ahb_read_device(struct msm_sdw_priv *msm_sdw,
+					u16 reg, u8 *value)
+{
+	u32 temp;
+
+	if (!msm_sdw->dev_up) {
+		dev_err_ratelimited(msm_sdw->dev, "%s: q6 not ready\n",
+				    __func__);
+		return 0;
+	}
+
+	temp = ioread32(msm_sdw->sdw_base + reg);
+	*value = (u8)temp;
+	return 0;
+}
+
+static int __msm_sdw_reg_read(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+			int bytes, void *dest)
+{
+	int ret = -EINVAL, i;
+	u8 temp = 0;
+
+	dev_dbg(msm_sdw->dev, "%s reg = %x\n", __func__, reg);
+	mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+	if (msm_sdw->int_mclk1_enabled == false) {
+		msm_sdw->sdw_cdc_core_clk.enable = 1;
+		ret = afe_set_lpass_clock_v2(
+					AFE_PORT_ID_INT4_MI2S_RX,
+					&msm_sdw->sdw_cdc_core_clk);
+		if (ret < 0) {
+			dev_err(msm_sdw->dev,
+				"%s:failed to enable the INT_MCLK1\n",
+				__func__);
+			goto unlock_exit;
+		}
+		dev_dbg(msm_sdw->dev, "%s:enabled sdw codec core clk\n",
+			__func__);
+		for (i = 0; i < bytes; i++)  {
+			ret = msm_sdw_ahb_read_device(
+				msm_sdw, reg + (4 * i), &temp);
+			((u8 *)dest)[i] = temp;
+		}
+		msm_sdw->int_mclk1_enabled = true;
+		schedule_delayed_work(&msm_sdw->disable_int_mclk1_work, 50);
+		goto unlock_exit;
+	}
+	for (i = 0; i < bytes; i++)  {
+		ret = msm_sdw_ahb_read_device(
+			msm_sdw, reg + (4 * i), &temp);
+		((u8 *)dest)[i] = temp;
+	}
+unlock_exit:
+	mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+	if (ret < 0) {
+		dev_err_ratelimited(msm_sdw->dev,
+				    "%s: codec read failed for reg 0x%x\n",
+				    __func__, reg);
+		return ret;
+	}
+	dev_dbg(msm_sdw->dev, "Read 0x%02x from 0x%x\n", temp, reg);
+
+	return 0;
+}
+
+static int __msm_sdw_reg_write(struct msm_sdw_priv *msm_sdw, unsigned short reg,
+			       int bytes, void *src)
+{
+	int ret = -EINVAL, i;
+
+	mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
+	if (msm_sdw->int_mclk1_enabled == false) {
+		msm_sdw->sdw_cdc_core_clk.enable = 1;
+		ret = afe_set_lpass_clock_v2(AFE_PORT_ID_INT4_MI2S_RX,
+					     &msm_sdw->sdw_cdc_core_clk);
+		if (ret < 0) {
+			dev_err(msm_sdw->dev,
+				"%s: failed to enable the INT_MCLK1\n",
+				__func__);
+			ret = 0;
+			goto unlock_exit;
+		}
+		dev_dbg(msm_sdw->dev, "%s: enabled INT_MCLK1\n", __func__);
+		for (i = 0; i < bytes; i++)
+			ret = msm_sdw_ahb_write_device(msm_sdw, reg + (4 * i),
+						       &((u8 *)src)[i]);
+		msm_sdw->int_mclk1_enabled = true;
+		schedule_delayed_work(&msm_sdw->disable_int_mclk1_work, 50);
+		goto unlock_exit;
+	}
+	for (i = 0; i < bytes; i++)
+		ret = msm_sdw_ahb_write_device(msm_sdw, reg + (4 * i),
+					       &((u8 *)src)[i]);
+unlock_exit:
+	mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+	dev_dbg(msm_sdw->dev, "Write 0x%x val 0x%02x\n",
+				reg, (u32)(*(u32 *)src));
+
+	return ret;
+}
+
+static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	struct snd_soc_codec *codec = NULL;
+	struct msm_sdw_priv *msm_sdw_p = NULL;
+	int ret = 0;
+
+	if (!w) {
+		pr_err("%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	codec = snd_soc_dapm_to_codec(w->dapm);
+	msm_sdw_p = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: num_dai %d stream name %s\n",
+		__func__, codec->component.num_dai, w->sname);
+
+	dev_dbg(codec->dev, "%s(): w->name %s event %d w->shift %d\n",
+		__func__, w->name, event, w->shift);
+	if (w->shift != AIF1_SDW_VIFEED) {
+		dev_err(codec->dev,
+			"%s:Error in enabling the vi feedback path\n",
+			__func__);
+		ret = -EINVAL;
+		goto out_vi;
+	}
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (test_bit(VI_SENSE_1, &msm_sdw_p->status_mask)) {
+			dev_dbg(codec->dev, "%s: spkr1 enabled\n", __func__);
+			/* Enable V&I sensing */
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x10);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x00);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+		}
+		if (test_bit(VI_SENSE_2, &msm_sdw_p->status_mask)) {
+			dev_dbg(codec->dev, "%s: spkr2 enabled\n", __func__);
+			/* Enable V&I sensing */
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x0F,
+				0x04);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x0F,
+				0x04);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x10,
+				0x10);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x00);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (test_bit(VI_SENSE_1, &msm_sdw_p->status_mask)) {
+			/* Disable V&I sensing */
+			dev_dbg(codec->dev, "%s: spkr1 disabled\n", __func__);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x20, 0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x00);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+		}
+		if (test_bit(VI_SENSE_2, &msm_sdw_p->status_mask)) {
+			/* Disable V&I sensing */
+			dev_dbg(codec->dev, "%s: spkr2 disabled\n", __func__);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x20,
+				0x20);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+			snd_soc_update_bits(codec,
+				MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x10,
+				0x00);
+		}
+		break;
+	}
+out_vi:
+	return ret;
+}
+
+static int msm_sdwm_handle_irq(void *handle,
+			       irqreturn_t (*swrm_irq_handler)(int irq,
+							       void *data),
+			       void *swrm_handle,
+			       int action)
+{
+	struct msm_sdw_priv *msm_sdw;
+	int ret = 0;
+
+	if (!handle) {
+		pr_err("%s: null handle received\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw = (struct msm_sdw_priv *) handle;
+
+	if (skip_irq)
+		return ret;
+
+	if (action) {
+		ret = request_threaded_irq(msm_sdw->sdw_irq, NULL,
+					   swrm_irq_handler,
+					   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					   "swr_master_irq", swrm_handle);
+		if (ret)
+			dev_err(msm_sdw->dev, "%s: Failed to request irq %d\n",
+				__func__, ret);
+	} else
+		free_irq(msm_sdw->sdw_irq, swrm_handle);
+
+	return ret;
+}
+
+static void msm_sdw_codec_hd2_control(struct snd_soc_codec *codec,
+				      u16 reg, int event)
+{
+	u16 hd2_scale_reg;
+	u16 hd2_enable_reg = 0;
+
+	if (reg == MSM_SDW_RX7_RX_PATH_CTL) {
+		hd2_scale_reg = MSM_SDW_RX7_RX_PATH_SEC3;
+		hd2_enable_reg = MSM_SDW_RX7_RX_PATH_CFG0;
+	}
+	if (reg == MSM_SDW_RX8_RX_PATH_CTL) {
+		hd2_scale_reg = MSM_SDW_RX8_RX_PATH_SEC3;
+		hd2_enable_reg = MSM_SDW_RX8_RX_PATH_CFG0;
+	}
+
+	if (hd2_enable_reg && SND_SOC_DAPM_EVENT_ON(event)) {
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x10);
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x03, 0x01);
+		snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x04);
+	}
+
+	if (hd2_enable_reg && SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, hd2_enable_reg, 0x04, 0x00);
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x03, 0x00);
+		snd_soc_update_bits(codec, hd2_scale_reg, 0x3C, 0x00);
+	}
+}
+
+static int msm_sdw_enable_swr(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_sdw_priv *msm_sdw;
+	int i, ch_cnt;
+
+	msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+	if (!msm_sdw->nr)
+		return 0;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (!(strnstr(w->name, "RX4", sizeof("RX4 MIX"))) &&
+		    !msm_sdw->rx_4_count)
+			msm_sdw->rx_4_count++;
+		if (!(strnstr(w->name, "RX5", sizeof("RX5 MIX"))) &&
+		    !msm_sdw->rx_5_count)
+			msm_sdw->rx_5_count++;
+		ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
+
+		for (i = 0; i < msm_sdw->nr; i++) {
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_DEVICE_UP, NULL);
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_SET_NUM_RX_CH, &ch_cnt);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (!(strnstr(w->name, "RX4", sizeof("RX4 MIX"))) &&
+		    msm_sdw->rx_4_count)
+			msm_sdw->rx_4_count--;
+		if (!(strnstr(w->name, "RX5", sizeof("RX5 MIX"))) &&
+		    msm_sdw->rx_5_count)
+			msm_sdw->rx_5_count--;
+		ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
+
+		for (i = 0; i < msm_sdw->nr; i++)
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_SET_NUM_RX_CH, &ch_cnt);
+		break;
+	}
+	dev_dbg(msm_sdw->dev, "%s: current swr ch cnt: %d\n",
+		__func__, msm_sdw->rx_4_count + msm_sdw->rx_5_count);
+
+	return 0;
+}
+
+static int msm_sdw_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	u16 gain_reg;
+	u16 reg;
+	int val;
+	int offset_val = 0;
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+
+	if (!(strcmp(w->name, "RX INT4 INTERP"))) {
+		reg = MSM_SDW_RX7_RX_PATH_CTL;
+		gain_reg = MSM_SDW_RX7_RX_VOL_CTL;
+	} else if (!(strcmp(w->name, "RX INT5 INTERP"))) {
+		reg = MSM_SDW_RX8_RX_PATH_CTL;
+		gain_reg = MSM_SDW_RX8_RX_VOL_CTL;
+	} else {
+		dev_err(codec->dev, "%s: Interpolator reg not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, reg, 0x10, 0x10);
+		msm_sdw_codec_hd2_control(codec, reg, event);
+		snd_soc_update_bits(codec, reg, 1 << 0x5, 1 << 0x5);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		msm_sdw_config_compander(codec, w->shift, event);
+		/* apply gain after int clk is enabled */
+		if ((msm_sdw->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (msm_sdw->comp_enabled[COMP1] ||
+		     msm_sdw->comp_enabled[COMP2]) &&
+		    (gain_reg == MSM_SDW_RX7_RX_VOL_CTL ||
+		     gain_reg == MSM_SDW_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, MSM_SDW_RX7_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec, MSM_SDW_RX8_RX_PATH_SEC1,
+					    0x01, 0x01);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x01);
+			offset_val = -2;
+		}
+		val = snd_soc_read(codec, gain_reg);
+		val += offset_val;
+		snd_soc_write(codec, gain_reg, val);
+		msm_sdw_config_ear_spkr_gain(codec, event, gain_reg);
+		snd_soc_update_bits(codec, reg, 0x10, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, reg, 1 << 0x5, 0 << 0x5);
+		snd_soc_update_bits(codec, reg,	0x40, 0x40);
+		snd_soc_update_bits(codec, reg,	0x40, 0x00);
+		msm_sdw_codec_hd2_control(codec, reg, event);
+		msm_sdw_config_compander(codec, w->shift, event);
+		if ((msm_sdw->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
+		    (msm_sdw->comp_enabled[COMP1] ||
+		     msm_sdw->comp_enabled[COMP2]) &&
+		    (gain_reg == MSM_SDW_RX7_RX_VOL_CTL ||
+		     gain_reg == MSM_SDW_RX8_RX_VOL_CTL)) {
+			snd_soc_update_bits(codec, MSM_SDW_RX7_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX7_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec, MSM_SDW_RX8_RX_PATH_SEC1,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    MSM_SDW_RX8_RX_PATH_MIX_SEC0,
+					    0x01, 0x00);
+			offset_val = 2;
+			val = snd_soc_read(codec, gain_reg);
+			val += offset_val;
+			snd_soc_write(codec, gain_reg, val);
+		}
+		msm_sdw_config_ear_spkr_gain(codec, event, gain_reg);
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
+					int event, int gain_reg)
+{
+	int comp_gain_offset, val;
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+	switch (msm_sdw->spkr_mode) {
+	/* Compander gain in SPKR_MODE1 case is 12 dB */
+	case SPKR_MODE_1:
+		comp_gain_offset = -12;
+		break;
+	/* Default case compander gain is 15 dB */
+	default:
+		comp_gain_offset = -15;
+		break;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* Apply ear spkr gain only if compander is enabled */
+		if (msm_sdw->comp_enabled[COMP1] &&
+		    (gain_reg == MSM_SDW_RX7_RX_VOL_CTL) &&
+		    (msm_sdw->ear_spkr_gain != 0)) {
+			/* For example, val is -8(-12+5-1) for 4dB of gain */
+			val = comp_gain_offset + msm_sdw->ear_spkr_gain - 1;
+			snd_soc_write(codec, gain_reg, val);
+
+			dev_dbg(codec->dev, "%s: RX4 Volume %d dB\n",
+				__func__, val);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/*
+		 * Reset RX4 volume to 0 dB if compander is enabled and
+		 * ear_spkr_gain is non-zero.
+		 */
+		if (msm_sdw->comp_enabled[COMP1] &&
+		    (gain_reg == MSM_SDW_RX7_RX_VOL_CTL) &&
+		    (msm_sdw->ear_spkr_gain != 0)) {
+			snd_soc_write(codec, gain_reg, 0x0);
+
+			dev_dbg(codec->dev, "%s: Reset RX4 Volume to 0 dB\n",
+				__func__);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_sdw_codec_spk_boost_event(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 boost_path_ctl, boost_path_cfg1;
+	u16 reg;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	if (!strcmp(w->name, "RX INT4 CHAIN")) {
+		boost_path_ctl = MSM_SDW_BOOST0_BOOST_PATH_CTL;
+		boost_path_cfg1 = MSM_SDW_RX7_RX_PATH_CFG1;
+		reg = MSM_SDW_RX7_RX_PATH_CTL;
+	} else if (!strcmp(w->name, "RX INT5 CHAIN")) {
+		boost_path_ctl = MSM_SDW_BOOST1_BOOST_PATH_CTL;
+		boost_path_cfg1 = MSM_SDW_RX8_RX_PATH_CFG1;
+		reg = MSM_SDW_RX8_RX_PATH_CTL;
+	} else {
+		dev_err(codec->dev, "%s: boost reg not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x10);
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x01);
+		snd_soc_update_bits(codec, reg, 0x10, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, boost_path_cfg1, 0x01, 0x00);
+		snd_soc_update_bits(codec, boost_path_ctl, 0x10, 0x00);
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_sdw_config_compander(struct snd_soc_codec *codec, int comp,
+				    int event)
+{
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	u16 comp_ctl0_reg, rx_path_cfg0_reg;
+
+	if (comp < COMP1 || comp >= COMP_MAX)
+		return 0;
+
+	dev_dbg(codec->dev, "%s: event %d compander %d, enabled %d\n",
+		__func__, event, comp + 1, msm_sdw->comp_enabled[comp]);
+
+	if (!msm_sdw->comp_enabled[comp])
+		return 0;
+
+	comp_ctl0_reg = MSM_SDW_COMPANDER7_CTL0 + (comp * 8);
+	rx_path_cfg0_reg = MSM_SDW_RX7_RX_PATH_CFG0 + (comp * 20);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		/* Enable Compander Clock */
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x01);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x02);
+	}
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x04);
+		snd_soc_update_bits(codec, rx_path_cfg0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x02);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x02, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x01, 0x00);
+		snd_soc_update_bits(codec, comp_ctl0_reg, 0x04, 0x00);
+	}
+
+	return 0;
+}
+
+static int msm_sdw_get_compander(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int comp = ((struct soc_multi_mixer_control *)
+		    kcontrol->private_value)->shift;
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = msm_sdw->comp_enabled[comp];
+	return 0;
+}
+
+static int msm_sdw_set_compander(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+	int comp = ((struct soc_multi_mixer_control *)
+		    kcontrol->private_value)->shift;
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: Compander %d enable current %d, new %d\n",
+		__func__, comp + 1, msm_sdw->comp_enabled[comp], value);
+	msm_sdw->comp_enabled[comp] = value;
+
+	return 0;
+}
+
+static int msm_sdw_ear_spkr_pa_gain_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = msm_sdw->ear_spkr_gain;
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_sdw_ear_spkr_pa_gain_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+	msm_sdw->ear_spkr_gain =  ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: gain = %d\n", __func__,
+		msm_sdw->ear_spkr_gain);
+
+	return 0;
+}
+
+static int msm_sdw_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct msm_sdw_priv *msm_sdw_p = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = msm_sdw_p->vi_feed_value;
+
+	return 0;
+}
+
+static int msm_sdw_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
+	struct msm_sdw_priv *msm_sdw_p = snd_soc_codec_get_drvdata(codec);
+	struct soc_multi_mixer_control *mixer =
+		((struct soc_multi_mixer_control *)kcontrol->private_value);
+	u32 dai_id = widget->shift;
+	u32 port_id = mixer->shift;
+	u32 enable = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: enable: %d, port_id:%d, dai_id: %d\n",
+		__func__, enable, port_id, dai_id);
+
+	msm_sdw_p->vi_feed_value = ucontrol->value.integer.value[0];
+
+	mutex_lock(&msm_sdw_p->codec_mutex);
+	if (enable) {
+		if (port_id == MSM_SDW_TX0 && !test_bit(VI_SENSE_1,
+						&msm_sdw_p->status_mask))
+			set_bit(VI_SENSE_1, &msm_sdw_p->status_mask);
+		if (port_id == MSM_SDW_TX1 && !test_bit(VI_SENSE_2,
+						&msm_sdw_p->status_mask))
+			set_bit(VI_SENSE_2, &msm_sdw_p->status_mask);
+	} else {
+		if (port_id == MSM_SDW_TX0 && test_bit(VI_SENSE_1,
+					&msm_sdw_p->status_mask))
+			clear_bit(VI_SENSE_1, &msm_sdw_p->status_mask);
+		if (port_id == MSM_SDW_TX1 && test_bit(VI_SENSE_2,
+					&msm_sdw_p->status_mask))
+			clear_bit(VI_SENSE_2, &msm_sdw_p->status_mask);
+	}
+	mutex_unlock(&msm_sdw_p->codec_mutex);
+	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, NULL);
+
+	return 0;
+}
+
+static int msm_sdw_mclk_enable(struct msm_sdw_priv *msm_sdw,
+			       int mclk_enable, bool dapm)
+{
+	dev_dbg(msm_sdw->dev, "%s: mclk_enable = %u, dapm = %d clk_users= %d\n",
+		__func__, mclk_enable, dapm, msm_sdw->sdw_mclk_users);
+	if (mclk_enable) {
+		msm_sdw->sdw_mclk_users++;
+		if (msm_sdw->sdw_mclk_users == 1) {
+			regmap_update_bits(msm_sdw->regmap,
+					MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
+					0x01, 0x01);
+			regmap_update_bits(msm_sdw->regmap,
+				MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL,
+				0x01, 0x01);
+			/* 9.6MHz MCLK, set value 0x00 if other frequency */
+			regmap_update_bits(msm_sdw->regmap,
+				MSM_SDW_TOP_FREQ_MCLK, 0x01, 0x01);
+		}
+	} else {
+		msm_sdw->sdw_mclk_users--;
+		if (msm_sdw->sdw_mclk_users == 0) {
+			regmap_update_bits(msm_sdw->regmap,
+					MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
+					0x01, 0x00);
+			regmap_update_bits(msm_sdw->regmap,
+					MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL,
+					0x01, 0x00);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(msm_sdw_mclk_enable);
+
+static int msm_sdw_swrm_read(void *handle, int reg)
+{
+	struct msm_sdw_priv *msm_sdw;
+	unsigned short sdw_rd_addr_base;
+	unsigned short sdw_rd_data_base;
+	int val, ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw = (struct msm_sdw_priv *)handle;
+
+	dev_dbg(msm_sdw->dev, "%s: Reading soundwire register, 0x%x\n",
+		__func__, reg);
+	sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
+	sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
+
+	/*
+	 * Add sleep as SWR slave access read takes time.
+	 * Allow for RD_DONE to complete for previous register if any.
+	 */
+	usleep_range(50, 55);
+
+	/* read_lock */
+	mutex_lock(&msm_sdw->sdw_read_lock);
+	ret = regmap_bulk_write(msm_sdw->regmap, sdw_rd_addr_base,
+				(u8 *)&reg, 4);
+	if (ret < 0) {
+		dev_err(msm_sdw->dev, "%s: RD Addr Failure\n", __func__);
+		goto err;
+	}
+	/* Check for RD value */
+	ret = regmap_bulk_read(msm_sdw->regmap, sdw_rd_data_base,
+			       (u8 *)&val, 4);
+	if (ret < 0) {
+		dev_err(msm_sdw->dev, "%s: RD Data Failure\n", __func__);
+		goto err;
+	}
+	ret = val;
+err:
+	/* read_unlock */
+	mutex_unlock(&msm_sdw->sdw_read_lock);
+	return ret;
+}
+
+static int msm_sdw_bulk_write(struct msm_sdw_priv *msm_sdw,
+				struct msm_sdw_reg_val *bulk_reg,
+				size_t len)
+{
+	int i, ret = 0;
+	unsigned short sdw_wr_addr_base;
+	unsigned short sdw_wr_data_base;
+
+	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
+	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
+
+	for (i = 0; i < len; i += 2) {
+		/* First Write the Data to register */
+		ret = regmap_bulk_write(msm_sdw->regmap,
+			sdw_wr_data_base, bulk_reg[i].buf, 4);
+		if (ret < 0) {
+			dev_err(msm_sdw->dev, "%s: WR Data Failure\n",
+				__func__);
+			break;
+		}
+		/* Next Write Address */
+		ret = regmap_bulk_write(msm_sdw->regmap,
+			sdw_wr_addr_base, bulk_reg[i+1].buf, 4);
+		if (ret < 0) {
+			dev_err(msm_sdw->dev,
+				"%s: WR Addr Failure: 0x%x\n",
+				__func__, (u32)(bulk_reg[i+1].buf[0]));
+			break;
+		}
+	}
+	return ret;
+}
+
+static int msm_sdw_swrm_bulk_write(void *handle, u32 *reg, u32 *val, size_t len)
+{
+	struct msm_sdw_priv *msm_sdw;
+	struct msm_sdw_reg_val *bulk_reg;
+	unsigned short sdw_wr_addr_base;
+	unsigned short sdw_wr_data_base;
+	int i, j, ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+
+	msm_sdw = (struct msm_sdw_priv *)handle;
+	if (!len) {
+		dev_err(msm_sdw->dev,
+			"%s: Invalid size: %zu\n", __func__, len);
+		return -EINVAL;
+	}
+
+	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
+	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
+
+	bulk_reg = kzalloc((2 * len * sizeof(struct msm_sdw_reg_val)),
+			   GFP_KERNEL);
+	if (!bulk_reg)
+		return -ENOMEM;
+
+	for (i = 0, j = 0; i < (len * 2); i += 2, j++) {
+		bulk_reg[i].reg = sdw_wr_data_base;
+		bulk_reg[i].buf = (u8 *)(&val[j]);
+		bulk_reg[i].bytes = 4;
+		bulk_reg[i+1].reg = sdw_wr_addr_base;
+		bulk_reg[i+1].buf = (u8 *)(&reg[j]);
+		bulk_reg[i+1].bytes = 4;
+	}
+	mutex_lock(&msm_sdw->sdw_write_lock);
+
+	ret = msm_sdw_bulk_write(msm_sdw, bulk_reg, (len * 2));
+	if (ret)
+		dev_err(msm_sdw->dev, "%s: swrm bulk write failed, ret: %d\n",
+			__func__, ret);
+
+	mutex_unlock(&msm_sdw->sdw_write_lock);
+	kfree(bulk_reg);
+
+	return ret;
+}
+
+static int msm_sdw_swrm_write(void *handle, int reg, int val)
+{
+	struct msm_sdw_priv *msm_sdw;
+	unsigned short sdw_wr_addr_base;
+	unsigned short sdw_wr_data_base;
+	struct msm_sdw_reg_val bulk_reg[2];
+	int ret;
+
+	if (!handle) {
+		pr_err("%s: NULL handle\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw = (struct msm_sdw_priv *)handle;
+
+	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
+	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
+
+	/* First Write the Data to register */
+	bulk_reg[0].reg = sdw_wr_data_base;
+	bulk_reg[0].buf = (u8 *)(&val);
+	bulk_reg[0].bytes = 4;
+	bulk_reg[1].reg = sdw_wr_addr_base;
+	bulk_reg[1].buf = (u8 *)(&reg);
+	bulk_reg[1].bytes = 4;
+
+	mutex_lock(&msm_sdw->sdw_write_lock);
+
+	ret = msm_sdw_bulk_write(msm_sdw, bulk_reg, 2);
+	if (ret < 0)
+		dev_err(msm_sdw->dev, "%s: WR Data Failure\n", __func__);
+
+	mutex_unlock(&msm_sdw->sdw_write_lock);
+	return ret;
+}
+
+static int msm_sdw_swrm_clock(void *handle, bool enable)
+{
+	struct msm_sdw_priv *msm_sdw = (struct msm_sdw_priv *) handle;
+
+	mutex_lock(&msm_sdw->sdw_clk_lock);
+
+	dev_dbg(msm_sdw->dev, "%s: swrm clock %s\n",
+		__func__, (enable ? "enable" : "disable"));
+	if (enable) {
+		msm_sdw->sdw_clk_users++;
+		if (msm_sdw->sdw_clk_users == 1) {
+			msm_int_enable_sdw_cdc_clk(msm_sdw, 1, true);
+			msm_sdw_mclk_enable(msm_sdw, 1, true);
+			regmap_update_bits(msm_sdw->regmap,
+				MSM_SDW_CLK_RST_CTRL_SWR_CONTROL, 0x01, 0x01);
+			msm_enable_sdw_npl_clk(msm_sdw, true);
+			msm_cdc_pinctrl_select_active_state(
+							msm_sdw->sdw_gpio_p);
+		}
+	} else {
+		msm_sdw->sdw_clk_users--;
+		if (msm_sdw->sdw_clk_users == 0) {
+			regmap_update_bits(msm_sdw->regmap,
+				MSM_SDW_CLK_RST_CTRL_SWR_CONTROL,
+				0x01, 0x00);
+			msm_sdw_mclk_enable(msm_sdw, 0, true);
+			msm_int_enable_sdw_cdc_clk(msm_sdw, 0, true);
+			msm_enable_sdw_npl_clk(msm_sdw, false);
+			msm_cdc_pinctrl_select_sleep_state(msm_sdw->sdw_gpio_p);
+		}
+	}
+	dev_dbg(msm_sdw->dev, "%s: swrm clock users %d\n",
+		__func__, msm_sdw->sdw_clk_users);
+	mutex_unlock(&msm_sdw->sdw_clk_lock);
+	return 0;
+}
+
+static int msm_sdw_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev, "%s(): substream = %s  stream = %d\n",
+		__func__,
+		substream->name, substream->stream);
+	return 0;
+}
+
+static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	u8 clk_fs_rate, fs_rate;
+
+	dev_dbg(dai->codec->dev,
+		"%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
+		__func__, dai->name, dai->id, params_rate(params),
+		params_channels(params), params_format(params));
+
+	switch (params_rate(params)) {
+	case 8000:
+		clk_fs_rate = 0x00;
+		fs_rate = 0x00;
+		break;
+	case 16000:
+		clk_fs_rate = 0x01;
+		fs_rate = 0x01;
+		break;
+	case 32000:
+		clk_fs_rate = 0x02;
+		fs_rate = 0x03;
+		break;
+	case 48000:
+		clk_fs_rate = 0x03;
+		fs_rate = 0x04;
+		break;
+	case 96000:
+		clk_fs_rate = 0x04;
+		fs_rate = 0x05;
+		break;
+	case 192000:
+		clk_fs_rate = 0x05;
+		fs_rate = 0x06;
+		break;
+	default:
+		dev_err(dai->codec->dev,
+			"%s: Invalid sampling rate %d\n", __func__,
+			params_rate(params));
+		return -EINVAL;
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		snd_soc_update_bits(dai->codec,
+				MSM_SDW_TOP_TX_I2S_CTL, 0x1C,
+				(clk_fs_rate << 2));
+	} else {
+		snd_soc_update_bits(dai->codec,
+				MSM_SDW_TOP_RX_I2S_CTL, 0x1C,
+				(clk_fs_rate << 2));
+		snd_soc_update_bits(dai->codec,
+				MSM_SDW_RX7_RX_PATH_CTL, 0x0F,
+				fs_rate);
+		snd_soc_update_bits(dai->codec,
+				MSM_SDW_RX8_RX_PATH_CTL, 0x0F,
+				fs_rate);
+	}
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			snd_soc_update_bits(dai->codec,
+					MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
+		else
+			snd_soc_update_bits(dai->codec,
+					MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			snd_soc_update_bits(dai->codec,
+					MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x00);
+		else
+			snd_soc_update_bits(dai->codec,
+					MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
+		break;
+	default:
+		dev_err(dai->codec->dev, "%s: wrong format selected\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void msm_sdw_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev,
+		"%s(): substream = %s  stream = %d\n", __func__,
+		substream->name, substream->stream);
+}
+
+static ssize_t msm_sdw_codec_version_read(struct snd_info_entry *entry,
+					  void *file_private_data,
+					  struct file *file,
+					  char __user *buf, size_t count,
+					  loff_t pos)
+{
+	struct msm_sdw_priv *msm_sdw;
+	char buffer[MSM_SDW_VERSION_ENTRY_SIZE];
+	int len = 0;
+
+	msm_sdw = (struct msm_sdw_priv *) entry->private_data;
+	if (!msm_sdw) {
+		pr_err("%s: msm_sdw priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (msm_sdw->version) {
+	case MSM_SDW_VERSION_1_0:
+		len = snprintf(buffer, sizeof(buffer), "SDW-CDC_1_0\n");
+		break;
+	default:
+		len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	}
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+static struct snd_info_entry_ops msm_sdw_codec_info_ops = {
+	.read = msm_sdw_codec_version_read,
+};
+
+/*
+ * msm_sdw_codec_info_create_codec_entry - creates msm_sdw module entry
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates the msm_sdw module entry and its version entry under the
+ * given parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int msm_sdw_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					  struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct msm_sdw_priv *msm_sdw;
+	struct snd_soc_card *card;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	msm_sdw = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	msm_sdw->entry = snd_register_module_info(codec_root->module,
+						  "152c1000.msm-sdw-codec",
+						  codec_root);
+	if (!msm_sdw->entry) {
+		dev_err(codec->dev, "%s: failed to create msm_sdw entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   msm_sdw->entry);
+	if (!version_entry) {
+		dev_err(codec->dev, "%s: failed to create msm_sdw version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = msm_sdw;
+	version_entry->size = MSM_SDW_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &msm_sdw_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	msm_sdw->version_entry = version_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_sdw_codec_info_create_codec_entry);
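+
+/*
+ * Illustrative usage only (not part of this driver): a machine driver that
+ * has already created a procfs codec root could expose this codec's version
+ * entry roughly as follows, where "codec_root" and "sdw_codec" are
+ * hypothetical handles owned by that machine driver:
+ *
+ *	ret = msm_sdw_codec_info_create_codec_entry(codec_root, sdw_codec);
+ *	if (ret < 0)
+ *		dev_err(card->dev, "sdw codec info entry failed: %d\n", ret);
+ */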
+
+static struct snd_soc_dai_ops msm_sdw_dai_ops = {
+	.startup = msm_sdw_startup,
+	.shutdown = msm_sdw_shutdown,
+	.hw_params = msm_sdw_hw_params,
+};
+
+static struct snd_soc_dai_driver msm_sdw_dai[] = {
+	{
+		.name = "msm_sdw_i2s_rx1",
+		.id = AIF1_SDW_PB,
+		.playback = {
+			.stream_name = "AIF1_SDW Playback",
+			.rates = MSM_SDW_RATES,
+			.formats = MSM_SDW_FORMATS,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &msm_sdw_dai_ops,
+	},
+	{
+		.name = "msm_sdw_vifeedback",
+		.id = AIF1_SDW_VIFEED,
+		.capture = {
+			.stream_name = "VIfeed_SDW",
+			.rates = MSM_SDW_RATES,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 2,
+			.channels_max = 4,
+		},
+		.ops = &msm_sdw_dai_ops,
+	},
+};
+
+static const char * const rx_mix1_text[] = {
+	"ZERO", "RX4", "RX5"
+};
+
+static const char * const msm_sdw_ear_spkr_pa_gain_text[] = {
+	"G_DEFAULT", "G_0_DB", "G_1_DB", "G_2_DB", "G_3_DB",
+	"G_4_DB", "G_5_DB", "G_6_DB"
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(msm_sdw_ear_spkr_pa_gain_enum,
+				msm_sdw_ear_spkr_pa_gain_text);
+/* RX4 MIX1 */
+static const struct soc_enum rx4_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX7_PATH_INPUT0_MUX,
+		0, 3, rx_mix1_text);
+
+static const struct soc_enum rx4_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX7_PATH_INPUT1_MUX,
+		0, 3, rx_mix1_text);
+
+/* RX5 MIX1 */
+static const struct soc_enum rx5_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX8_PATH_INPUT0_MUX,
+		0, 3, rx_mix1_text);
+
+static const struct soc_enum rx5_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM_SDW_TOP_RX8_PATH_INPUT1_MUX,
+		0, 3, rx_mix1_text);
+
+static const struct snd_kcontrol_new rx4_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX4 MIX1 INP1 Mux", rx4_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx4_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX4 MIX1 INP2 Mux", rx4_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx5_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX5 MIX1 INP1 Mux", rx5_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx5_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX5 MIX1 INP2 Mux", rx5_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new aif1_vi_mixer[] = {
+	SOC_SINGLE_EXT("SPKR_VI_1", SND_SOC_NOPM, MSM_SDW_TX0, 1, 0,
+			msm_sdw_vi_feed_mixer_get, msm_sdw_vi_feed_mixer_put),
+	SOC_SINGLE_EXT("SPKR_VI_2", SND_SOC_NOPM, MSM_SDW_TX1, 1, 0,
+			msm_sdw_vi_feed_mixer_get, msm_sdw_vi_feed_mixer_put),
+};
+
+static const struct snd_soc_dapm_widget msm_sdw_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN("I2S RX4", "AIF1_SDW Playback", 0,
+		SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("I2S RX5", "AIF1_SDW Playback", 0,
+		SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_OUT_E("AIF1_SDW VI", "VIfeed_SDW", 0, SND_SOC_NOPM,
+		AIF1_SDW_VIFEED, 0, msm_sdw_codec_enable_vi_feedback,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("AIF1_VI_SDW Mixer", SND_SOC_NOPM, AIF1_SDW_VIFEED,
+		0, aif1_vi_mixer, ARRAY_SIZE(aif1_vi_mixer)),
+
+	SND_SOC_DAPM_MUX_E("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx4_mix1_inp1_mux, msm_sdw_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx4_mix1_inp2_mux, msm_sdw_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX5 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx5_mix1_inp1_mux, msm_sdw_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("RX5 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx5_mix1_inp2_mux, msm_sdw_enable_swr,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("RX4 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX5 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER_E("RX INT4 INTERP", SND_SOC_NOPM,
+		COMP1, 0, NULL, 0, msm_sdw_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT5 INTERP", SND_SOC_NOPM,
+		COMP2, 0, NULL, 0, msm_sdw_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER_E("RX INT4 CHAIN", SND_SOC_NOPM, 0, 0,
+		NULL, 0, msm_sdw_codec_spk_boost_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX INT5 CHAIN", SND_SOC_NOPM, 0, 0,
+		NULL, 0, msm_sdw_codec_spk_boost_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_INPUT("VIINPUT_SDW"),
+
+	SND_SOC_DAPM_OUTPUT("SPK1 OUT"),
+	SND_SOC_DAPM_OUTPUT("SPK2 OUT"),
+
+	SND_SOC_DAPM_SUPPLY_S("SDW_CONN", -1, MSM_SDW_TOP_I2S_CLK,
+		0, 0, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY_S("INT_MCLK1", -2, SND_SOC_NOPM, 0, 0,
+	msm_int_mclk1_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("SDW_RX_I2S_CLK",
+		MSM_SDW_TOP_RX_I2S_CTL, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("SDW_TX_I2S_CLK",
+		MSM_SDW_TOP_TX_I2S_CTL, 0, 0, NULL, 0),
+};
+
+static const struct snd_kcontrol_new msm_sdw_snd_controls[] = {
+	SOC_ENUM_EXT("EAR SPKR PA Gain", msm_sdw_ear_spkr_pa_gain_enum,
+		     msm_sdw_ear_spkr_pa_gain_get,
+		     msm_sdw_ear_spkr_pa_gain_put),
+	SOC_SINGLE_SX_TLV("RX4 Digital Volume", MSM_SDW_RX7_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX5 Digital Volume", MSM_SDW_RX8_RX_VOL_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMP1, 1, 0,
+		msm_sdw_get_compander, msm_sdw_set_compander),
+	SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMP2, 1, 0,
+		msm_sdw_get_compander, msm_sdw_set_compander),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+
+	{"AIF1_SDW VI", NULL, "SDW_TX_I2S_CLK"},
+	{"SDW_TX_I2S_CLK", NULL, "INT_MCLK1"},
+	{"SDW_TX_I2S_CLK", NULL, "SDW_CONN"},
+
+	/* VI Feedback */
+	{"AIF1_VI_SDW Mixer", "SPKR_VI_1", "VIINPUT_SDW"},
+	{"AIF1_VI_SDW Mixer", "SPKR_VI_2", "VIINPUT_SDW"},
+	{"AIF1_SDW VI", NULL, "AIF1_VI_SDW Mixer"},
+
+	{"SDW_RX_I2S_CLK", NULL, "INT_MCLK1"},
+	{"SDW_RX_I2S_CLK", NULL, "SDW_CONN"},
+	{"I2S RX4", NULL, "SDW_RX_I2S_CLK"},
+	{"I2S RX5", NULL, "SDW_RX_I2S_CLK"},
+
+	{"RX4 MIX1 INP1", "RX4", "I2S RX4"},
+	{"RX4 MIX1 INP1", "RX5", "I2S RX5"},
+	{"RX4 MIX1 INP2", "RX4", "I2S RX4"},
+	{"RX4 MIX1 INP2", "RX5", "I2S RX5"},
+	{"RX5 MIX1 INP1", "RX4", "I2S RX4"},
+	{"RX5 MIX1 INP1", "RX5", "I2S RX5"},
+	{"RX5 MIX1 INP2", "RX4", "I2S RX4"},
+	{"RX5 MIX1 INP2", "RX5", "I2S RX5"},
+
+	{"RX4 MIX1", NULL, "RX4 MIX1 INP1"},
+	{"RX4 MIX1", NULL, "RX4 MIX1 INP2"},
+	{"RX5 MIX1", NULL, "RX5 MIX1 INP1"},
+	{"RX5 MIX1", NULL, "RX5 MIX1 INP2"},
+
+	{"RX INT4 INTERP", NULL, "RX4 MIX1"},
+	{"RX INT4 CHAIN", NULL, "RX INT4 INTERP"},
+	{"SPK1 OUT", NULL, "RX INT4 CHAIN"},
+
+	{"RX INT5 INTERP", NULL, "RX5 MIX1"},
+	{"RX INT5 CHAIN", NULL, "RX INT5 INTERP"},
+	{"SPK2 OUT", NULL, "RX INT5 CHAIN"},
+};
+
+static const struct msm_sdw_reg_mask_val msm_sdw_reg_init[] = {
+	{MSM_SDW_BOOST0_BOOST_CFG1, 0x3F, 0x12},
+	{MSM_SDW_BOOST0_BOOST_CFG2, 0x1C, 0x08},
+	{MSM_SDW_COMPANDER7_CTL7, 0x1E, 0x18},
+	{MSM_SDW_BOOST1_BOOST_CFG1, 0x3F, 0x12},
+	{MSM_SDW_BOOST1_BOOST_CFG2, 0x1C, 0x08},
+	{MSM_SDW_COMPANDER8_CTL7, 0x1E, 0x18},
+	{MSM_SDW_BOOST0_BOOST_CTL, 0x70, 0x50},
+	{MSM_SDW_BOOST1_BOOST_CTL, 0x70, 0x50},
+	{MSM_SDW_RX7_RX_PATH_CFG1, 0x08, 0x08},
+	{MSM_SDW_RX8_RX_PATH_CFG1, 0x08, 0x08},
+	{MSM_SDW_TOP_TOP_CFG1, 0x02, 0x02},
+	{MSM_SDW_TOP_TOP_CFG1, 0x01, 0x01},
+	{MSM_SDW_TX9_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{MSM_SDW_TX10_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{MSM_SDW_TX11_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{MSM_SDW_TX12_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+	{MSM_SDW_COMPANDER7_CTL3, 0x80, 0x80},
+	{MSM_SDW_COMPANDER8_CTL3, 0x80, 0x80},
+	{MSM_SDW_COMPANDER7_CTL7, 0x01, 0x01},
+	{MSM_SDW_COMPANDER8_CTL7, 0x01, 0x01},
+	{MSM_SDW_RX7_RX_PATH_CFG0, 0x01, 0x01},
+	{MSM_SDW_RX8_RX_PATH_CFG0, 0x01, 0x01},
+	{MSM_SDW_RX7_RX_PATH_MIX_CFG, 0x01, 0x01},
+	{MSM_SDW_RX8_RX_PATH_MIX_CFG, 0x01, 0x01},
+};
+
+static void msm_sdw_init_reg(struct snd_soc_codec *codec)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(msm_sdw_reg_init); i++)
+		snd_soc_update_bits(codec,
+				msm_sdw_reg_init[i].reg,
+				msm_sdw_reg_init[i].mask,
+				msm_sdw_reg_init[i].val);
+}
+
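+/*
+ * SSR handling summary for the callback below: on AUDIO_NOTIFIER_SERVICE_DOWN
+ * every registered soundwire master is told the device is going down; on
+ * SERVICE_UP the driver polls q6core_is_adsp_ready() for up to
+ * ADSP_STATE_READY_TIMEOUT_MS and, once the ADSP is up, re-applies
+ * msm_sdw_reg_init(), marks the regcache dirty and syncs it so the hardware
+ * matches the cached state again.
+ */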
+static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
+				       unsigned long opcode, void *ptr)
+{
+	int i;
+	struct msm_sdw_priv *msm_sdw = container_of(nb,
+						    struct msm_sdw_priv,
+						    service_nb);
+	bool adsp_ready = false;
+	unsigned long timeout;
+
+	pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
+
+	mutex_lock(&msm_sdw->codec_mutex);
+	switch (opcode) {
+	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		msm_sdw->dev_up = false;
+		for (i = 0; i < msm_sdw->nr; i++)
+			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+					SWR_DEVICE_DOWN, NULL);
+		break;
+	case AUDIO_NOTIFIER_SERVICE_UP:
+		if (!q6core_is_adsp_ready()) {
+			dev_dbg(msm_sdw->dev, "ADSP isn't ready\n");
+			timeout = jiffies +
+				  msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+			while (!time_after(jiffies, timeout)) {
+				if (!q6core_is_adsp_ready()) {
+					dev_dbg(msm_sdw->dev,
+						"ADSP isn't ready\n");
+				} else {
+					dev_dbg(msm_sdw->dev,
+						"ADSP is ready\n");
+					adsp_ready = true;
+					goto powerup;
+				}
+			}
+		} else {
+			adsp_ready = true;
+			dev_dbg(msm_sdw->dev, "%s: DSP is ready\n", __func__);
+		}
+powerup:
+		if (adsp_ready) {
+			msm_sdw->dev_up = true;
+			msm_sdw_init_reg(msm_sdw->codec);
+			regcache_mark_dirty(msm_sdw->regmap);
+			regcache_sync(msm_sdw->regmap);
+			msm_sdw_set_spkr_mode(msm_sdw->codec,
+					      msm_sdw->spkr_mode);
+		}
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&msm_sdw->codec_mutex);
+	return NOTIFY_OK;
+}
+
+static int msm_sdw_codec_probe(struct snd_soc_codec *codec)
+{
+	struct msm_sdw_priv *msm_sdw;
+	int i, ret;
+
+	msm_sdw = snd_soc_codec_get_drvdata(codec);
+	if (!msm_sdw) {
+		pr_err("%s: SDW priv data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	msm_sdw->codec = codec;
+	for (i = 0; i < COMP_MAX; i++)
+		msm_sdw->comp_enabled[i] = 0;
+
+	msm_sdw->spkr_gain_offset = RX_GAIN_OFFSET_0_DB;
+	msm_sdw_init_reg(codec);
+	msm_sdw->version = MSM_SDW_VERSION_1_0;
+
+	msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
+	ret = audio_notifier_register("msm_sdw",
+				AUDIO_NOTIFIER_ADSP_DOMAIN,
+				&msm_sdw->service_nb);
+	if (ret < 0)
+		dev_err(msm_sdw->dev,
+			"%s: Audio notifier register failed ret = %d\n",
+			__func__, ret);
+	return 0;
+}
+
+static int msm_sdw_codec_remove(struct snd_soc_codec *codec)
+{
+	return 0;
+}
+
+static struct regmap *msm_sdw_get_regmap(struct device *dev)
+{
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+
+	return msm_sdw->regmap;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_msm_sdw = {
+	.probe = msm_sdw_codec_probe,
+	.remove = msm_sdw_codec_remove,
+	.controls = msm_sdw_snd_controls,
+	.num_controls = ARRAY_SIZE(msm_sdw_snd_controls),
+	.dapm_widgets = msm_sdw_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(msm_sdw_dapm_widgets),
+	.dapm_routes = audio_map,
+	.num_dapm_routes = ARRAY_SIZE(audio_map),
+	.get_regmap = msm_sdw_get_regmap,
+};
+
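+/*
+ * Deferred child-device creation, summarising the worker below: every
+ * available child node of the codec's DT node named "swr_master" or matching
+ * "msm_cdc_pinctrl" gets a platform device; the soundwire controller platform
+ * data is attached only to "swr_master" children, and each added controller
+ * is recorded in sdw_ctrl_data.
+ */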
+static void msm_sdw_add_child_devices(struct work_struct *work)
+{
+	struct msm_sdw_priv *msm_sdw;
+	struct platform_device *pdev;
+	struct device_node *node;
+	struct msm_sdw_ctrl_data *sdw_ctrl_data = NULL, *temp;
+	int ret, ctrl_num = 0;
+	struct wcd_sdw_ctrl_platform_data *platdata;
+	char plat_dev_name[MSM_SDW_STRING_LEN];
+
+	msm_sdw = container_of(work, struct msm_sdw_priv,
+			     msm_sdw_add_child_devices_work);
+	if (!msm_sdw) {
+		pr_err("%s: msm_sdw private data is NULL\n",
+			__func__);
+		return;
+	}
+	if (!msm_sdw->dev->of_node) {
+		dev_err(msm_sdw->dev,
+			"%s: DT node for msm_sdw does not exist\n", __func__);
+		return;
+	}
+
+	platdata = &msm_sdw->sdw_plat_data;
+
+	for_each_available_child_of_node(msm_sdw->dev->of_node, node) {
+		if (!strcmp(node->name, "swr_master"))
+			strlcpy(plat_dev_name, "msm_sdw_swr_ctrl",
+				(MSM_SDW_STRING_LEN - 1));
+		else if (strnstr(node->name, "msm_cdc_pinctrl",
+				 strlen("msm_cdc_pinctrl")) != NULL)
+			strlcpy(plat_dev_name, node->name,
+				(MSM_SDW_STRING_LEN - 1));
+		else
+			continue;
+
+		pdev = platform_device_alloc(plat_dev_name, -1);
+		if (!pdev) {
+			dev_err(msm_sdw->dev, "%s: pdev memory alloc failed\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		pdev->dev.parent = msm_sdw->dev;
+		pdev->dev.of_node = node;
+
+		if (!strcmp(node->name, "swr_master")) {
+			ret = platform_device_add_data(pdev, platdata,
+						       sizeof(*platdata));
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: cannot add plat data ctrl:%d\n",
+					__func__, ctrl_num);
+				goto fail_pdev_add;
+			}
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: Cannot add platform device\n",
+				__func__);
+			goto fail_pdev_add;
+		}
+
+		if (!strcmp(node->name, "swr_master")) {
+			temp = krealloc(sdw_ctrl_data,
+					(ctrl_num + 1) * sizeof(
+					struct msm_sdw_ctrl_data),
+					GFP_KERNEL);
+			if (!temp) {
+				dev_err(&pdev->dev, "out of memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+			sdw_ctrl_data = temp;
+			sdw_ctrl_data[ctrl_num].sdw_pdev = pdev;
+			ctrl_num++;
+			dev_dbg(&pdev->dev,
+				"%s: Added soundwire ctrl device(s)\n",
+				__func__);
+			msm_sdw->nr = ctrl_num;
+			msm_sdw->sdw_ctrl_data = sdw_ctrl_data;
+		}
+	}
+
+	return;
+fail_pdev_add:
+	platform_device_put(pdev);
+err:
+	return;
+}
+
+static int msm_sdw_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_sdw_priv *msm_sdw;
+	int adsp_state;
+
+	adsp_state = apr_get_subsys_state();
+	if (adsp_state != APR_SUBSYS_LOADED) {
+		dev_err(&pdev->dev, "ADSP is not loaded yet, state = %d\n",
+				adsp_state);
+		return -EPROBE_DEFER;
+	}
+
+	msm_sdw = devm_kzalloc(&pdev->dev, sizeof(struct msm_sdw_priv),
+			    GFP_KERNEL);
+	if (!msm_sdw)
+		return -ENOMEM;
+	dev_set_drvdata(&pdev->dev, msm_sdw);
+	msm_sdw->dev_up = true;
+
+	msm_sdw->dev = &pdev->dev;
+	INIT_WORK(&msm_sdw->msm_sdw_add_child_devices_work,
+		  msm_sdw_add_child_devices);
+	msm_sdw->sdw_plat_data.handle = (void *) msm_sdw;
+	msm_sdw->sdw_plat_data.read = msm_sdw_swrm_read;
+	msm_sdw->sdw_plat_data.write = msm_sdw_swrm_write;
+	msm_sdw->sdw_plat_data.bulk_write = msm_sdw_swrm_bulk_write;
+	msm_sdw->sdw_plat_data.clk = msm_sdw_swrm_clock;
+	msm_sdw->sdw_plat_data.handle_irq = msm_sdwm_handle_irq;
+	ret = of_property_read_u32(pdev->dev.of_node, "reg",
+				   &msm_sdw->sdw_base_addr);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: could not find %s entry in dt\n",
+			__func__, "reg");
+		goto err_sdw_cdc;
+	}
+
+	msm_sdw->sdw_gpio_p = of_parse_phandle(pdev->dev.of_node,
+					"qcom,cdc-sdw-gpios", 0);
+	msm_sdw->sdw_base = ioremap(msm_sdw->sdw_base_addr,
+				    MSM_SDW_MAX_REGISTER);
+	msm_sdw->read_dev = __msm_sdw_reg_read;
+	msm_sdw->write_dev = __msm_sdw_reg_write;
+
+	msm_sdw->regmap = msm_sdw_regmap_init(msm_sdw->dev,
+					      &msm_sdw_regmap_config);
+	msm_sdw->sdw_irq = platform_get_irq_byname(pdev, "swr_master_irq");
+	if (msm_sdw->sdw_irq < 0) {
+		dev_err(msm_sdw->dev, "%s() error getting irq handle: %d\n",
+				__func__, msm_sdw->sdw_irq);
+		ret = -ENODEV;
+		goto err_sdw_cdc;
+	}
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_msm_sdw,
+				     msm_sdw_dai, ARRAY_SIZE(msm_sdw_dai));
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Codec registration failed, ret = %d\n",
+			__func__, ret);
+		goto err_sdw_cdc;
+	}
+	/* initialize the int_mclk1 */
+	msm_sdw->sdw_cdc_core_clk.clk_set_minor_version =
+			AFE_API_VERSION_I2S_CONFIG;
+	msm_sdw->sdw_cdc_core_clk.clk_id =
+			Q6AFE_LPASS_CLK_ID_INT_MCLK_1;
+	msm_sdw->sdw_cdc_core_clk.clk_freq_in_hz =
+			INT_MCLK1_FREQ;
+	msm_sdw->sdw_cdc_core_clk.clk_attri =
+			Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO;
+	msm_sdw->sdw_cdc_core_clk.clk_root =
+			Q6AFE_LPASS_CLK_ROOT_DEFAULT;
+	msm_sdw->sdw_cdc_core_clk.enable = 0;
+
+	/* initialize the sdw_npl_clk */
+	msm_sdw->sdw_npl_clk.clk_set_minor_version =
+			AFE_API_VERSION_I2S_CONFIG;
+	msm_sdw->sdw_npl_clk.clk_id =
+			AFE_CLOCK_SET_CLOCK_ID_SWR_NPL_CLK;
+	msm_sdw->sdw_npl_clk.clk_freq_in_hz = SDW_NPL_FREQ;
+	msm_sdw->sdw_npl_clk.clk_attri =
+			Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO;
+	msm_sdw->sdw_npl_clk.clk_root =
+			Q6AFE_LPASS_CLK_ROOT_DEFAULT;
+	msm_sdw->sdw_npl_clk.enable = 0;
+
+	INIT_DELAYED_WORK(&msm_sdw->disable_int_mclk1_work,
+			  msm_disable_int_mclk1);
+	mutex_init(&msm_sdw->cdc_int_mclk1_mutex);
+	mutex_init(&msm_sdw->sdw_npl_clk_mutex);
+	mutex_init(&msm_sdw->io_lock);
+	mutex_init(&msm_sdw->sdw_read_lock);
+	mutex_init(&msm_sdw->sdw_write_lock);
+	mutex_init(&msm_sdw->sdw_clk_lock);
+	mutex_init(&msm_sdw->codec_mutex);
+	schedule_work(&msm_sdw->msm_sdw_add_child_devices_work);
+
+	dev_dbg(&pdev->dev, "%s: msm_sdw driver probe done\n", __func__);
+	return ret;
+
+err_sdw_cdc:
+	devm_kfree(&pdev->dev, msm_sdw);
+	return ret;
+}
+
+static int msm_sdw_remove(struct platform_device *pdev)
+{
+	struct msm_sdw_priv *msm_sdw;
+
+	msm_sdw = dev_get_drvdata(&pdev->dev);
+
+	mutex_destroy(&msm_sdw->io_lock);
+	mutex_destroy(&msm_sdw->sdw_read_lock);
+	mutex_destroy(&msm_sdw->sdw_write_lock);
+	mutex_destroy(&msm_sdw->sdw_clk_lock);
+	mutex_destroy(&msm_sdw->codec_mutex);
+	mutex_destroy(&msm_sdw->cdc_int_mclk1_mutex);
+	devm_kfree(&pdev->dev, msm_sdw);
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_sdw_codec_dt_match[] = {
+	{ .compatible = "qcom,msm-sdw-codec", },
+	{}
+};
+
+static struct platform_driver msm_sdw_codec_driver = {
+	.probe = msm_sdw_probe,
+	.remove = msm_sdw_remove,
+	.driver = {
+		.name = "msm_sdw_codec",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_sdw_codec_dt_match,
+	},
+};
+module_platform_driver(msm_sdw_codec_driver);
+
+MODULE_DESCRIPTION("MSM Soundwire Codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc_utils.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc_utils.c
new file mode 100644
index 0000000..9a5c85b
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc_utils.c
@@ -0,0 +1,211 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include "msm_sdw.h"
+
+#define REG_BYTES 2
+#define VAL_BYTES 1
+/*
+ * Page register address that the application processor uses to
+ * access the codec registers; it is identified as 0x00.
+ */
+#define PAGE_REG_ADDR 0x00
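+
+/*
+ * Illustrative access sequence only (the page numbers themselves come from
+ * msm_sdw_page_map, defined elsewhere): before a codec register is accessed,
+ * msm_sdw_page_write() looks up the register's page and, if it differs from
+ * the cached previous page, writes that page number to PAGE_REG_ADDR (0x00);
+ * the register read or write then proceeds on the selected page.
+ */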
+
+/*
+ * msm_sdw_page_write:
+ * Retrieve the page number for the given register and write it to the
+ * page register. Must be called with io_lock held.
+ *
+ * @msm_sdw: pointer to msm_sdw
+ * @reg: Register address from which page number is retrieved
+ *
+ * Returns 0 for success and negative error code for failure.
+ */
+int msm_sdw_page_write(struct msm_sdw_priv *msm_sdw, unsigned short reg)
+{
+	int ret = 0;
+	u8 pg_num, prev_pg_num;
+
+	pg_num = msm_sdw_page_map[reg];
+	if (msm_sdw->prev_pg_valid) {
+		prev_pg_num = msm_sdw->prev_pg;
+		if (prev_pg_num != pg_num) {
+			ret = msm_sdw->write_dev(msm_sdw, PAGE_REG_ADDR, 1,
+						 (void *) &pg_num);
+			if (ret < 0) {
+				dev_err(msm_sdw->dev,
+					"page write error, pg_num: 0x%x\n",
+					pg_num);
+			} else {
+				msm_sdw->prev_pg = pg_num;
+				dev_dbg(msm_sdw->dev,
+					"%s: Page 0x%x Write to 0x00\n",
+					__func__, pg_num);
+			}
+		}
+	} else {
+		ret = msm_sdw->write_dev(msm_sdw, PAGE_REG_ADDR, 1,
+					 (void *) &pg_num);
+		if (ret < 0) {
+			dev_err(msm_sdw->dev,
+				"page write error, pg_num: 0x%x\n", pg_num);
+		} else {
+			msm_sdw->prev_pg = pg_num;
+			msm_sdw->prev_pg_valid = true;
+			dev_dbg(msm_sdw->dev, "%s: Page 0x%x Write to 0x00\n",
+				__func__, pg_num);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(msm_sdw_page_write);
+
+static int regmap_bus_read(void *context, const void *reg, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+	unsigned short c_reg;
+	int ret, i;
+
+	if (!msm_sdw) {
+		dev_err(dev, "%s: msm_sdw is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (!reg || !val) {
+		dev_err(dev, "%s: reg or val is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (reg_size != REG_BYTES) {
+		dev_err(dev, "%s: register size %zd bytes, not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+	if (!msm_sdw->dev_up) {
+		dev_dbg_ratelimited(dev, "%s: No read allowed. dev_up = %d\n",
+				    __func__, msm_sdw->dev_up);
+		return 0;
+	}
+
+	mutex_lock(&msm_sdw->io_lock);
+	c_reg = *(u16 *)reg;
+	ret = msm_sdw_page_write(msm_sdw, c_reg);
+	if (ret)
+		goto err;
+	ret = msm_sdw->read_dev(msm_sdw, c_reg, val_size, val);
+	if (ret < 0)
+		dev_err(dev, "%s: Codec read failed (%d), reg: 0x%x, size:%zd\n",
+			__func__, ret, c_reg, val_size);
+	else {
+		for (i = 0; i < val_size; i++)
+			dev_dbg(dev, "%s: Read 0x%02x from 0x%x\n",
+				__func__, ((u8 *)val)[i], c_reg + i);
+	}
+err:
+	mutex_unlock(&msm_sdw->io_lock);
+
+	return ret;
+}
+
+static int regmap_bus_gather_write(void *context,
+				   const void *reg, size_t reg_size,
+				   const void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+	unsigned short c_reg;
+	int ret, i;
+
+	if (!msm_sdw) {
+		dev_err(dev, "%s: msm_sdw is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (!reg || !val) {
+		dev_err(dev, "%s: reg or val is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (reg_size != REG_BYTES) {
+		dev_err(dev, "%s: register size %zd bytes, not supported\n",
+			__func__, reg_size);
+		return -EINVAL;
+	}
+	if (!msm_sdw->dev_up) {
+		dev_dbg_ratelimited(dev, "%s: No write allowed. dev_up = %d\n",
+				    __func__, msm_sdw->dev_up);
+		return 0;
+	}
+
+	mutex_lock(&msm_sdw->io_lock);
+	c_reg = *(u16 *)reg;
+	ret = msm_sdw_page_write(msm_sdw, c_reg);
+	if (ret)
+		goto err;
+
+	for (i = 0; i < val_size; i++)
+		dev_dbg(dev, "Write %02x to 0x%x\n", ((u8 *)val)[i],
+			c_reg + i*4);
+
+	ret = msm_sdw->write_dev(msm_sdw, c_reg, val_size, (void *) val);
+	if (ret < 0)
+		dev_err(dev,
+			"%s: Codec write failed (%d), reg:0x%x, size:%zd\n",
+			__func__, ret, c_reg, val_size);
+
+err:
+	mutex_unlock(&msm_sdw->io_lock);
+	return ret;
+}
+
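+/*
+ * regmap hands regmap_bus_write() a single buffer whose first REG_BYTES
+ * bytes are the register address and whose remainder is the value payload;
+ * the helper below splits that buffer and forwards it to
+ * regmap_bus_gather_write().
+ */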
+static int regmap_bus_write(void *context, const void *data, size_t count)
+{
+	struct device *dev = context;
+	struct msm_sdw_priv *msm_sdw = dev_get_drvdata(dev);
+
+	if (!msm_sdw)
+		return -EINVAL;
+
+	WARN_ON(count < REG_BYTES);
+
+	return regmap_bus_gather_write(context, data, REG_BYTES,
+				       data + REG_BYTES,
+				       count - REG_BYTES);
+
+}
+
+static struct regmap_bus regmap_bus_config = {
+	.write = regmap_bus_write,
+	.gather_write = regmap_bus_gather_write,
+	.read = regmap_bus_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/*
+ * msm_sdw_regmap_init:
+ * Initialize msm_sdw register map
+ *
+ * @dev: pointer to the msm_sdw device
+ * @config: pointer to register map config
+ *
+ * Returns pointer to regmap structure for success
+ * or NULL in case of failure.
+ */
+struct regmap *msm_sdw_regmap_init(struct device *dev,
+				   const struct regmap_config *config)
+{
+	return devm_regmap_init(dev, &regmap_bus_config, dev, config);
+}
+EXPORT_SYMBOL(msm_sdw_regmap_init);
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_registers.h b/sound/soc/codecs/msm_sdw/msm_sdw_registers.h
new file mode 100644
index 0000000..1b7b0b0
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_registers.h
@@ -0,0 +1,126 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_SDW_REGISTERS_H
+#define MSM_SDW_REGISTERS_H
+
+#define MSM_SDW_PAGE_REGISTER                     0x0000
+
+/* Page-A Registers */
+#define MSM_SDW_TX9_SPKR_PROT_PATH_CTL               0x0308
+#define MSM_SDW_TX9_SPKR_PROT_PATH_CFG0              0x030c
+#define MSM_SDW_TX10_SPKR_PROT_PATH_CTL              0x0318
+#define MSM_SDW_TX10_SPKR_PROT_PATH_CFG0             0x031c
+#define MSM_SDW_TX11_SPKR_PROT_PATH_CTL              0x0328
+#define MSM_SDW_TX11_SPKR_PROT_PATH_CFG0             0x032c
+#define MSM_SDW_TX12_SPKR_PROT_PATH_CTL              0x0338
+#define MSM_SDW_TX12_SPKR_PROT_PATH_CFG0             0x033c
+
+/* Page-B Registers */
+#define MSM_SDW_COMPANDER7_CTL0                      0x0024
+#define MSM_SDW_COMPANDER7_CTL1                      0x0028
+#define MSM_SDW_COMPANDER7_CTL2                      0x002c
+#define MSM_SDW_COMPANDER7_CTL3                      0x0030
+#define MSM_SDW_COMPANDER7_CTL4                      0x0034
+#define MSM_SDW_COMPANDER7_CTL5                      0x0038
+#define MSM_SDW_COMPANDER7_CTL6                      0x003c
+#define MSM_SDW_COMPANDER7_CTL7                      0x0040
+#define MSM_SDW_COMPANDER8_CTL0                      0x0044
+#define MSM_SDW_COMPANDER8_CTL1                      0x0048
+#define MSM_SDW_COMPANDER8_CTL2                      0x004c
+#define MSM_SDW_COMPANDER8_CTL3                      0x0050
+#define MSM_SDW_COMPANDER8_CTL4                      0x0054
+#define MSM_SDW_COMPANDER8_CTL5                      0x0058
+#define MSM_SDW_COMPANDER8_CTL6                      0x005c
+#define MSM_SDW_COMPANDER8_CTL7                      0x0060
+#define MSM_SDW_RX7_RX_PATH_CTL                      0x01a4
+#define MSM_SDW_RX7_RX_PATH_CFG0                     0x01a8
+#define MSM_SDW_RX7_RX_PATH_CFG1                     0x01ac
+#define MSM_SDW_RX7_RX_PATH_CFG2                     0x01b0
+#define MSM_SDW_RX7_RX_VOL_CTL                       0x01b4
+#define MSM_SDW_RX7_RX_PATH_MIX_CTL                  0x01b8
+#define MSM_SDW_RX7_RX_PATH_MIX_CFG                  0x01bc
+#define MSM_SDW_RX7_RX_VOL_MIX_CTL                   0x01c0
+#define MSM_SDW_RX7_RX_PATH_SEC0                     0x01c4
+#define MSM_SDW_RX7_RX_PATH_SEC1                     0x01c8
+#define MSM_SDW_RX7_RX_PATH_SEC2                     0x01cc
+#define MSM_SDW_RX7_RX_PATH_SEC3                     0x01d0
+#define MSM_SDW_RX7_RX_PATH_SEC5                     0x01d8
+#define MSM_SDW_RX7_RX_PATH_SEC6                     0x01dc
+#define MSM_SDW_RX7_RX_PATH_SEC7                     0x01e0
+#define MSM_SDW_RX7_RX_PATH_MIX_SEC0                 0x01e4
+#define MSM_SDW_RX7_RX_PATH_MIX_SEC1                 0x01e8
+#define MSM_SDW_RX8_RX_PATH_CTL                      0x0384
+#define MSM_SDW_RX8_RX_PATH_CFG0                     0x0388
+#define MSM_SDW_RX8_RX_PATH_CFG1                     0x038c
+#define MSM_SDW_RX8_RX_PATH_CFG2                     0x0390
+#define MSM_SDW_RX8_RX_VOL_CTL                       0x0394
+#define MSM_SDW_RX8_RX_PATH_MIX_CTL                  0x0398
+#define MSM_SDW_RX8_RX_PATH_MIX_CFG                  0x039c
+#define MSM_SDW_RX8_RX_VOL_MIX_CTL                   0x03a0
+#define MSM_SDW_RX8_RX_PATH_SEC0                     0x03a4
+#define MSM_SDW_RX8_RX_PATH_SEC1                     0x03a8
+#define MSM_SDW_RX8_RX_PATH_SEC2                     0x03ac
+#define MSM_SDW_RX8_RX_PATH_SEC3                     0x03b0
+#define MSM_SDW_RX8_RX_PATH_SEC5                     0x03b8
+#define MSM_SDW_RX8_RX_PATH_SEC6                     0x03bc
+#define MSM_SDW_RX8_RX_PATH_SEC7                     0x03c0
+#define MSM_SDW_RX8_RX_PATH_MIX_SEC0                 0x03c4
+#define MSM_SDW_RX8_RX_PATH_MIX_SEC1                 0x03c8
+
+/* Page-C Registers */
+#define MSM_SDW_BOOST0_BOOST_PATH_CTL                0x0064
+#define MSM_SDW_BOOST0_BOOST_CTL                     0x0068
+#define MSM_SDW_BOOST0_BOOST_CFG1                    0x006c
+#define MSM_SDW_BOOST0_BOOST_CFG2                    0x0070
+#define MSM_SDW_BOOST1_BOOST_PATH_CTL                0x0084
+#define MSM_SDW_BOOST1_BOOST_CTL                     0x0088
+#define MSM_SDW_BOOST1_BOOST_CFG1                    0x008c
+#define MSM_SDW_BOOST1_BOOST_CFG2                    0x0090
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_0                 0x00a4
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_1                 0x00a8
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_2                 0x00ac
+#define MSM_SDW_AHB_BRIDGE_WR_DATA_3                 0x00b0
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_0                 0x00b4
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_1                 0x00b8
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_2                 0x00bc
+#define MSM_SDW_AHB_BRIDGE_WR_ADDR_3                 0x00c0
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_0                 0x00c4
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_1                 0x00c8
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_2                 0x00cc
+#define MSM_SDW_AHB_BRIDGE_RD_ADDR_3                 0x00d0
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_0                 0x00d4
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_1                 0x00d8
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_2                 0x00dc
+#define MSM_SDW_AHB_BRIDGE_RD_DATA_3                 0x00e0
+#define MSM_SDW_AHB_BRIDGE_ACCESS_CFG                0x00e4
+#define MSM_SDW_AHB_BRIDGE_ACCESS_STATUS             0x00e8
+
+/* Page-D Registers */
+#define MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL            0x0104
+#define MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL          0x0108
+#define MSM_SDW_CLK_RST_CTRL_SWR_CONTROL             0x010c
+#define MSM_SDW_TOP_TOP_CFG0                         0x0204
+#define MSM_SDW_TOP_TOP_CFG1                         0x0208
+#define MSM_SDW_TOP_RX_I2S_CTL                       0x020c
+#define MSM_SDW_TOP_TX_I2S_CTL                       0x0210
+#define MSM_SDW_TOP_I2S_CLK                          0x0214
+#define MSM_SDW_TOP_RX7_PATH_INPUT0_MUX              0x0218
+#define MSM_SDW_TOP_RX7_PATH_INPUT1_MUX              0x021c
+#define MSM_SDW_TOP_RX8_PATH_INPUT0_MUX              0x0220
+#define MSM_SDW_TOP_RX8_PATH_INPUT1_MUX              0x0224
+#define MSM_SDW_TOP_FREQ_MCLK                        0x0228
+#define MSM_SDW_TOP_DEBUG_BUS_SEL                    0x022c
+#define MSM_SDW_TOP_DEBUG_EN                         0x0230
+#define MSM_SDW_TOP_I2S_RESET                        0x0234
+#define MSM_SDW_TOP_BLOCKS_RESET                     0x0238
+
+#endif
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c b/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c
new file mode 100644
index 0000000..78858f0
--- /dev/null
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/regmap.h>
+#include "msm_sdw.h"
+
+static const struct reg_default msm_sdw_defaults[] = {
+	/* Page #10 registers */
+	{ MSM_SDW_PAGE_REGISTER, 0x00 },
+	{ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX9_SPKR_PROT_PATH_CFG0, 0x00 },
+	{ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX10_SPKR_PROT_PATH_CFG0, 0x00 },
+	{ MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX11_SPKR_PROT_PATH_CFG0, 0x00 },
+	{ MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x02 },
+	{ MSM_SDW_TX12_SPKR_PROT_PATH_CFG0, 0x00 },
+	/* Page #11 registers */
+	{ MSM_SDW_COMPANDER7_CTL0, 0x60 },
+	{ MSM_SDW_COMPANDER7_CTL1, 0xdb },
+	{ MSM_SDW_COMPANDER7_CTL2, 0xff },
+	{ MSM_SDW_COMPANDER7_CTL3, 0x35 },
+	{ MSM_SDW_COMPANDER7_CTL4, 0xff },
+	{ MSM_SDW_COMPANDER7_CTL5, 0x00 },
+	{ MSM_SDW_COMPANDER7_CTL6, 0x01 },
+	{ MSM_SDW_COMPANDER8_CTL0, 0x60 },
+	{ MSM_SDW_COMPANDER8_CTL1, 0xdb },
+	{ MSM_SDW_COMPANDER8_CTL2, 0xff },
+	{ MSM_SDW_COMPANDER8_CTL3, 0x35 },
+	{ MSM_SDW_COMPANDER8_CTL4, 0xff },
+	{ MSM_SDW_COMPANDER8_CTL5, 0x00 },
+	{ MSM_SDW_COMPANDER8_CTL6, 0x01 },
+	{ MSM_SDW_RX7_RX_PATH_CTL, 0x04 },
+	{ MSM_SDW_RX7_RX_PATH_CFG0, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_CFG2, 0x8f },
+	{ MSM_SDW_RX7_RX_VOL_CTL, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_MIX_CTL, 0x04 },
+	{ MSM_SDW_RX7_RX_VOL_MIX_CTL, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC2, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC3, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC5, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC6, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_SEC7, 0x00 },
+	{ MSM_SDW_RX7_RX_PATH_MIX_SEC1, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_CTL, 0x04 },
+	{ MSM_SDW_RX8_RX_PATH_CFG0, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_CFG2, 0x8f },
+	{ MSM_SDW_RX8_RX_VOL_CTL, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_MIX_CTL, 0x04 },
+	{ MSM_SDW_RX8_RX_VOL_MIX_CTL, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC2, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC3, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC5, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC6, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_SEC7, 0x00 },
+	{ MSM_SDW_RX8_RX_PATH_MIX_SEC1, 0x00 },
+	/* Page #12 registers */
+	{ MSM_SDW_BOOST0_BOOST_PATH_CTL, 0x00 },
+	{ MSM_SDW_BOOST0_BOOST_CTL, 0xb2 },
+	{ MSM_SDW_BOOST0_BOOST_CFG1, 0x00 },
+	{ MSM_SDW_BOOST0_BOOST_CFG2, 0x00 },
+	{ MSM_SDW_BOOST1_BOOST_PATH_CTL, 0x00 },
+	{ MSM_SDW_BOOST1_BOOST_CTL, 0xb2 },
+	{ MSM_SDW_BOOST1_BOOST_CFG1, 0x00 },
+	{ MSM_SDW_BOOST1_BOOST_CFG2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_DATA_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_WR_ADDR_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_ADDR_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_0, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_1, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_2, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_RD_DATA_3, 0x00 },
+	{ MSM_SDW_AHB_BRIDGE_ACCESS_CFG, 0x0f },
+	{ MSM_SDW_AHB_BRIDGE_ACCESS_STATUS, 0x03 },
+	/* Page #13 registers */
+	{ MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL, 0x00 },
+	{ MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL, 0x00 },
+	{ MSM_SDW_CLK_RST_CTRL_SWR_CONTROL, 0x00 },
+	{ MSM_SDW_TOP_TOP_CFG0, 0x00 },
+	{ MSM_SDW_TOP_TOP_CFG1, 0x00 },
+	{ MSM_SDW_TOP_RX_I2S_CTL, 0x0C },
+	{ MSM_SDW_TOP_TX_I2S_CTL, 0x00 },
+	{ MSM_SDW_TOP_I2S_CLK, 0x00 },
+	{ MSM_SDW_TOP_RX7_PATH_INPUT0_MUX, 0x00 },
+	{ MSM_SDW_TOP_RX7_PATH_INPUT1_MUX, 0x00 },
+	{ MSM_SDW_TOP_RX8_PATH_INPUT0_MUX, 0x00 },
+	{ MSM_SDW_TOP_RX8_PATH_INPUT1_MUX, 0x00 },
+	{ MSM_SDW_TOP_FREQ_MCLK, 0x00 },
+	{ MSM_SDW_TOP_DEBUG_BUS_SEL, 0x00 },
+	{ MSM_SDW_TOP_DEBUG_EN, 0x00 },
+	{ MSM_SDW_TOP_I2S_RESET, 0x00 },
+	{ MSM_SDW_TOP_BLOCKS_RESET, 0x00 },
+};
+
+static bool msm_sdw_is_readable_register(struct device *dev, unsigned int reg)
+{
+	return msm_sdw_reg_readable[reg];
+}
+
+static bool msm_sdw_is_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case MSM_SDW_AHB_BRIDGE_WR_DATA_0:
+	case MSM_SDW_AHB_BRIDGE_WR_DATA_1:
+	case MSM_SDW_AHB_BRIDGE_WR_DATA_2:
+	case MSM_SDW_AHB_BRIDGE_WR_DATA_3:
+	case MSM_SDW_AHB_BRIDGE_WR_ADDR_0:
+	case MSM_SDW_AHB_BRIDGE_WR_ADDR_1:
+	case MSM_SDW_AHB_BRIDGE_WR_ADDR_2:
+	case MSM_SDW_AHB_BRIDGE_WR_ADDR_3:
+	case MSM_SDW_AHB_BRIDGE_RD_DATA_0:
+	case MSM_SDW_AHB_BRIDGE_RD_DATA_1:
+	case MSM_SDW_AHB_BRIDGE_RD_DATA_2:
+	case MSM_SDW_AHB_BRIDGE_RD_DATA_3:
+	case MSM_SDW_AHB_BRIDGE_RD_ADDR_0:
+	case MSM_SDW_AHB_BRIDGE_RD_ADDR_1:
+	case MSM_SDW_AHB_BRIDGE_RD_ADDR_2:
+	case MSM_SDW_AHB_BRIDGE_RD_ADDR_3:
+	case MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL:
+	case MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL:
+		return true;
+	default:
+		return false;
+	}
+}
+
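+/*
+ * 16-bit register addresses, 8-bit values and a 4-byte register stride
+ * (matching the 4-byte spacing of the addresses in msm_sdw_registers.h),
+ * with an rbtree cache seeded from msm_sdw_defaults above.
+ */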
+const struct regmap_config msm_sdw_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.reg_stride = 4,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = msm_sdw_defaults,
+	.num_reg_defaults = ARRAY_SIZE(msm_sdw_defaults),
+	.max_register = MSM_SDW_MAX_REGISTER,
+	.volatile_reg = msm_sdw_is_volatile_register,
+	.readable_reg = msm_sdw_is_readable_register,
+};
diff --git a/sound/soc/codecs/msm8x16/Kconfig b/sound/soc/codecs/sdm660_cdc/Kconfig
similarity index 61%
rename from sound/soc/codecs/msm8x16/Kconfig
rename to sound/soc/codecs/sdm660_cdc/Kconfig
index d225b7a..d370da3 100644
--- a/sound/soc/codecs/msm8x16/Kconfig
+++ b/sound/soc/codecs/sdm660_cdc/Kconfig
@@ -1,3 +1,3 @@
 
-config SND_SOC_MSM8X16_WCD
+config SND_SOC_SDM660_CDC
 	tristate "MSM Internal PMIC based codec"
diff --git a/sound/soc/codecs/sdm660_cdc/Makefile b/sound/soc/codecs/sdm660_cdc/Makefile
new file mode 100644
index 0000000..d846fae
--- /dev/null
+++ b/sound/soc/codecs/sdm660_cdc/Makefile
@@ -0,0 +1,2 @@
+snd-soc-sdm660-cdc-objs := msm-analog-cdc.o msm-digital-cdc.o sdm660-regmap.o
+obj-$(CONFIG_SND_SOC_SDM660_CDC) += snd-soc-sdm660-cdc.o sdm660-cdc-irq.o
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
new file mode 100644
index 0000000..5f8e3fd
--- /dev/null
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -0,0 +1,4607 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/workqueue.h>
+#include <linux/regmap.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <sound/q6afe-v2.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/q6core.h>
+#include "msm-analog-cdc.h"
+#include "sdm660-cdc-irq.h"
+#include "sdm660-cdc-registers.h"
+#include "msm-cdc-common.h"
+#include "../../msm/sdm660-common.h"
+#include "../wcd-mbhc-v2-api.h"
+
+#define DRV_NAME "pmic_analog_codec"
+#define SDM660_CDC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
+			SNDRV_PCM_RATE_48000)
+#define SDM660_CDC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+		SNDRV_PCM_FMTBIT_S24_LE)
+#define MSM_DIG_CDC_STRING_LEN 80
+#define MSM_ANLG_CDC_VERSION_ENTRY_SIZE 32
+
+#define CODEC_DT_MAX_PROP_SIZE			40
+#define MAX_ON_DEMAND_SUPPLY_NAME_LENGTH	64
+#define BUS_DOWN 1
+
+/*
+ * 50 milliseconds is sufficient for the DSP to come up in the LPASS
+ * after a subsystem restart (SSR).
+ */
+#define ADSP_STATE_READY_TIMEOUT_MS 50
+
+#define EAR_PMD 0
+#define EAR_PMU 1
+#define SPK_PMD 2
+#define SPK_PMU 3
+
+#define MICBIAS_DEFAULT_VAL 1800000
+#define MICBIAS_MIN_VAL 1600000
+#define MICBIAS_STEP_SIZE 50000
+
+#define DEFAULT_BOOST_VOLTAGE 5000
+#define MIN_BOOST_VOLTAGE 4000
+#define MAX_BOOST_VOLTAGE 5550
+#define BOOST_VOLTAGE_STEP 50
+
+#define SDM660_CDC_MBHC_BTN_COARSE_ADJ  100 /* in mV */
+#define SDM660_CDC_MBHC_BTN_FINE_ADJ 12 /* in mV */
+
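+/*
+ * Worked example (illustrative only): a 237 mV button threshold is encoded
+ * by msm_anlg_cdc_mbhc_program_btn_thr() as coarse = 237 / 100 = 2 and
+ * fine = (237 % 100) / 12 = 3, giving a register value of
+ * (2 << 5) | (3 << 2) = 0x4c.
+ */
+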
+#define VOLTAGE_CONVERTER(value, min_value, step_size)\
+	(((value) - (min_value))/(step_size))
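+
+/*
+ * Worked example (illustrative only): the default micbias of 1800000 uV maps
+ * to VOLTAGE_CONVERTER(MICBIAS_DEFAULT_VAL, MICBIAS_MIN_VAL,
+ * MICBIAS_STEP_SIZE) = (1800000 - 1600000) / 50000 = 4, i.e. four 50 mV
+ * steps above the 1.6 V minimum.
+ */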
+
+enum {
+	BOOST_SWITCH = 0,
+	BOOST_ALWAYS,
+	BYPASS_ALWAYS,
+	BOOST_ON_FOREVER,
+};
+
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[];
+/* By default enable the internal speaker boost */
+static bool spkr_boost_en = true;
+
+static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
+	"cdc-vdd-mic-bias",
+};
+
+static struct wcd_mbhc_register
+	wcd_mbhc_registers[WCD_MBHC_REG_FUNC_MAX] = {
+	WCD_MBHC_REGISTER("WCD_MBHC_L_DET_EN",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_DET_EN",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MECH_DETECTION_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_CLAMP_CTL",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1, 0x18, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_DETECTION_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_CTRL",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PLUG_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_GND_PLUG_TYPE",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_SW_HPH_LP_100K_TO_GND",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x01, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_SCHMT_ISRC",
+			  MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, 0x06, 1, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_EN",
+			  MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_INSREM_DBNC",
+			  MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0xF0, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_DBNC",
+			  MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, 0x0C, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_VREF",
+			  MSM89XX_PMIC_ANALOG_MBHC_BTN3_CTL, 0x03, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HS_COMP_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x01,
+			  0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MIC_SCHMT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x02,
+			  1, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_SCHMT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x08,
+			  3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_SCHMT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0x04,
+			  2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_OCP_FSM_EN",
+			  MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT, 0xFF, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_BTN_ISRC_CTL",
+			  MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, 0x70, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_RESULT",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT, 0xFF,
+			  0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MICB_CTRL",
+			  MSM89XX_PMIC_ANALOG_MICB_2_EN, 0xC0, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_CNP_WG_TIME",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFC, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_PA_EN",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPHL_PA_EN",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_HPH_PA_EN",
+			  MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN, 0x30, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_SWCH_LEVEL_REMOVE",
+			  MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT,
+			  0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
+			  MSM89XX_PMIC_ANALOG_MICB_2_EN, 0x20, 5, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN", 0, 0, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS", 0, 0, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL", 0, 0, 0, 0),
+};
+
+/* gain_adj and offset are scaled by 1000 and 100 to avoid float arithmetic */
+static const struct wcd_imped_i_ref imped_i_ref[] = {
+	{I_h4_UA, 8, 800, 9000, 10000},
+	{I_pt5_UA, 10, 100, 990, 4600},
+	{I_14_UA, 17, 14, 1050, 700},
+	{I_l4_UA, 10, 4, 1165, 110},
+	{I_1_UA, 0, 1, 1200, 65},
+};
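+
+/*
+ * Illustrative reading of the fixed-point scaling above: a gain adjustment of
+ * 1.05 is stored as 1050 and an offset of 7.00 as 700; the scale factors are
+ * divided back out in msm_anlg_cdc_compute_impedance().
+ */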
+
+static const struct wcd_mbhc_intr intr_ids = {
+	.mbhc_sw_intr =  MSM89XX_IRQ_MBHC_HS_DET,
+	.mbhc_btn_press_intr = MSM89XX_IRQ_MBHC_PRESS,
+	.mbhc_btn_release_intr = MSM89XX_IRQ_MBHC_RELEASE,
+	.mbhc_hs_ins_intr = MSM89XX_IRQ_MBHC_INSREM_DET1,
+	.mbhc_hs_rem_intr = MSM89XX_IRQ_MBHC_INSREM_DET,
+	.hph_left_ocp = MSM89XX_IRQ_HPHL_OCP,
+	.hph_right_ocp = MSM89XX_IRQ_HPHR_OCP,
+};
+
+static int msm_anlg_cdc_dt_parse_vreg_info(struct device *dev,
+					   struct sdm660_cdc_regulator *vreg,
+					   const char *vreg_name,
+					   bool ondemand);
+static struct sdm660_cdc_pdata *msm_anlg_cdc_populate_dt_pdata(
+						struct device *dev);
+static int msm_anlg_cdc_enable_ext_mb_source(struct wcd_mbhc *wcd_mbhc,
+					     bool turn_on);
+static void msm_anlg_cdc_trim_btn_reg(struct snd_soc_codec *codec);
+static void msm_anlg_cdc_set_micb_v(struct snd_soc_codec *codec);
+static void msm_anlg_cdc_set_boost_v(struct snd_soc_codec *codec);
+static void msm_anlg_cdc_set_auto_zeroing(struct snd_soc_codec *codec,
+					  bool enable);
+static void msm_anlg_cdc_configure_cap(struct snd_soc_codec *codec,
+				       bool micbias1, bool micbias2);
+static bool msm_anlg_cdc_use_mb(struct snd_soc_codec *codec);
+
+static int get_codec_version(struct sdm660_cdc_priv *sdm660_cdc)
+{
+	if (sdm660_cdc->codec_version == DRAX_CDC)
+		return DRAX_CDC;
+	else if (sdm660_cdc->codec_version == DIANGU)
+		return DIANGU;
+	else if (sdm660_cdc->codec_version == CAJON_2_0)
+		return CAJON_2_0;
+	else if (sdm660_cdc->codec_version == CAJON)
+		return CAJON;
+	else if (sdm660_cdc->codec_version == CONGA)
+		return CONGA;
+	else if (sdm660_cdc->pmic_rev == TOMBAK_2_0)
+		return TOMBAK_2_0;
+	else if (sdm660_cdc->pmic_rev == TOMBAK_1_0)
+		return TOMBAK_1_0;
+
+	pr_err("%s: unsupported codec version\n", __func__);
+	return UNSUPPORTED;
+}
+
+static void wcd_mbhc_meas_imped(struct snd_soc_codec *codec,
+				s16 *impedance_l, s16 *impedance_r)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if ((sdm660_cdc->imped_det_pin == WCD_MBHC_DET_BOTH) ||
+	    (sdm660_cdc->imped_det_pin == WCD_MBHC_DET_HPHL)) {
+		/* Enable ZDET_L_MEAS_EN */
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				0x08, 0x08);
+		/* Wait for 2ms for measurement to complete */
+		usleep_range(2000, 2100);
+		/* Read Left impedance value from Result1 */
+		*impedance_l = snd_soc_read(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+		/* Enable ZDET_R_MEAS_EN */
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				0x08, 0x00);
+	}
+	if ((sdm660_cdc->imped_det_pin == WCD_MBHC_DET_BOTH) ||
+	    (sdm660_cdc->imped_det_pin == WCD_MBHC_DET_HPHR)) {
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				0x04, 0x04);
+		/* Wait for 2ms for measurement to complete */
+		usleep_range(2000, 2100);
+		/* Read Right impedance value from Result1 */
+		*impedance_r = snd_soc_read(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				0x04, 0x00);
+	}
+}
+
+static void msm_anlg_cdc_set_ref_current(struct snd_soc_codec *codec,
+					 enum wcd_curr_ref curr_ref)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: curr_ref: %d\n", __func__, curr_ref);
+
+	if (get_codec_version(sdm660_cdc) < CAJON)
+		dev_dbg(codec->dev, "%s: Setting ref current not required\n",
+			__func__);
+
+	sdm660_cdc->imped_i_ref = imped_i_ref[curr_ref];
+
+	switch (curr_ref) {
+	case I_h4_UA:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_2_EN,
+			0x07, 0x01);
+		break;
+	case I_pt5_UA:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_2_EN,
+			0x07, 0x04);
+		break;
+	case I_14_UA:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_2_EN,
+			0x07, 0x03);
+		break;
+	case I_l4_UA:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_2_EN,
+			0x07, 0x01);
+		break;
+	case I_1_UA:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_2_EN,
+			0x07, 0x00);
+		break;
+	default:
+		pr_debug("%s: No ref current set\n", __func__);
+		break;
+	}
+}
+
+static bool msm_anlg_cdc_adj_ref_current(struct snd_soc_codec *codec,
+					 s16 *impedance_l, s16 *impedance_r)
+{
+	int i = 2;
+	s16 compare_imp = 0;
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (sdm660_cdc->imped_det_pin == WCD_MBHC_DET_HPHR)
+		compare_imp = *impedance_r;
+	else
+		compare_imp = *impedance_l;
+
+	if (get_codec_version(sdm660_cdc) < CAJON) {
+		dev_dbg(codec->dev,
+			"%s: Reference current adjustment not required\n",
+			 __func__);
+		return false;
+	}
+
+	while (compare_imp < imped_i_ref[i].min_val) {
+		msm_anlg_cdc_set_ref_current(codec, imped_i_ref[++i].curr_ref);
+		wcd_mbhc_meas_imped(codec, impedance_l, impedance_r);
+		compare_imp = (sdm660_cdc->imped_det_pin ==
+			       WCD_MBHC_DET_HPHR) ? *impedance_r : *impedance_l;
+		if (i >= I_1_UA)
+			break;
+	}
+	return true;
+}
+
+void msm_anlg_cdc_spk_ext_pa_cb(
+		int (*codec_spk_ext_pa)(struct snd_soc_codec *codec,
+			int enable), struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc;
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer!\n", __func__);
+		return;
+	}
+
+	sdm660_cdc = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: Enter\n", __func__);
+	sdm660_cdc->codec_spk_ext_pa_cb = codec_spk_ext_pa;
+}
+
+static void msm_anlg_cdc_compute_impedance(struct snd_soc_codec *codec, s16 l,
+					   s16 r, uint32_t *zl, uint32_t *zr,
+					   bool high)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+	uint32_t rl = 0, rr = 0;
+	struct wcd_imped_i_ref R = sdm660_cdc->imped_i_ref;
+	int codec_ver = get_codec_version(sdm660_cdc);
+
+	switch (codec_ver) {
+	case TOMBAK_1_0:
+	case TOMBAK_2_0:
+	case CONGA:
+		if (high) {
+			dev_dbg(codec->dev,
+				"%s: This plug has high range impedance\n",
+				 __func__);
+			rl = (uint32_t)(((100 * (l * 400 - 200))/96) - 230);
+			rr = (uint32_t)(((100 * (r * 400 - 200))/96) - 230);
+		} else {
+			dev_dbg(codec->dev,
+				"%s: This plug has low range impedance\n",
+				 __func__);
+			rl = (uint32_t)(((1000 * (l * 2 - 1))/1165) - (13/10));
+			rr = (uint32_t)(((1000 * (r * 2 - 1))/1165) - (13/10));
+		}
+		break;
+	case CAJON:
+	case CAJON_2_0:
+	case DIANGU:
+	case DRAX_CDC:
+		if (sdm660_cdc->imped_det_pin == WCD_MBHC_DET_HPHL) {
+			rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
+			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+			rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
+			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
+		} else if (sdm660_cdc->imped_det_pin == WCD_MBHC_DET_HPHR) {
+			rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
+			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
+			rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
+			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+		} else if (sdm660_cdc->imped_det_pin == WCD_MBHC_DET_NONE) {
+			rr = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * r - 5)) -
+			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+			rl = (uint32_t)(((DEFAULT_MULTIPLIER * (10 * l - 5))-
+			   (DEFAULT_OFFSET * DEFAULT_GAIN))/DEFAULT_GAIN);
+		} else {
+			rr = (uint32_t)(((10000 * (R.multiplier * (10 * r - 5)))
+			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
+			rl = (uint32_t)(((10000 * (R.multiplier * (10 * l - 5)))
+			      - R.offset * R.gain_adj)/(R.gain_adj * 100));
+		}
+		break;
+	default:
+		dev_dbg(codec->dev, "%s: Unknown codec version\n", __func__);
+		break;
+	}
+	*zl = rl;
+	*zr = rr;
+}
+
+static struct firmware_cal *msm_anlg_cdc_get_hwdep_fw_cal(
+		struct wcd_mbhc *wcd_mbhc,
+		enum wcd_cal_type type)
+{
+	struct sdm660_cdc_priv *sdm660_cdc;
+	struct firmware_cal *hwdep_cal;
+	struct snd_soc_codec *codec = wcd_mbhc->codec;
+
+	if (!codec) {
+		pr_err("%s: NULL codec pointer\n", __func__);
+		return NULL;
+	}
+	sdm660_cdc = snd_soc_codec_get_drvdata(codec);
+	hwdep_cal = wcdcal_get_fw_cal(sdm660_cdc->fw_data, type);
+	if (!hwdep_cal) {
+		dev_err(codec->dev, "%s: cal not sent by %d\n",
+				__func__, type);
+		return NULL;
+	}
+	return hwdep_cal;
+}
+
+static void wcd9xxx_spmi_irq_control(struct snd_soc_codec *codec,
+				     int irq, bool enable)
+{
+	if (enable)
+		wcd9xxx_spmi_enable_irq(irq);
+	else
+		wcd9xxx_spmi_disable_irq(irq);
+}
+
+static void msm_anlg_cdc_mbhc_clk_setup(struct snd_soc_codec *codec,
+					bool enable)
+{
+	if (enable)
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0x08, 0x08);
+	else
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0x08, 0x00);
+}
+
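+/*
+ * Map the raw code in MBHC_BTN_RESULT (0, 1, 3, 7, 15) to a button
+ * number 0..4; any other code is reported as -EINVAL.
+ */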
+static int msm_anlg_cdc_mbhc_map_btn_code_to_num(struct snd_soc_codec *codec)
+{
+	int btn_code;
+	int btn;
+
+	btn_code = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+
+	switch (btn_code) {
+	case 0:
+		btn = 0;
+		break;
+	case 1:
+		btn = 1;
+		break;
+	case 3:
+		btn = 2;
+		break;
+	case 7:
+		btn = 3;
+		break;
+	case 15:
+		btn = 4;
+		break;
+	default:
+		btn = -EINVAL;
+		break;
+	}
+
+	return btn;
+}
+
+static bool msm_anlg_cdc_spmi_lock_sleep(struct wcd_mbhc *mbhc, bool lock)
+{
+	if (lock)
+		return wcd9xxx_spmi_lock_sleep();
+	wcd9xxx_spmi_unlock_sleep();
+	return 0;
+}
+
+static bool msm_anlg_cdc_micb_en_status(struct wcd_mbhc *mbhc, int micb_num)
+{
+	if (micb_num == MIC_BIAS_1)
+		return (snd_soc_read(mbhc->codec,
+				     MSM89XX_PMIC_ANALOG_MICB_1_EN) &
+			0x80);
+	if (micb_num == MIC_BIAS_2)
+		return (snd_soc_read(mbhc->codec,
+				     MSM89XX_PMIC_ANALOG_MICB_2_EN) &
+			0x80);
+	return false;
+}
+
+static void msm_anlg_cdc_enable_master_bias(struct snd_soc_codec *codec,
+					    bool enable)
+{
+	if (enable)
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
+				    0x30, 0x30);
+	else
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL,
+				    0x30, 0x00);
+}
+
+static void msm_anlg_cdc_mbhc_common_micb_ctrl(struct snd_soc_codec *codec,
+					       int event, bool enable)
+{
+	u16 reg;
+	u8 mask;
+	u8 val;
+
+	switch (event) {
+	case MBHC_COMMON_MICB_PRECHARGE:
+		reg = MSM89XX_PMIC_ANALOG_MICB_1_CTL;
+		mask = 0x60;
+		val = (enable ? 0x60 : 0x00);
+		break;
+	case MBHC_COMMON_MICB_SET_VAL:
+		reg = MSM89XX_PMIC_ANALOG_MICB_1_VAL;
+		mask = 0xFF;
+		val = (enable ? 0xC0 : 0x00);
+		break;
+	case MBHC_COMMON_MICB_TAIL_CURR:
+		reg = MSM89XX_PMIC_ANALOG_MICB_1_EN;
+		mask = 0x04;
+		val = (enable ? 0x04 : 0x00);
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: Invalid event received\n", __func__);
+		return;
+	}
+	snd_soc_update_bits(codec, reg, mask, val);
+}
+
+static void msm_anlg_cdc_mbhc_internal_micbias_ctrl(struct snd_soc_codec *codec,
+						    int micbias_num,
+						    bool enable)
+{
+	if (micbias_num == 1) {
+		if (enable)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS,
+				0x10, 0x10);
+		else
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS,
+				0x10, 0x00);
+	}
+}
+
+static bool msm_anlg_cdc_mbhc_hph_pa_on_status(struct snd_soc_codec *codec)
+{
+	return (snd_soc_read(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN) &
+		0x30) ? true : false;
+}
+
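+/*
+ * Program the button detection thresholds: each threshold voltage is split
+ * into coarse and fine steps and written to the per-button ZDET control
+ * registers, starting at MBHC_BTN0_ZDETL_CTL.
+ */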
+static void msm_anlg_cdc_mbhc_program_btn_thr(struct snd_soc_codec *codec,
+					      s16 *btn_low, s16 *btn_high,
+					      int num_btn, bool is_micbias)
+{
+	int i;
+	u32 coarse, fine, reg_val;
+	u16 reg_addr = MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL;
+	s16 *btn_voltage;
+
+	btn_voltage = ((is_micbias) ? btn_high : btn_low);
+
+	for (i = 0; i < num_btn; i++) {
+		coarse = (btn_voltage[i] / SDM660_CDC_MBHC_BTN_COARSE_ADJ);
+		fine = ((btn_voltage[i] % SDM660_CDC_MBHC_BTN_COARSE_ADJ) /
+				SDM660_CDC_MBHC_BTN_FINE_ADJ);
+
+		reg_val = (coarse << 5) | (fine << 2);
+		snd_soc_update_bits(codec, reg_addr, 0xFC, reg_val);
+		dev_dbg(codec->dev,
+			"%s: coarse: %d fine: %d reg_addr: %x reg_val: %x\n",
+			  __func__, coarse, fine, reg_addr, reg_val);
+		reg_addr++;
+	}
+}
+
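+/*
+ * Measure the HPHL/HPHR impedance for MBHC: save the affected registers,
+ * run the ramp-based measurement, classify the plug as mono or stereo
+ * (handling floating pins on CAJON and later), then restore the registers
+ * and convert the raw codes to ohms.
+ */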
+static void msm_anlg_cdc_mbhc_calc_impedance(struct wcd_mbhc *mbhc,
+					     uint32_t *zl, uint32_t *zr)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+	s16 impedance_l, impedance_r;
+	s16 impedance_l_fixed;
+	s16 reg0, reg1, reg2, reg3, reg4;
+	bool high = false;
+	bool min_range_used =  false;
+
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+	reg0 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER);
+	reg1 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL);
+	reg2 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2);
+	reg3 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MICB_2_EN);
+	reg4 = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL);
+
+	sdm660_cdc->imped_det_pin = WCD_MBHC_DET_BOTH;
+	mbhc->hph_type = WCD_MBHC_HPH_NONE;
+
+	/* disable FSM and micbias and enable pullup*/
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x80, 0x00);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_2_EN,
+			0xA5, 0x25);
+	/*
+	 * Enable the legacy electrical detection current sources,
+	 * disable fast ramp and enable manual switching of the
+	 * extra capacitance.
+	 */
+	dev_dbg(codec->dev, "%s: Setup for impedance det\n", __func__);
+
+	msm_anlg_cdc_set_ref_current(codec, I_h4_UA);
+
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
+			0x06, 0x02);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER,
+			0x02, 0x02);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL,
+			0x02, 0x00);
+
+	dev_dbg(codec->dev, "%s: Start performing impedance detection\n",
+		 __func__);
+
+	wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
+
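+	/* A raw code above 2 on either channel indicates high-range impedance */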
+	if (impedance_l > 2 || impedance_r > 2) {
+		high = true;
+		if (!mbhc->mbhc_cfg->mono_stero_detection) {
+			/* Set ZDET_CHG to 0  to discharge ramp */
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+					0x02, 0x00);
+			/* wait 40ms for the discharge ramp to complete */
+			usleep_range(40000, 40100);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+				0x03, 0x00);
+			sdm660_cdc->imped_det_pin = (impedance_l > 2 &&
+						      impedance_r > 2) ?
+						      WCD_MBHC_DET_NONE :
+						      ((impedance_l > 2) ?
+						      WCD_MBHC_DET_HPHR :
+						      WCD_MBHC_DET_HPHL);
+			if (sdm660_cdc->imped_det_pin == WCD_MBHC_DET_NONE)
+				goto exit;
+		} else {
+			if (get_codec_version(sdm660_cdc) >= CAJON) {
+				if (impedance_l == 63 && impedance_r == 63) {
+					dev_dbg(codec->dev,
+						"%s: HPHL and HPHR are floating\n",
+						 __func__);
+					sdm660_cdc->imped_det_pin =
+							WCD_MBHC_DET_NONE;
+					mbhc->hph_type = WCD_MBHC_HPH_NONE;
+				} else if (impedance_l == 63
+					   && impedance_r < 63) {
+					dev_dbg(codec->dev,
+						"%s: Mono HS with HPHL floating\n",
+						 __func__);
+					sdm660_cdc->imped_det_pin =
+							WCD_MBHC_DET_HPHR;
+					mbhc->hph_type = WCD_MBHC_HPH_MONO;
+				} else if (impedance_r == 63 &&
+					   impedance_l < 63) {
+					dev_dbg(codec->dev,
+						"%s: Mono HS with HPHR floating\n",
+						 __func__);
+					sdm660_cdc->imped_det_pin =
+							WCD_MBHC_DET_HPHL;
+					mbhc->hph_type = WCD_MBHC_HPH_MONO;
+				} else if (impedance_l > 3 && impedance_r > 3 &&
+					(impedance_l == impedance_r)) {
+					snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2,
+					0x06, 0x06);
+					wcd_mbhc_meas_imped(codec, &impedance_l,
+							    &impedance_r);
+					if (impedance_r == impedance_l) {
+						dev_dbg(codec->dev,
+							"%s: Mono Headset\n",
+							__func__);
+						sdm660_cdc->imped_det_pin =
+							WCD_MBHC_DET_NONE;
+						mbhc->hph_type =
+							WCD_MBHC_HPH_MONO;
+					}
+				} else {
+					dev_dbg(codec->dev,
+						"%s: STEREO headset is found\n",
+						 __func__);
+					sdm660_cdc->imped_det_pin =
+							WCD_MBHC_DET_BOTH;
+					mbhc->hph_type = WCD_MBHC_HPH_STEREO;
+				}
+			}
+		}
+	}
+
+	msm_anlg_cdc_set_ref_current(codec, I_pt5_UA);
+	msm_anlg_cdc_set_ref_current(codec, I_14_UA);
+
+	/* Enable RAMP_L , RAMP_R & ZDET_CHG*/
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+			0x03, 0x03);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x02, 0x02);
+	/* wait for 50msec for the HW to apply ramp on HPHL and HPHR */
+	usleep_range(50000, 50100);
+	/* Enable ZDET_DISCHG_CAP_CTL  to add extra capacitance */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x01, 0x01);
+	/* wait for 5msec for the voltage to get stable */
+	usleep_range(5000, 5100);
+
+	wcd_mbhc_meas_imped(codec, &impedance_l, &impedance_r);
+
+	min_range_used = msm_anlg_cdc_adj_ref_current(codec,
+						&impedance_l, &impedance_r);
+	if (!mbhc->mbhc_cfg->mono_stero_detection) {
+		/* Set ZDET_CHG to 0  to discharge ramp */
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+				0x02, 0x00);
+		/* wait for 40msec for the capacitor to discharge */
+		usleep_range(40000, 40100);
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+				0x03, 0x00);
+		goto exit;
+	}
+
+	/*
+	 * min_range_used is true when the reference current was set to the
+	 * minimum range or the measured value is larger than the minimum.
+	 * If the headset is a mono headset with either HPHL or HPHR floating,
+	 * the mono/stereo detection has already been done and there is no
+	 * need to continue further.
+	 */
+
+	if (!min_range_used ||
+	    sdm660_cdc->imped_det_pin == WCD_MBHC_DET_HPHL ||
+	    sdm660_cdc->imped_det_pin == WCD_MBHC_DET_HPHR)
+		goto exit;
+
+
+	/* Disable Set ZDET_CONN_RAMP_L and enable ZDET_CONN_FIXED_L */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+			0x02, 0x00);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
+			0x02, 0x02);
+	/* Set ZDET_CHG to 0  */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x02, 0x00);
+	/* wait for 40msec for the capacitor to discharge */
+	usleep_range(40000, 40100);
+
+	/* Set ZDET_CONN_RAMP_R to 0  */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+			0x01, 0x00);
+	/* Enable ZDET_L_MEAS_EN */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x08, 0x08);
+	/* wait for 2msec for the HW to compute left impedance value */
+	usleep_range(2000, 2100);
+	/* Read Left impedance value from Result1 */
+	impedance_l_fixed = snd_soc_read(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT);
+	/* Disable ZDET_L_MEAS_EN */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x08, 0x00);
+	/*
+	 * Assume impedance_l is L1, impedance_l_fixed is L2.
+	 * If the following condition is met, we can take this
+	 * headset as mono one with impedance of L2.
+	 * Otherwise, take it as stereo with impedance of L1.
+	 * Condition:
+	 * abs[(L2-0.5L1)/(L2+0.5L1)] < abs [(L2-L1)/(L2+L1)]
+	 */
+	if ((abs(impedance_l_fixed - impedance_l/2) *
+		(impedance_l_fixed + impedance_l)) >=
+		(abs(impedance_l_fixed - impedance_l) *
+		(impedance_l_fixed + impedance_l/2))) {
+		dev_dbg(codec->dev,
+			"%s: STEREO plug type detected\n",
+			 __func__);
+		mbhc->hph_type = WCD_MBHC_HPH_STEREO;
+	} else {
+		dev_dbg(codec->dev,
+			"%s: MONO plug type detected\n",
+			__func__);
+		mbhc->hph_type = WCD_MBHC_HPH_MONO;
+		impedance_l = impedance_l_fixed;
+	}
+	/* Enable ZDET_CHG  */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x02, 0x02);
+	/* wait for 10msec for the capacitor to charge */
+	usleep_range(10000, 10100);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+			0x02, 0x02);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN1_ZDETM_CTL,
+			0x02, 0x00);
+	/* Set ZDET_CHG to 0  to discharge HPHL */
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL,
+			0x02, 0x00);
+	/* wait for 40msec for the capacitor to discharge */
+	usleep_range(40000, 40100);
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MBHC_BTN0_ZDETL_CTL,
+			0x02, 0x00);
+
+exit:
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_FSM_CTL, reg4);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MICB_2_EN, reg3);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_BTN2_ZDETH_CTL, reg1);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_DBNC_TIMER, reg0);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_2, reg2);
+	msm_anlg_cdc_compute_impedance(codec, impedance_l, impedance_r,
+				      zl, zr, high);
+
+	dev_dbg(codec->dev, "%s: RL %d ohm, RR %d ohm\n", __func__, *zl, *zr);
+	dev_dbg(codec->dev, "%s: Impedance detection completed\n", __func__);
+}
+
+static int msm_anlg_cdc_dig_register_notifier(void *handle,
+					      struct notifier_block *nblock,
+					      bool enable)
+{
+	struct sdm660_cdc_priv *handle_cdc = handle;
+
+	if (enable)
+		return blocking_notifier_chain_register(&handle_cdc->notifier,
+							nblock);
+
+	return blocking_notifier_chain_unregister(&handle_cdc->notifier,
+						  nblock);
+}
+
+static int msm_anlg_cdc_mbhc_register_notifier(struct wcd_mbhc *wcd_mbhc,
+					       struct notifier_block *nblock,
+					       bool enable)
+{
+	struct snd_soc_codec *codec = wcd_mbhc->codec;
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (enable)
+		return blocking_notifier_chain_register(
+						&sdm660_cdc->notifier_mbhc,
+						nblock);
+
+	return blocking_notifier_chain_unregister(&sdm660_cdc->notifier_mbhc,
+						  nblock);
+}
+
+static int msm_anlg_cdc_request_irq(struct snd_soc_codec *codec,
+				    int irq, irq_handler_t handler,
+				    const char *name, void *data)
+{
+	return wcd9xxx_spmi_request_irq(irq, handler, name, data);
+}
+
+static int msm_anlg_cdc_free_irq(struct snd_soc_codec *codec,
+				 int irq, void *data)
+{
+	return wcd9xxx_spmi_free_irq(irq, data);
+}
+
+static const struct wcd_mbhc_cb mbhc_cb = {
+	.enable_mb_source = msm_anlg_cdc_enable_ext_mb_source,
+	.trim_btn_reg = msm_anlg_cdc_trim_btn_reg,
+	.compute_impedance = msm_anlg_cdc_mbhc_calc_impedance,
+	.set_micbias_value = msm_anlg_cdc_set_micb_v,
+	.set_auto_zeroing = msm_anlg_cdc_set_auto_zeroing,
+	.get_hwdep_fw_cal = msm_anlg_cdc_get_hwdep_fw_cal,
+	.set_cap_mode = msm_anlg_cdc_configure_cap,
+	.register_notifier = msm_anlg_cdc_mbhc_register_notifier,
+	.request_irq = msm_anlg_cdc_request_irq,
+	.irq_control = wcd9xxx_spmi_irq_control,
+	.free_irq = msm_anlg_cdc_free_irq,
+	.clk_setup = msm_anlg_cdc_mbhc_clk_setup,
+	.map_btn_code_to_num = msm_anlg_cdc_mbhc_map_btn_code_to_num,
+	.lock_sleep = msm_anlg_cdc_spmi_lock_sleep,
+	.micbias_enable_status = msm_anlg_cdc_micb_en_status,
+	.mbhc_bias = msm_anlg_cdc_enable_master_bias,
+	.mbhc_common_micb_ctrl = msm_anlg_cdc_mbhc_common_micb_ctrl,
+	.micb_internal = msm_anlg_cdc_mbhc_internal_micbias_ctrl,
+	.hph_pa_on_status = msm_anlg_cdc_mbhc_hph_pa_on_status,
+	.set_btn_thr = msm_anlg_cdc_mbhc_program_btn_thr,
+	.extn_use_mb = msm_anlg_cdc_use_mb,
+};
+
+static const uint32_t wcd_imped_val[] = {4, 8, 12, 13, 16,
+					20, 24, 28, 32,
+					36, 40, 44, 48};
+
+static void msm_anlg_cdc_dig_notifier_call(struct snd_soc_codec *codec,
+					const enum dig_cdc_notify_event event)
+{
+	struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
+
+	pr_debug("%s: notifier call event %d\n", __func__, event);
+	blocking_notifier_call_chain(&sdm660_cdc->notifier,
+				     event, NULL);
+}
+
+static void msm_anlg_cdc_notifier_call(struct snd_soc_codec *codec,
+				       const enum wcd_notify_event event)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: notifier call event %d\n", __func__, event);
+	blocking_notifier_call_chain(&sdm660_cdc->notifier_mbhc, event,
+				     &sdm660_cdc->mbhc);
+}
+
+static void msm_anlg_cdc_boost_on(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F, 0x0F);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
+	if (get_codec_version(sdm660_cdc) < CAJON_2_0)
+		snd_soc_write(codec, MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82);
+	else
+		snd_soc_write(codec, MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0xA2);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+			    0x69, 0x69);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG,
+			    0x01, 0x01);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO,
+			    0x88, 0x88);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+			    0x03, 0x03);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL,
+			    0xE1, 0xE1);
+	if (get_codec_version(sdm660_cdc) < CAJON_2_0) {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				    0x20, 0x20);
+		/* Wait for 1ms after clock ctl enable */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+				    0xDF, 0xDF);
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+	} else {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+				    0x40, 0x00);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				    0x20, 0x20);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+				    0x80, 0x80);
+		/* Wait for 500us after BOOST_EN to happen */
+		usleep_range(500, 510);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+				    0x40, 0x40);
+		/* Wait for 500us after BOOST pulse_skip */
+		usleep_range(500, 510);
+	}
+}
+
+static void msm_anlg_cdc_boost_off(struct snd_soc_codec *codec)
+{
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+			    0xDF, 0x5F);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    0x20, 0x00);
+}
+
+static void msm_anlg_cdc_bypass_on(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(sdm660_cdc) < CAJON_2_0) {
+		snd_soc_write(codec,
+			MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+			0xA5);
+		snd_soc_write(codec,
+			MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3,
+			0x07);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x02, 0x02);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x01, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x40, 0x40);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+			0xDF, 0xDF);
+	} else {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			0x20, 0x20);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x20, 0x20);
+	}
+}
+
+static void msm_anlg_cdc_bypass_off(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(sdm660_cdc) < CAJON_2_0) {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BOOST_EN_CTL,
+			0x80, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x80, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x02, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x40, 0x00);
+	} else {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_BYPASS_MODE,
+			0x20, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			0x20, 0x00);
+	}
+}
+
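+/*
+ * Apply the boost/bypass sequence for earpiece (EAR_PMU/EAR_PMD) and
+ * speaker (SPK_PMU/SPK_PMD) PA power events, based on the configured
+ * boost_option.
+ */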
+static void msm_anlg_cdc_boost_mode_sequence(struct snd_soc_codec *codec,
+					     int flag)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (flag == EAR_PMU) {
+		switch (sdm660_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (sdm660_cdc->ear_pa_boost_set) {
+				msm_anlg_cdc_boost_off(codec);
+				msm_anlg_cdc_bypass_on(codec);
+			}
+			break;
+		case BOOST_ALWAYS:
+			msm_anlg_cdc_boost_on(codec);
+			break;
+		case BYPASS_ALWAYS:
+			msm_anlg_cdc_bypass_on(codec);
+			break;
+		case BOOST_ON_FOREVER:
+			msm_anlg_cdc_boost_on(codec);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				sdm660_cdc->boost_option);
+			break;
+		}
+	} else if (flag == EAR_PMD) {
+		switch (sdm660_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (sdm660_cdc->ear_pa_boost_set)
+				msm_anlg_cdc_bypass_off(codec);
+			break;
+		case BOOST_ALWAYS:
+			msm_anlg_cdc_boost_off(codec);
+			/* 80ms for EAR boost to settle down */
+			msleep(80);
+			break;
+		case BYPASS_ALWAYS:
+			/* nothing to do as bypass on always */
+			break;
+		case BOOST_ON_FOREVER:
+			/* nothing to do as boost on forever */
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				sdm660_cdc->boost_option);
+			break;
+		}
+	} else if (flag == SPK_PMU) {
+		switch (sdm660_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (sdm660_cdc->spk_boost_set) {
+				msm_anlg_cdc_bypass_off(codec);
+				msm_anlg_cdc_boost_on(codec);
+			}
+			break;
+		case BOOST_ALWAYS:
+			msm_anlg_cdc_boost_on(codec);
+			break;
+		case BYPASS_ALWAYS:
+			msm_anlg_cdc_bypass_on(codec);
+			break;
+		case BOOST_ON_FOREVER:
+			msm_anlg_cdc_boost_on(codec);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				sdm660_cdc->boost_option);
+			break;
+		}
+	} else if (flag == SPK_PMD) {
+		switch (sdm660_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (sdm660_cdc->spk_boost_set) {
+				msm_anlg_cdc_boost_off(codec);
+				/*
+				 * Add 40 ms sleep for the spk
+				 * boost to settle down
+				 */
+				msleep(40);
+			}
+			break;
+		case BOOST_ALWAYS:
+			msm_anlg_cdc_boost_off(codec);
+			/*
+			 * Add 40 ms sleep for the spk
+			 * boost to settle down
+			 */
+			msleep(40);
+			break;
+		case BYPASS_ALWAYS:
+			/* nothing to do as bypass on always */
+			break;
+		case BOOST_ON_FOREVER:
+			/* nothing to do as boost on forever */
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				sdm660_cdc->boost_option);
+			break;
+		}
+	}
+}
+
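+/*
+ * Parse one codec supply from device tree: resolve the "<name>-supply"
+ * phandle and read the "qcom,<name>-voltage" (min/max uV pair) and
+ * "qcom,<name>-current" properties into the regulator descriptor.
+ */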
+static int msm_anlg_cdc_dt_parse_vreg_info(struct device *dev,
+	struct sdm660_cdc_regulator *vreg, const char *vreg_name,
+	bool ondemand)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[CODEC_DT_MAX_PROP_SIZE];
+	struct device_node *regnode = NULL;
+	u32 prop_val;
+
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "%s-supply",
+		vreg_name);
+	regnode = of_parse_phandle(dev->of_node, prop_name, 0);
+
+	if (!regnode) {
+		dev_err(dev, "Looking up %s property in node %s failed\n",
+			prop_name, dev->of_node->full_name);
+		return -ENODEV;
+	}
+
+	dev_dbg(dev, "Looking up %s property in node %s\n",
+		prop_name, dev->of_node->full_name);
+
+	vreg->name = vreg_name;
+	vreg->ondemand = ondemand;
+
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
+		"qcom,%s-voltage", vreg_name);
+	prop = of_get_property(dev->of_node, prop_name, &len);
+
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_err(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+		return -EINVAL;
+	}
+	vreg->min_uv = be32_to_cpup(&prop[0]);
+	vreg->max_uv = be32_to_cpup(&prop[1]);
+
+	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
+		"qcom,%s-current", vreg_name);
+
+	ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
+	if (ret) {
+		dev_err(dev, "Looking up %s property in node %s failed",
+			prop_name, dev->of_node->full_name);
+		return -EFAULT;
+	}
+	vreg->optimum_ua = prop_val;
+
+	dev_dbg(dev, "%s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n\n", vreg->name,
+		 vreg->min_uv, vreg->max_uv, vreg->optimum_ua, vreg->ondemand);
+	return 0;
+}
+
+static void msm_anlg_cdc_dt_parse_boost_info(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+	const char *prop_name = "qcom,cdc-boost-voltage";
+	int boost_voltage, ret;
+
+	ret = of_property_read_u32(codec->dev->of_node, prop_name,
+			&boost_voltage);
+	if (ret) {
+		dev_dbg(codec->dev, "Looking up %s property in node %s failed\n",
+			prop_name, codec->dev->of_node->full_name);
+		boost_voltage = DEFAULT_BOOST_VOLTAGE;
+	}
+	if (boost_voltage < MIN_BOOST_VOLTAGE ||
+			boost_voltage > MAX_BOOST_VOLTAGE) {
+		dev_err(codec->dev,
+				"Incorrect boost voltage. Reverting to default\n");
+		boost_voltage = DEFAULT_BOOST_VOLTAGE;
+	}
+
+	sdm660_cdc_priv->boost_voltage =
+		VOLTAGE_CONVERTER(boost_voltage, MIN_BOOST_VOLTAGE,
+				BOOST_VOLTAGE_STEP);
+	dev_dbg(codec->dev, "Boost voltage value is: %d\n",
+			boost_voltage);
+}
+
+static void msm_anlg_cdc_dt_parse_micbias_info(struct device *dev,
+				struct wcd_micbias_setting *micbias)
+{
+	const char *prop_name = "qcom,cdc-micbias-cfilt-mv";
+	int ret;
+
+	ret = of_property_read_u32(dev->of_node, prop_name,
+			&micbias->cfilt1_mv);
+	if (ret) {
+		dev_dbg(dev, "Looking up %s property in node %s failed",
+			prop_name, dev->of_node->full_name);
+		micbias->cfilt1_mv = MICBIAS_DEFAULT_VAL;
+	}
+}
+
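+/*
+ * Build the codec platform data from device tree: parse the static supply
+ * list, the optional on-demand supply list and the micbias settings.
+ */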
+static struct sdm660_cdc_pdata *msm_anlg_cdc_populate_dt_pdata(
+						struct device *dev)
+{
+	struct sdm660_cdc_pdata *pdata;
+	int ret, static_cnt, ond_cnt, idx, i;
+	const char *name = NULL;
+	const char *static_prop_name = "qcom,cdc-static-supplies";
+	const char *ond_prop_name = "qcom,cdc-on-demand-supplies";
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	static_cnt = of_property_count_strings(dev->of_node, static_prop_name);
+	if (static_cnt < 0) {
+		dev_err(dev, "%s: Failed to get static supplies %d\n", __func__,
+			static_cnt);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* On-demand supply list is an optional property */
+	ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
+	if (ond_cnt < 0)
+		ond_cnt = 0;
+
+	WARN_ON(static_cnt <= 0 || ond_cnt < 0);
+	if ((static_cnt + ond_cnt) > ARRAY_SIZE(pdata->regulator)) {
+		dev_err(dev, "%s: Num of supplies %d > max supported %zu\n",
+				__func__, (static_cnt + ond_cnt),
+					ARRAY_SIZE(pdata->regulator));
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (idx = 0; idx < static_cnt; idx++) {
+		ret = of_property_read_string_index(dev->of_node,
+						    static_prop_name, idx,
+						    &name);
+		if (ret) {
+			dev_err(dev, "%s: of read string %s idx %d error %d\n",
+				__func__, static_prop_name, idx, ret);
+			goto err;
+		}
+
+		dev_dbg(dev, "%s: Found static cdc supply %s\n", __func__,
+			name);
+		ret = msm_anlg_cdc_dt_parse_vreg_info(dev,
+						&pdata->regulator[idx],
+						name, false);
+		if (ret) {
+			dev_err(dev, "%s:err parsing vreg for %s idx %d\n",
+				__func__, name, idx);
+			goto err;
+		}
+	}
+
+	for (i = 0; i < ond_cnt; i++, idx++) {
+		ret = of_property_read_string_index(dev->of_node, ond_prop_name,
+						    i, &name);
+		if (ret) {
+			dev_err(dev, "%s: err parsing on_demand for %s idx %d\n",
+				__func__, ond_prop_name, i);
+			goto err;
+		}
+
+		dev_dbg(dev, "%s: Found on-demand cdc supply %s\n", __func__,
+			name);
+		ret = msm_anlg_cdc_dt_parse_vreg_info(dev,
+						&pdata->regulator[idx],
+						name, true);
+		if (ret) {
+			dev_err(dev, "%s: err parsing vreg on_demand for %s idx %d\n",
+				__func__, name, idx);
+			goto err;
+		}
+	}
+	msm_anlg_cdc_dt_parse_micbias_info(dev, &pdata->micbias);
+
+	return pdata;
+err:
+	devm_kfree(dev, pdata);
+	dev_err(dev, "%s: Failed to populate DT data ret = %d\n",
+		__func__, ret);
+	return NULL;
+}
+
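+/*
+ * DAPM handler for the on-demand supplies: the regulator is enabled on the
+ * first PRE_PMU reference and disabled when the last reference is dropped
+ * on POST_PMD.
+ */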
+static int msm_anlg_cdc_codec_enable_on_demand_supply(
+		struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+	struct on_demand_supply *supply;
+
+	if (w->shift >= ON_DEMAND_SUPPLIES_MAX) {
+		dev_err(codec->dev, "%s: error index > MAX Demand supplies",
+			__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+	dev_dbg(codec->dev, "%s: supply: %s event: %d ref: %d\n",
+		__func__, on_demand_supply_name[w->shift], event,
+		atomic_read(&sdm660_cdc->on_demand_list[w->shift].ref));
+
+	supply = &sdm660_cdc->on_demand_list[w->shift];
+	WARN_ONCE(!supply->supply, "%s isn't defined\n",
+		  on_demand_supply_name[w->shift]);
+	if (!supply->supply) {
+		dev_err(codec->dev, "%s: err supply not present ond for %d",
+			__func__, w->shift);
+		goto out;
+	}
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (atomic_inc_return(&supply->ref) == 1)
+			ret = regulator_enable(supply->supply);
+		if (ret)
+			dev_err(codec->dev, "%s: Failed to enable %s\n",
+				__func__,
+				on_demand_supply_name[w->shift]);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (atomic_read(&supply->ref) == 0) {
+			dev_dbg(codec->dev, "%s: %s supply has been disabled.\n",
+				 __func__, on_demand_supply_name[w->shift]);
+			goto out;
+		}
+		if (atomic_dec_return(&supply->ref) == 0) {
+			ret = regulator_disable(supply->supply);
+			if (ret)
+				dev_err(codec->dev, "%s: Failed to disable %s\n",
+					__func__,
+					on_demand_supply_name[w->shift]);
+		}
+		break;
+	default:
+		break;
+	}
+out:
+	return ret;
+}
+
+static int msm_anlg_cdc_codec_enable_clock_block(struct snd_soc_codec *codec,
+						 int enable)
+{
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	if (enable) {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30, 0x30);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x0C);
+		msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_ON);
+	} else {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
+	}
+	return 0;
+}
+
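+/*
+ * DAPM handler for the RX charge pump: enable the codec clock block and
+ * charge pump clocks on power-up, and tear them down on power-down,
+ * including the EAR boost sequence for the "EAR CP" widget.
+ */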
+static int msm_anlg_cdc_codec_enable_charge_pump(struct snd_soc_dapm_widget *w,
+						 struct snd_kcontrol *kcontrol,
+						 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: event = %d\n", __func__, event);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		msm_anlg_cdc_codec_enable_clock_block(codec, 1);
+		if (!(strcmp(w->name, "EAR CP"))) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x80, 0x80);
+			msm_anlg_cdc_boost_mode_sequence(codec, EAR_PMU);
+		} else if (get_codec_version(sdm660_cdc) >= DIANGU) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x80, 0x80);
+		} else {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0xC0, 0xC0);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Wait for 1ms post powerup of chargepump */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Wait for 1ms post powerdown of chargepump */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		if (!(strcmp(w->name, "EAR CP"))) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x80, 0x00);
+			if (sdm660_cdc->boost_option != BOOST_ALWAYS) {
+				dev_dbg(codec->dev,
+					"%s: boost_option:%d, tear down ear\n",
+					__func__, sdm660_cdc->boost_option);
+				msm_anlg_cdc_boost_mode_sequence(codec,
+								 EAR_PMD);
+			}
+			/*
+			 * Reset the PA select bit from ear to hph after the
+			 * ear PA and the HPH DAC are disabled, to reduce the
+			 * ear turn-off pop and avoid an HPH pop in concurrency.
+			 */
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x80, 0x00);
+		} else {
+			if (get_codec_version(sdm660_cdc) < DIANGU)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x40, 0x00);
+			if (sdm660_cdc->rx_bias_count == 0)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x80, 0x00);
+			dev_dbg(codec->dev, "%s: rx_bias_count = %d\n",
+					__func__, sdm660_cdc->rx_bias_count);
+		}
+		break;
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_ear_pa_boost_get(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] =
+		(sdm660_cdc->ear_pa_boost_set ? 1 : 0);
+	dev_dbg(codec->dev, "%s: sdm660_cdc->ear_pa_boost_set = %d\n",
+			__func__, sdm660_cdc->ear_pa_boost_set);
+	return 0;
+}
+
+static int msm_anlg_cdc_ear_pa_boost_set(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+	sdm660_cdc->ear_pa_boost_set =
+		(ucontrol->value.integer.value[0] ? true : false);
+	return 0;
+}
+
+static int msm_anlg_cdc_loopback_mode_get(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	ucontrol->value.integer.value[0] = pdata->lb_mode;
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_anlg_cdc_loopback_mode_put(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		pdata->lb_mode = false;
+		break;
+	case 1:
+		pdata->lb_mode = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_anlg_cdc_pa_gain_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(sdm660_cdc) >= DIANGU) {
+		ear_pa_gain = snd_soc_read(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC);
+		ear_pa_gain = (ear_pa_gain >> 1) & 0x3;
+
+		if (ear_pa_gain == 0x00) {
+			ucontrol->value.integer.value[0] = 3;
+		} else if (ear_pa_gain == 0x01) {
+			ucontrol->value.integer.value[0] = 2;
+		} else if (ear_pa_gain == 0x02) {
+			ucontrol->value.integer.value[0] = 1;
+		} else if (ear_pa_gain == 0x03) {
+			ucontrol->value.integer.value[0] = 0;
+		} else {
+			dev_err(codec->dev,
+				"%s: ERROR: Unsupported Ear Gain = 0x%x\n",
+				__func__, ear_pa_gain);
+			return -EINVAL;
+		}
+	} else {
+		ear_pa_gain = snd_soc_read(codec,
+					   MSM89XX_PMIC_ANALOG_RX_EAR_CTL);
+		ear_pa_gain = (ear_pa_gain >> 5) & 0x1;
+		if (ear_pa_gain == 0x00) {
+			ucontrol->value.integer.value[0] = 0;
+		} else if (ear_pa_gain == 0x01) {
+			ucontrol->value.integer.value[0] = 3;
+		} else  {
+			dev_err(codec->dev,
+				"%s: ERROR: Unsupported Ear Gain = 0x%x\n",
+				__func__, ear_pa_gain);
+			return -EINVAL;
+		}
+	}
+	dev_dbg(codec->dev, "%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
+	return 0;
+}
+
+static int msm_anlg_cdc_pa_gain_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	u8 ear_pa_gain;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	if (get_codec_version(sdm660_cdc) >= DIANGU) {
+		switch (ucontrol->value.integer.value[0]) {
+		case 0:
+			ear_pa_gain = 0x06;
+			break;
+		case 1:
+			ear_pa_gain = 0x04;
+			break;
+		case 2:
+			ear_pa_gain = 0x02;
+			break;
+		case 3:
+			ear_pa_gain = 0x00;
+			break;
+		default:
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+			    0x06, ear_pa_gain);
+	} else {
+		switch (ucontrol->value.integer.value[0]) {
+		case 0:
+			ear_pa_gain = 0x00;
+			break;
+		case 3:
+			ear_pa_gain = 0x20;
+			break;
+		case 1:
+		case 2:
+		default:
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			    0x20, ear_pa_gain);
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_hph_mode_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (sdm660_cdc->hph_mode == NORMAL_MODE) {
+		ucontrol->value.integer.value[0] = 0;
+	} else if (sdm660_cdc->hph_mode == HD2_MODE) {
+		ucontrol->value.integer.value[0] = 1;
+	} else  {
+		dev_err(codec->dev, "%s: ERROR: Default HPH Mode= %d\n",
+			__func__, sdm660_cdc->hph_mode);
+	}
+
+	dev_dbg(codec->dev, "%s: sdm660_cdc->hph_mode = %d\n", __func__,
+			sdm660_cdc->hph_mode);
+	return 0;
+}
+
+static int msm_anlg_cdc_hph_mode_set(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		sdm660_cdc->hph_mode = NORMAL_MODE;
+		break;
+	case 1:
+		if (get_codec_version(sdm660_cdc) >= DIANGU)
+			sdm660_cdc->hph_mode = HD2_MODE;
+		break;
+	default:
+		sdm660_cdc->hph_mode = NORMAL_MODE;
+		break;
+	}
+	dev_dbg(codec->dev, "%s: sdm660_cdc->hph_mode_set = %d\n",
+		__func__, sdm660_cdc->hph_mode);
+	return 0;
+}
+
+static int msm_anlg_cdc_boost_option_get(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (sdm660_cdc->boost_option == BOOST_SWITCH) {
+		ucontrol->value.integer.value[0] = 0;
+	} else if (sdm660_cdc->boost_option == BOOST_ALWAYS) {
+		ucontrol->value.integer.value[0] = 1;
+	} else if (sdm660_cdc->boost_option == BYPASS_ALWAYS) {
+		ucontrol->value.integer.value[0] = 2;
+	} else if (sdm660_cdc->boost_option == BOOST_ON_FOREVER) {
+		ucontrol->value.integer.value[0] = 3;
+	} else  {
+		dev_err(codec->dev, "%s: ERROR: Unsupported Boost option= %d\n",
+			__func__, sdm660_cdc->boost_option);
+		return -EINVAL;
+	}
+
+	dev_dbg(codec->dev, "%s: sdm660_cdc->boost_option = %d\n", __func__,
+			sdm660_cdc->boost_option);
+	return 0;
+}
+
+static int msm_anlg_cdc_boost_option_set(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		sdm660_cdc->boost_option = BOOST_SWITCH;
+		break;
+	case 1:
+		sdm660_cdc->boost_option = BOOST_ALWAYS;
+		break;
+	case 2:
+		sdm660_cdc->boost_option = BYPASS_ALWAYS;
+		msm_anlg_cdc_bypass_on(codec);
+		break;
+	case 3:
+		sdm660_cdc->boost_option = BOOST_ON_FOREVER;
+		msm_anlg_cdc_boost_on(codec);
+		break;
+	default:
+		pr_err("%s: invalid boost option: %d\n", __func__,
+					sdm660_cdc->boost_option);
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: sdm660_cdc->boost_option_set = %d\n",
+		__func__, sdm660_cdc->boost_option);
+	return 0;
+}
+
+static int msm_anlg_cdc_spk_boost_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (sdm660_cdc->spk_boost_set == false) {
+		ucontrol->value.integer.value[0] = 0;
+	} else if (sdm660_cdc->spk_boost_set == true) {
+		ucontrol->value.integer.value[0] = 1;
+	} else  {
+		dev_err(codec->dev, "%s: ERROR: Unsupported Speaker Boost = %d\n",
+				__func__, sdm660_cdc->spk_boost_set);
+		return -EINVAL;
+	}
+
+	dev_dbg(codec->dev, "%s: sdm660_cdc->spk_boost_set = %d\n", __func__,
+			sdm660_cdc->spk_boost_set);
+	return 0;
+}
+
+static int msm_anlg_cdc_spk_boost_set(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+			__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		sdm660_cdc->spk_boost_set = false;
+		break;
+	case 1:
+		sdm660_cdc->spk_boost_set = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: sdm660_cdc->spk_boost_set = %d\n",
+		__func__, sdm660_cdc->spk_boost_set);
+	return 0;
+}
+
+static int msm_anlg_cdc_ext_spk_boost_get(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (sdm660_cdc->ext_spk_boost_set == false)
+		ucontrol->value.integer.value[0] = 0;
+	else
+		ucontrol->value.integer.value[0] = 1;
+
+	dev_dbg(codec->dev, "%s: sdm660_cdc->ext_spk_boost_set = %d\n",
+				__func__, sdm660_cdc->ext_spk_boost_set);
+	return 0;
+}
+
+static int msm_anlg_cdc_ext_spk_boost_set(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		sdm660_cdc->ext_spk_boost_set = false;
+		break;
+	case 1:
+		sdm660_cdc->ext_spk_boost_set = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	dev_dbg(codec->dev, "%s: sdm660_cdc->spk_boost_set = %d\n",
+		__func__, sdm660_cdc->spk_boost_set);
+	return 0;
+}
+
+
+static const char * const msm_anlg_cdc_loopback_mode_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_loopback_mode_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_loopback_mode_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_ear_pa_boost_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_ear_pa_boost_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_ear_pa_boost_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_ear_pa_gain_text[] = {
+		"POS_1P5_DB", "POS_3_DB", "POS_4P5_DB", "POS_6_DB"};
+static const struct soc_enum msm_anlg_cdc_ear_pa_gain_enum[] = {
+		SOC_ENUM_SINGLE_EXT(4, msm_anlg_cdc_ear_pa_gain_text),
+};
+
+static const char * const msm_anlg_cdc_boost_option_ctrl_text[] = {
+		"BOOST_SWITCH", "BOOST_ALWAYS", "BYPASS_ALWAYS",
+		"BOOST_ON_FOREVER"};
+static const struct soc_enum msm_anlg_cdc_boost_option_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(4, msm_anlg_cdc_boost_option_ctrl_text),
+};
+static const char * const msm_anlg_cdc_spk_boost_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_spk_boost_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_spk_boost_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_ext_spk_boost_ctrl_text[] = {
+		"DISABLE", "ENABLE"};
+static const struct soc_enum msm_anlg_cdc_ext_spk_boost_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, msm_anlg_cdc_ext_spk_boost_ctrl_text),
+};
+
+static const char * const msm_anlg_cdc_hph_mode_ctrl_text[] = {
+		"NORMAL", "HD2"};
+static const struct soc_enum msm_anlg_cdc_hph_mode_ctl_enum[] = {
+		SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(msm_anlg_cdc_hph_mode_ctrl_text),
+			msm_anlg_cdc_hph_mode_ctrl_text),
+};
+
+/* cut-off frequency for high pass filter */
+static const char * const cf_text[] = {
+	"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
+};
+
+
+static const struct snd_kcontrol_new msm_anlg_cdc_snd_controls[] = {
+
+	SOC_ENUM_EXT("RX HPH Mode", msm_anlg_cdc_hph_mode_ctl_enum[0],
+		msm_anlg_cdc_hph_mode_get, msm_anlg_cdc_hph_mode_set),
+
+	SOC_ENUM_EXT("Boost Option", msm_anlg_cdc_boost_option_ctl_enum[0],
+		msm_anlg_cdc_boost_option_get, msm_anlg_cdc_boost_option_set),
+
+	SOC_ENUM_EXT("EAR PA Boost", msm_anlg_cdc_ear_pa_boost_ctl_enum[0],
+		msm_anlg_cdc_ear_pa_boost_get, msm_anlg_cdc_ear_pa_boost_set),
+
+	SOC_ENUM_EXT("EAR PA Gain", msm_anlg_cdc_ear_pa_gain_enum[0],
+		msm_anlg_cdc_pa_gain_get, msm_anlg_cdc_pa_gain_put),
+
+	SOC_ENUM_EXT("Speaker Boost", msm_anlg_cdc_spk_boost_ctl_enum[0],
+		msm_anlg_cdc_spk_boost_get, msm_anlg_cdc_spk_boost_set),
+
+	SOC_ENUM_EXT("Ext Spk Boost", msm_anlg_cdc_ext_spk_boost_ctl_enum[0],
+		msm_anlg_cdc_ext_spk_boost_get, msm_anlg_cdc_ext_spk_boost_set),
+
+	SOC_ENUM_EXT("LOOPBACK Mode", msm_anlg_cdc_loopback_mode_ctl_enum[0],
+		msm_anlg_cdc_loopback_mode_get, msm_anlg_cdc_loopback_mode_put),
+	SOC_SINGLE_TLV("ADC1 Volume", MSM89XX_PMIC_ANALOG_TX_1_EN, 3,
+					8, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC2 Volume", MSM89XX_PMIC_ANALOG_TX_2_EN, 3,
+					8, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC3 Volume", MSM89XX_PMIC_ANALOG_TX_3_EN, 3,
+					8, 0, analog_gain),
+
+
+};
+
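+/*
+ * Report the last measured headphone impedance through a mixer control;
+ * the control's shift selects HPHR (1) or HPHL (0).
+ */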
+static int tombak_hph_impedance_get(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	int ret;
+	uint32_t zl = 0, zr = 0;
+	bool hphr;
+	struct soc_multi_mixer_control *mc;
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
+
+	hphr = mc->shift;
+	ret = wcd_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
+	if (ret)
+		dev_dbg(codec->dev, "%s: Failed to get mbhc imped", __func__);
+	dev_dbg(codec->dev, "%s: zl %u, zr %u\n", __func__, zl, zr);
+	ucontrol->value.integer.value[0] = hphr ? zr : zl;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new impedance_detect_controls[] = {
+	SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
+			tombak_hph_impedance_get, NULL),
+	SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
+			tombak_hph_impedance_get, NULL),
+};
+
+static int tombak_get_hph_type(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct sdm660_cdc_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct wcd_mbhc *mbhc;
+
+	if (!priv) {
+		dev_err(codec->dev,
+			"%s: sdm660_cdc-wcd private data is NULL\n",
+			 __func__);
+		return -EINVAL;
+	}
+
+	mbhc = &priv->mbhc;
+	if (!mbhc) {
+		dev_err(codec->dev, "%s: mbhc not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
+	dev_dbg(codec->dev, "%s: hph_type = %u\n", __func__, mbhc->hph_type);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new hph_type_detect_controls[] = {
+	SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
+	tombak_get_hph_type, NULL),
+};
+
+static const char * const rdac2_mux_text[] = {
+	"ZERO", "RX2", "RX1"
+};
+
+static const struct soc_enum rdac2_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
+		0, 3, rdac2_mux_text);
+
+static const char * const adc2_mux_text[] = {
+	"ZERO", "INP2", "INP3"
+};
+
+static const char * const ext_spk_text[] = {
+	"Off", "On"
+};
+
+static const char * const wsa_spk_text[] = {
+	"ZERO", "WSA"
+};
+
+static const struct soc_enum adc2_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
+		ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
+
+static const struct soc_enum ext_spk_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
+		ARRAY_SIZE(ext_spk_text), ext_spk_text);
+
+static const struct soc_enum wsa_spk_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
+		ARRAY_SIZE(wsa_spk_text), wsa_spk_text);
+
+
+
+static const struct snd_kcontrol_new ext_spk_mux =
+	SOC_DAPM_ENUM("Ext Spk Switch Mux", ext_spk_enum);
+
+
+
+static const struct snd_kcontrol_new tx_adc2_mux =
+	SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);
+
+
+static const struct snd_kcontrol_new rdac2_mux =
+	SOC_DAPM_ENUM("RDAC2 MUX Mux", rdac2_mux_enum);
+
+static const char * const ear_text[] = {
+	"ZERO", "Switch",
+};
+
+static const struct soc_enum ear_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ear_text), ear_text);
+
+static const struct snd_kcontrol_new ear_pa_mux[] = {
+	SOC_DAPM_ENUM("EAR_S", ear_enum)
+};
+
+static const struct snd_kcontrol_new wsa_spk_mux[] = {
+	SOC_DAPM_ENUM("WSA Spk Switch", wsa_spk_enum)
+};
+
+
+
+static const char * const hph_text[] = {
+	"ZERO", "Switch",
+};
+
+static const struct soc_enum hph_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(hph_text), hph_text);
+
+static const struct snd_kcontrol_new hphl_mux[] = {
+	SOC_DAPM_ENUM("HPHL", hph_enum)
+};
+
+static const struct snd_kcontrol_new hphr_mux[] = {
+	SOC_DAPM_ENUM("HPHR", hph_enum)
+};
+
+static const struct snd_kcontrol_new spkr_mux[] = {
+	SOC_DAPM_ENUM("SPK", hph_enum)
+};
+
+static const char * const lo_text[] = {
+	"ZERO", "Switch",
+};
+
+static const struct soc_enum lo_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(lo_text), lo_text);
+
+static const struct snd_kcontrol_new lo_mux[] = {
+	SOC_DAPM_ENUM("LINE_OUT", lo_enum)
+};
+
+static void msm_anlg_cdc_codec_enable_adc_block(struct snd_soc_codec *codec,
+					 int enable)
+{
+	struct sdm660_cdc_priv *wcd8x16 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, enable);
+
+	if (enable) {
+		wcd8x16->adc_count++;
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
+				    0x20, 0x20);
+		snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				    0x10, 0x10);
+	} else {
+		wcd8x16->adc_count--;
+		if (!wcd8x16->adc_count) {
+			snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				    0x10, 0x00);
+			snd_soc_update_bits(codec,
+				    MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
+					    0x20, 0x0);
+		}
+	}
+}
+
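+/*
+ * DAPM handler for the analog TX ADCs: sequence the ADC block clocks, the
+ * txfe init bit and the TX connection registers around power-up/down, with
+ * the delays needed to let the input voltage settle and avoid pops.
+ */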
+static int msm_anlg_cdc_codec_enable_adc(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 adc_reg;
+	u8 init_bit_shift;
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	adc_reg = MSM89XX_PMIC_ANALOG_TX_1_2_TEST_CTL_2;
+
+	if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
+		init_bit_shift = 5;
+	else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
+		 (w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
+		init_bit_shift = 4;
+	else {
+		dev_err(codec->dev, "%s: Error, invalid adc register\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		msm_anlg_cdc_codec_enable_adc_block(codec, 1);
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x02);
+		/*
+		 * Add delay of 10 ms to give sufficient time for the voltage
+		 * to shoot up and settle so that the txfe init does not
+		 * happen when the input voltage is changing too much.
+		 */
+		usleep_range(10000, 10010);
+		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
+				1 << init_bit_shift);
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
+				0x03, 0x00);
+		else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
+			(w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
+				0x03, 0x00);
+		/* Wait for 1ms to allow txfe settling time */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/*
+		 * Add delay of 12 ms before deasserting the init
+		 * to reduce the tx pop
+		 */
+		usleep_range(12000, 12010);
+		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
+		/* Wait for 1ms to allow txfe settling time post powerup */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		msm_anlg_cdc_codec_enable_adc_block(codec, 0);
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_MICB_1_CTL, 0x02, 0x00);
+		if (w->reg == MSM89XX_PMIC_ANALOG_TX_1_EN)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX1_CTL,
+				0x03, 0x02);
+		else if ((w->reg == MSM89XX_PMIC_ANALOG_TX_2_EN) ||
+			(w->reg == MSM89XX_PMIC_ANALOG_TX_3_EN))
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_CONN_TX2_CTL,
+				0x03, 0x02);
+
+		break;
+	}
+	return 0;
+}
+
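+/*
+ * DAPM handler for the speaker PA: sequence the analog clock, power stage,
+ * DAC and boost/bypass settings around power-up and power-down, with the
+ * mute and settling delays required by the hardware.
+ */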
+static int msm_anlg_cdc_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x01);
+		switch (sdm660_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (!sdm660_cdc->spk_boost_set)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+					0x10, 0x10);
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			break;
+		case BYPASS_ALWAYS:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+				0x10, 0x10);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				sdm660_cdc->boost_option);
+			break;
+		}
+		/* Wait for 1ms after SPK_DAC CTL setting */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0xE0);
+		if (get_codec_version(sdm660_cdc) != TOMBAK_1_0)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x01);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Wait for 1ms after SPK_VBAT_LDO Enable */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		switch (sdm660_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (sdm660_cdc->spk_boost_set)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+					0xEF, 0xEF);
+			else
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+					0x10, 0x00);
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+				0xEF, 0xEF);
+			break;
+		case BYPASS_ALWAYS:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				sdm660_cdc->boost_option);
+			break;
+		}
+		msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX3_MUTE_OFF);
+		snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX3_MUTE_ON);
+		/*
+		 * Add 1 ms sleep for the mute to take effect
+		 */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x10);
+		if (get_codec_version(sdm660_cdc) < CAJON_2_0)
+			msm_anlg_cdc_boost_mode_sequence(codec, SPK_PMD);
+		snd_soc_update_bits(codec, w->reg, 0x80, 0x00);
+		switch (sdm660_cdc->boost_option) {
+		case BOOST_SWITCH:
+			if (sdm660_cdc->spk_boost_set)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+					0xEF, 0x69);
+			break;
+		case BOOST_ALWAYS:
+		case BOOST_ON_FOREVER:
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+				0xEF, 0x69);
+			break;
+		case BYPASS_ALWAYS:
+			break;
+		default:
+			dev_err(codec->dev,
+				"%s: invalid boost option: %d\n", __func__,
+				sdm660_cdc->boost_option);
+			break;
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0x00);
+		/* Wait for 1ms to allow setting time for spkr path disable */
+		usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+		if (get_codec_version(sdm660_cdc) != TOMBAK_1_0)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x01, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
+		if (get_codec_version(sdm660_cdc) >= CAJON_2_0)
+			msm_anlg_cdc_boost_mode_sequence(codec, SPK_PMD);
+		break;
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_codec_enable_dig_clk(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+
+	dev_dbg(codec->dev, "%s event %d w->name %s\n", __func__,
+			event, w->name);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		msm_anlg_cdc_codec_enable_clock_block(codec, 1);
+		snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+		msm_anlg_cdc_boost_mode_sequence(codec, SPK_PMU);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (sdm660_cdc->rx_bias_count == 0)
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+					0x80, 0x00);
+	}
+	return 0;
+}
+
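+/* Codec versions older than CAJON need to use micbias */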
+static bool msm_anlg_cdc_use_mb(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(sdm660_cdc) < CAJON)
+		return true;
+	else
+		return false;
+}
+
+static void msm_anlg_cdc_set_auto_zeroing(struct snd_soc_codec *codec,
+					  bool enable)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(sdm660_cdc) < CONGA) {
+		if (enable)
+			/*
+			 * Set autozeroing for special headset detection and
+			 * buttons to work.
+			 */
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_2_EN,
+				0x18, 0x10);
+		else
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_2_EN,
+				0x18, 0x00);
+
+	} else {
+		dev_dbg(codec->dev,
+			"%s: Auto Zeroing is not required from CONGA\n",
+			__func__);
+	}
+}
+
+static void msm_anlg_cdc_trim_btn_reg(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	if (get_codec_version(sdm660_cdc) == TOMBAK_1_0) {
+		pr_debug("%s: This device needs to be trimmed\n", __func__);
+		/*
+		 * Calculate the trim value for each device used
+		 * till it comes into production by the hardware team
+		 */
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+				0xA5, 0xA5);
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_TRIM_CTRL2,
+				0xFF, 0x30);
+	} else {
+		dev_dbg(codec->dev, "%s: This device is trimmed at ATE\n",
+			__func__);
+	}
+}
+
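+/* Reference-counted control of the MICBIAS_REGULATOR supply, used by MBHC */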
+static int msm_anlg_cdc_enable_ext_mb_source(struct wcd_mbhc *wcd_mbhc,
+					     bool turn_on)
+{
+	int ret = 0;
+	static int count;
+	struct snd_soc_codec *codec = wcd_mbhc->codec;
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+	dev_dbg(codec->dev, "%s turn_on: %d count: %d\n", __func__, turn_on,
+			count);
+	if (turn_on) {
+		if (!count) {
+			ret = snd_soc_dapm_force_enable_pin(dapm,
+				"MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(dapm);
+		}
+		count++;
+	} else {
+		if (count > 0)
+			count--;
+		if (!count) {
+			ret = snd_soc_dapm_disable_pin(dapm,
+				"MICBIAS_REGULATOR");
+			snd_soc_dapm_sync(dapm);
+		}
+	}
+
+	if (ret)
+		dev_err(codec->dev, "%s: Failed to %s external micbias source\n",
+			__func__, turn_on ? "enable" : "disable");
+	else
+		dev_dbg(codec->dev, "%s: %s external micbias source\n",
+			 __func__, turn_on ? "Enabled" : "Disabled");
+
+	return ret;
+}
+
+static int msm_anlg_cdc_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+				snd_soc_codec_get_drvdata(codec);
+	u16 micb_int_reg;
+	char *internal1_text = "Internal1";
+	char *internal2_text = "Internal2";
+	char *internal3_text = "Internal3";
+	char *external2_text = "External2";
+	char *external_text = "External";
+	bool micbias2;
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+	switch (w->reg) {
+	case MSM89XX_PMIC_ANALOG_MICB_1_EN:
+	case MSM89XX_PMIC_ANALOG_MICB_2_EN:
+		micb_int_reg = MSM89XX_PMIC_ANALOG_MICB_1_INT_RBIAS;
+		break;
+	default:
+		dev_err(codec->dev,
+			"%s: Error, invalid micbias register 0x%x\n",
+			__func__, w->reg);
+		return -EINVAL;
+	}
+
+	micbias2 = (snd_soc_read(codec, MSM89XX_PMIC_ANALOG_MICB_2_EN) & 0x80);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (strnstr(w->name, internal1_text, strlen(w->name))) {
+			if (get_codec_version(sdm660_cdc) >= CAJON)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
+					0x02, 0x02);
+			snd_soc_update_bits(codec, micb_int_reg, 0x80, 0x80);
+		} else if (strnstr(w->name, internal2_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x10, 0x10);
+			snd_soc_update_bits(codec, w->reg, 0x60, 0x00);
+		} else if (strnstr(w->name, internal3_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x2);
+		/*
+		 * update MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2
+		 * for external bias only, not for external2.
+		 */
+		} else if (!strnstr(w->name, external2_text, strlen(w->name)) &&
+					strnstr(w->name, external_text,
+						strlen(w->name))) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_TX_1_2_ATEST_CTL_2,
+					0x02, 0x02);
+		}
+		if (!strnstr(w->name, external_text, strlen(w->name)))
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_EN, 0x05, 0x04);
+		if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
+			msm_anlg_cdc_configure_cap(codec, true, micbias2);
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		if (get_codec_version(sdm660_cdc) <= TOMBAK_2_0)
+			/*
+			 * Wait for 20ms post micbias enable
+			 * for codec versions up to Tombak 2.0.
+			 */
+			usleep_range(20000, 20100);
+		if (strnstr(w->name, internal1_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x40, 0x40);
+		} else if (strnstr(w->name, internal2_text,  strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x08, 0x08);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_ON);
+		} else if (strnstr(w->name, internal3_text, 30)) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x01, 0x01);
+		} else if (strnstr(w->name, external2_text, strlen(w->name))) {
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_ON);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (strnstr(w->name, internal1_text, strlen(w->name))) {
+			snd_soc_update_bits(codec, micb_int_reg, 0xC0, 0x40);
+		} else if (strnstr(w->name, internal2_text, strlen(w->name))) {
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_OFF);
+		} else if (strnstr(w->name, internal3_text, 30)) {
+			snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x0);
+		} else if (strnstr(w->name, external2_text, strlen(w->name))) {
+			/*
+			 * send micbias turn off event to mbhc driver and then
+			 * break, as no need to set MICB_1_EN register.
+			 */
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_MICBIAS_2_OFF);
+			break;
+		}
+		if (w->reg == MSM89XX_PMIC_ANALOG_MICB_1_EN)
+			msm_anlg_cdc_configure_cap(codec, false, micbias2);
+		break;
+	}
+	return 0;
+}
+
+static void update_clkdiv(void *handle, int val)
+{
+	struct sdm660_cdc_priv *handle_cdc = handle;
+	struct snd_soc_codec *codec = handle_cdc->codec;
+
+	snd_soc_update_bits(codec,
+			    MSM89XX_PMIC_ANALOG_TX_1_2_TXFE_CLKDIV,
+			    0xFF, val);
+}
+
+static int get_cdc_version(void *handle)
+{
+	struct sdm660_cdc_priv *sdm660_cdc = handle;
+
+	return get_codec_version(sdm660_cdc);
+}
+
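+/* Enable/disable the external speaker boost regulator (VDD_SPKDRV) */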
+static int sdm660_wcd_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w,
+					       struct snd_kcontrol *kcontrol,
+					       int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	if (!sdm660_cdc->ext_spk_boost_set) {
+		dev_dbg(codec->dev, "%s: ext_boost not supported/disabled\n",
+								__func__);
+		return 0;
+	}
+	dev_dbg(codec->dev, "%s: %s %d\n", __func__, w->name, event);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (sdm660_cdc->spkdrv_reg) {
+			ret = regulator_enable(sdm660_cdc->spkdrv_reg);
+			if (ret)
+				dev_err(codec->dev,
+					"%s Failed to enable spkdrv reg %s\n",
+					__func__, MSM89XX_VDD_SPKDRV_NAME);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (sdm660_cdc->spkdrv_reg) {
+			ret = regulator_disable(sdm660_cdc->spkdrv_reg);
+			if (ret)
+				dev_err(codec->dev,
+					"%s: Failed to disable spkdrv_reg %s\n",
+					__func__, MSM89XX_VDD_SPKDRV_NAME);
+		}
+		break;
+	}
+	return 0;
+}
+
+/* The register address is the same as on other codecs, so it can use resmgr */
+static int msm_anlg_cdc_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
+					     struct snd_kcontrol *kcontrol,
+					     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		sdm660_cdc->rx_bias_count++;
+		if (sdm660_cdc->rx_bias_count == 1) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x80, 0x80);
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x01, 0x01);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		sdm660_cdc->rx_bias_count--;
+		if (sdm660_cdc->rx_bias_count == 0) {
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x01, 0x00);
+			snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+					0x80, 0x00);
+		}
+		break;
+	}
+	dev_dbg(codec->dev, "%s rx_bias_count = %d\n",
+			__func__, sdm660_cdc->rx_bias_count);
+	return 0;
+}
+
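+/* Map the measured impedance to the nearest lower entry in wcd_imped_val[] */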
+static uint32_t wcd_get_impedance_value(uint32_t imped)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(wcd_imped_val) - 1; i++) {
+		if (imped >= wcd_imped_val[i] &&
+			imped < wcd_imped_val[i + 1])
+			break;
+	}
+
+	pr_debug("%s: selected impedance value = %d\n",
+		 __func__, wcd_imped_val[i]);
+	return wcd_imped_val[i];
+}
+
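+/* Program the RX gain (RX_EAR_CTL) and NCP voltage for the detected impedance */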
+static void wcd_imped_config(struct snd_soc_codec *codec,
+			     uint32_t imped, bool set_gain)
+{
+	uint32_t value;
+	int codec_version;
+	struct sdm660_cdc_priv *sdm660_cdc =
+				snd_soc_codec_get_drvdata(codec);
+
+	value = wcd_get_impedance_value(imped);
+
+	if (value < wcd_imped_val[0]) {
+		dev_dbg(codec->dev,
+			"%s, detected impedance is less than 4 Ohm\n",
+			 __func__);
+		return;
+	}
+
+	codec_version = get_codec_version(sdm660_cdc);
+
+	if (set_gain) {
+		switch (codec_version) {
+		case TOMBAK_1_0:
+		case TOMBAK_2_0:
+		case CONGA:
+			/*
+			 * For a 32 Ohm or higher load, set 0x19E
+			 * bit 5 to 1 (POS_0_DB_DI). For loads lower
+			 * than 32 Ohm (such as a 16 Ohm load), set
+			 * 0x19E bit 5 to 0 (POS_M4P5_DB_DI).
+			 */
+			if (value >= 32)
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x20);
+			else
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x00);
+			break;
+		case CAJON:
+		case CAJON_2_0:
+		case DIANGU:
+		case DRAX_CDC:
+			if (value >= 13) {
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x20);
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_NCP_VCTRL,
+					0x07, 0x07);
+			} else {
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+					0x20, 0x00);
+				snd_soc_update_bits(codec,
+					MSM89XX_PMIC_ANALOG_NCP_VCTRL,
+					0x07, 0x04);
+			}
+			break;
+		}
+	} else {
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			0x20, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_NCP_VCTRL,
+			0x07, 0x04);
+	}
+
+	dev_dbg(codec->dev, "%s: Exit\n", __func__);
+}
+
+static int msm_anlg_cdc_hphl_dac_event(struct snd_soc_dapm_widget *w,
+				       struct snd_kcontrol *kcontrol,
+				       int event)
+{
+	uint32_t impedl, impedr;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+	ret = wcd_mbhc_get_impedance(&sdm660_cdc->mbhc,
+			&impedl, &impedr);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (get_codec_version(sdm660_cdc) > CAJON)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+				0x08, 0x08);
+		if (get_codec_version(sdm660_cdc) == CAJON ||
+			get_codec_version(sdm660_cdc) == CAJON_2_0) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST,
+				0x80, 0x80);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST,
+				0x80, 0x80);
+		}
+		if (get_codec_version(sdm660_cdc) > CAJON)
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+				0x08, 0x00);
+		if (sdm660_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_PRE_RX1_INT_ON);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x01);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x02);
+		if (!ret)
+			wcd_imped_config(codec, impedl, true);
+		else
+			dev_dbg(codec->dev, "Failed to get mbhc impedance %d\n",
+				ret);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wcd_imped_config(codec, impedl, false);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x00);
+		if (sdm660_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_POST_RX1_INT_OFF);
+		break;
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_lo_dac_event(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x20);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x08);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x40);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x40);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Wait for 20ms before powerdown of lineout_dac */
+		usleep_range(20000, 20100);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x80, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x40, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_DAC_CTL, 0x08, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x80, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x40, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL, 0x20, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
+		break;
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_hphr_dac_event(struct snd_soc_dapm_widget *w,
+				       struct snd_kcontrol *kcontrol,
+				       int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (sdm660_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_PRE_RX2_INT_ON);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x01);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x00);
+		if (sdm660_cdc->hph_mode == HD2_MODE)
+			msm_anlg_cdc_dig_notifier_call(codec,
+					DIG_CDC_EVENT_POST_RX2_INT_OFF);
+		break;
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_hph_pa_event(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol,
+				     int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (w->shift == 5)
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHL_PA_ON);
+		else if (w->shift == 4)
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHR_PA_ON);
+		snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x20, 0x20);
+		break;
+
+	case SND_SOC_DAPM_POST_PMU:
+		/* Wait for 7ms to allow setting time for HPH_PA Enable */
+		usleep_range(7000, 7100);
+		if (w->shift == 5) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
+			msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX1_MUTE_OFF);
+		} else if (w->shift == 4) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
+			msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX2_MUTE_OFF);
+		}
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		if (w->shift == 5) {
+			msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX1_MUTE_ON);
+			/* Wait for 20ms after HPHL RX digital mute */
+			msleep(20);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x00);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHL_PA_OFF);
+		} else if (w->shift == 4) {
+			msm_anlg_cdc_dig_notifier_call(codec,
+					       DIG_CDC_EVENT_RX2_MUTE_ON);
+			/* Wait for 20ms after HPHR RX digital mute */
+			msleep(20);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x00);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_PRE_HPHR_PA_OFF);
+		}
+		if (get_codec_version(sdm660_cdc) >= CAJON) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_CNP,
+				0xF0, 0x30);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (w->shift == 5) {
+			clear_bit(WCD_MBHC_HPHL_PA_OFF_ACK,
+				&sdm660_cdc->mbhc.hph_pa_dac_state);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_HPHL_PA_OFF);
+		} else if (w->shift == 4) {
+			clear_bit(WCD_MBHC_HPHR_PA_OFF_ACK,
+				&sdm660_cdc->mbhc.hph_pa_dac_state);
+			msm_anlg_cdc_notifier_call(codec,
+					WCD_EVENT_POST_HPHR_PA_OFF);
+		}
+		/* Wait for 15ms after HPH RX teardown */
+		usleep_range(15000, 15100);
+		break;
+	}
+	return 0;
+}
+
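+/* DAPM routes: { sink, control (switch/mux text or NULL), source } */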
+static const struct snd_soc_dapm_route audio_map[] = {
+	/* RDAC Connections */
+	{"HPHR DAC", NULL, "RDAC2 MUX"},
+	{"RDAC2 MUX", "RX1", "PDM_IN_RX1"},
+	{"RDAC2 MUX", "RX2", "PDM_IN_RX2"},
+
+	/* WSA */
+	{"WSA_SPK OUT", NULL, "WSA Spk Switch"},
+	{"WSA Spk Switch", "WSA", "EAR PA"},
+
+	/* Earpiece (RX MIX1) */
+	{"EAR", NULL, "EAR_S"},
+	{"EAR_S", "Switch", "EAR PA"},
+	{"EAR PA", NULL, "RX_BIAS"},
+	{"EAR PA", NULL, "HPHL DAC"},
+	{"EAR PA", NULL, "HPHR DAC"},
+	{"EAR PA", NULL, "EAR CP"},
+
+	/* Headset (RX MIX1 and RX MIX2) */
+	{"HEADPHONE", NULL, "HPHL PA"},
+	{"HEADPHONE", NULL, "HPHR PA"},
+
+	{"Ext Spk", NULL, "Ext Spk Switch"},
+	{"Ext Spk Switch", "On", "HPHL PA"},
+	{"Ext Spk Switch", "On", "HPHR PA"},
+
+	{"HPHL PA", NULL, "HPHL"},
+	{"HPHR PA", NULL, "HPHR"},
+	{"HPHL", "Switch", "HPHL DAC"},
+	{"HPHR", "Switch", "HPHR DAC"},
+	{"HPHL PA", NULL, "CP"},
+	{"HPHL PA", NULL, "RX_BIAS"},
+	{"HPHR PA", NULL, "CP"},
+	{"HPHR PA", NULL, "RX_BIAS"},
+	{"HPHL DAC", NULL, "PDM_IN_RX1"},
+
+	{"SPK_OUT", NULL, "SPK PA"},
+	{"SPK PA", NULL, "SPK_RX_BIAS"},
+	{"SPK PA", NULL, "SPK"},
+	{"SPK", "Switch", "SPK DAC"},
+	{"SPK DAC", NULL, "PDM_IN_RX3"},
+	{"SPK DAC", NULL, "VDD_SPKDRV"},
+
+	/* lineout */
+	{"LINEOUT", NULL, "LINEOUT PA"},
+	{"LINEOUT PA", NULL, "SPK_RX_BIAS"},
+	{"LINEOUT PA", NULL, "LINE_OUT"},
+	{"LINE_OUT", "Switch", "LINEOUT DAC"},
+	{"LINEOUT DAC", NULL, "PDM_IN_RX3"},
+
+	/* lineout to WSA */
+	{"WSA_SPK OUT", NULL, "LINEOUT PA"},
+
+	{"PDM_IN_RX1", NULL, "RX1 CLK"},
+	{"PDM_IN_RX2", NULL, "RX2 CLK"},
+	{"PDM_IN_RX3", NULL, "RX3 CLK"},
+
+	{"ADC1_OUT", NULL, "ADC1"},
+	{"ADC2_OUT", NULL, "ADC2"},
+	{"ADC3_OUT", NULL, "ADC3"},
+
+	/* ADC Connections */
+	{"ADC2", NULL, "ADC2 MUX"},
+	{"ADC3", NULL, "ADC2 MUX"},
+	{"ADC2 MUX", "INP2", "ADC2_INP2"},
+	{"ADC2 MUX", "INP3", "ADC2_INP3"},
+
+	{"ADC1", NULL, "AMIC1"},
+	{"ADC2_INP2", NULL, "AMIC2"},
+	{"ADC2_INP3", NULL, "AMIC3"},
+
+	{"MIC BIAS Internal1", NULL, "INT_LDO_H"},
+	{"MIC BIAS Internal2", NULL, "INT_LDO_H"},
+	{"MIC BIAS External", NULL, "INT_LDO_H"},
+	{"MIC BIAS External2", NULL, "INT_LDO_H"},
+	{"MIC BIAS Internal1", NULL, "MICBIAS_REGULATOR"},
+	{"MIC BIAS Internal2", NULL, "MICBIAS_REGULATOR"},
+	{"MIC BIAS External", NULL, "MICBIAS_REGULATOR"},
+	{"MIC BIAS External2", NULL, "MICBIAS_REGULATOR"},
+};
+
+static int msm_anlg_cdc_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+		snd_soc_codec_get_drvdata(dai->codec);
+
+	dev_dbg(dai->codec->dev, "%s(): substream = %s  stream = %d\n",
+		__func__,
+		substream->name, substream->stream);
+	/*
+	 * If BUS_DOWN is set in status_mask, SSR is not complete,
+	 * so return an error.
+	 */
+	if (test_bit(BUS_DOWN, &sdm660_cdc->status_mask)) {
+		dev_err(dai->codec->dev, "Error, Device is not up post SSR\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void msm_anlg_cdc_shutdown(struct snd_pcm_substream *substream,
+				  struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev,
+		"%s(): substream = %s  stream = %d\n", __func__,
+		substream->name, substream->stream);
+}
+
+int msm_anlg_cdc_mclk_enable(struct snd_soc_codec *codec,
+			     int mclk_enable, bool dapm)
+{
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
+		__func__, mclk_enable, dapm);
+	if (mclk_enable) {
+		sdm660_cdc->int_mclk0_enabled = true;
+		msm_anlg_cdc_codec_enable_clock_block(codec, 1);
+	} else {
+		if (!sdm660_cdc->int_mclk0_enabled) {
+			dev_err(codec->dev, "Error, MCLK already disabled\n");
+			return -EINVAL;
+		}
+		sdm660_cdc->int_mclk0_enabled = false;
+		msm_anlg_cdc_codec_enable_clock_block(codec, 0);
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_set_dai_sysclk(struct snd_soc_dai *dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+static int msm_anlg_cdc_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+static int msm_anlg_cdc_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+static int msm_anlg_cdc_get_channel_map(struct snd_soc_dai *dai,
+				 unsigned int *tx_num, unsigned int *tx_slot,
+				 unsigned int *rx_num, unsigned int *rx_slot)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+static struct snd_soc_dai_ops msm_anlg_cdc_dai_ops = {
+	.startup = msm_anlg_cdc_startup,
+	.shutdown = msm_anlg_cdc_shutdown,
+	.set_sysclk = msm_anlg_cdc_set_dai_sysclk,
+	.set_fmt = msm_anlg_cdc_set_dai_fmt,
+	.set_channel_map = msm_anlg_cdc_set_channel_map,
+	.get_channel_map = msm_anlg_cdc_get_channel_map,
+};
+
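+/* I2S DAIs: RX1 playback, TX1/TX2 capture (incl. SVA) and speaker VI feedback */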
+static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[] = {
+	{
+		.name = "msm_anlg_cdc_i2s_rx1",
+		.id = AIF1_PB,
+		.playback = {
+			.stream_name = "Playback",
+			.rates = SDM660_CDC_RATES,
+			.formats = SDM660_CDC_FORMATS,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 3,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+	{
+		.name = "msm_anlg_cdc_i2s_tx1",
+		.id = AIF1_CAP,
+		.capture = {
+			.stream_name = "Record",
+			.rates = SDM660_CDC_RATES,
+			.formats = SDM660_CDC_FORMATS,
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+	{
+		.name = "msm_anlg_cdc_i2s_tx2",
+		.id = AIF3_SVA,
+		.capture = {
+			.stream_name = "RecordSVA",
+			.rates = SDM660_CDC_RATES,
+			.formats = SDM660_CDC_FORMATS,
+			.rate_max = 48000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+	{
+		.name = "msm_anlg_vifeedback",
+		.id = AIF2_VIFEED,
+		.capture = {
+			.stream_name = "VIfeed",
+			.rates = SDM660_CDC_RATES,
+			.formats = SDM660_CDC_FORMATS,
+			.rate_max = 48000,
+			.rate_min = 48000,
+			.channels_min = 2,
+			.channels_max = 2,
+		},
+		.ops = &msm_anlg_cdc_dai_ops,
+	},
+};
+
+static int msm_anlg_cdc_codec_enable_lo_pa(struct snd_soc_dapm_widget *w,
+					   struct snd_kcontrol *kcontrol,
+					   int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	dev_dbg(codec->dev, "%s: %d %s\n", __func__, event, w->name);
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		msm_anlg_cdc_dig_notifier_call(codec,
+				       DIG_CDC_EVENT_RX3_MUTE_OFF);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		msm_anlg_cdc_dig_notifier_call(codec,
+				       DIG_CDC_EVENT_RX3_MUTE_ON);
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_anlg_cdc_codec_enable_spk_ext_pa(struct snd_soc_dapm_widget *w,
+						struct snd_kcontrol *kcontrol,
+						int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		dev_dbg(codec->dev,
+			"%s: enable external speaker PA\n", __func__);
+		if (sdm660_cdc->codec_spk_ext_pa_cb)
+			sdm660_cdc->codec_spk_ext_pa_cb(codec, 1);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		dev_dbg(codec->dev,
+			"%s: disable external speaker PA\n", __func__);
+		if (sdm660_cdc->codec_spk_ext_pa_cb)
+			sdm660_cdc->codec_spk_ext_pa_cb(codec, 0);
+		break;
+	}
+	return 0;
+}
+
+static int msm_anlg_cdc_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		dev_dbg(codec->dev,
+			"%s: Sleeping 20ms after selecting EAR PA\n",
+			__func__);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			    0x80, 0x80);
+		if (get_codec_version(sdm660_cdc) < CONGA)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x2A);
+		if (get_codec_version(sdm660_cdc) >= DIANGU) {
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC, 0x08, 0x00);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		dev_dbg(codec->dev,
+			"%s: Sleeping 7ms after enabling EAR PA\n",
+			__func__);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			    0x40, 0x40);
+		/* Wait for 7ms after EAR PA enable */
+		usleep_range(7000, 7100);
+		msm_anlg_cdc_dig_notifier_call(codec,
+				       DIG_CDC_EVENT_RX1_MUTE_OFF);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		msm_anlg_cdc_dig_notifier_call(codec,
+				       DIG_CDC_EVENT_RX1_MUTE_ON);
+		/* Wait for 20ms for RX digital mute to take effect */
+		msleep(20);
+		if (sdm660_cdc->boost_option == BOOST_ALWAYS) {
+			dev_dbg(codec->dev,
+				"%s: boost_option:%d, tear down ear\n",
+				__func__, sdm660_cdc->boost_option);
+			msm_anlg_cdc_boost_mode_sequence(codec, EAR_PMD);
+		}
+		if (get_codec_version(sdm660_cdc) >= DIANGU) {
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_L_TEST, 0x04, 0x0);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_HPH_R_TEST, 0x04, 0x0);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		dev_dbg(codec->dev,
+			"%s: Sleeping 7ms after disabling EAR PA\n",
+			__func__);
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+			    0x40, 0x00);
+		/* Wait for 7ms after EAR PA teardown */
+		usleep_range(7000, 7100);
+		if (get_codec_version(sdm660_cdc) < CONGA)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_HPH_CNP_WG_TIME, 0xFF, 0x16);
+		if (get_codec_version(sdm660_cdc) >= DIANGU)
+			snd_soc_update_bits(codec,
+			MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC, 0x08, 0x08);
+		break;
+	}
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget msm_anlg_cdc_dapm_widgets[] = {
+	SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
+			0, 0, NULL, 0, msm_anlg_cdc_codec_enable_ear_pa,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHL PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+		5, 0, NULL, 0,
+		msm_anlg_cdc_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("HPHR PA", MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+		4, 0, NULL, 0,
+		msm_anlg_cdc_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM,
+			0, 0, NULL, 0, msm_anlg_cdc_codec_enable_spk_pa,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("LINEOUT PA", MSM89XX_PMIC_ANALOG_RX_LO_EN_CTL,
+			5, 0, NULL, 0, msm_anlg_cdc_codec_enable_lo_pa,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("EAR_S", SND_SOC_NOPM, 0, 0, ear_pa_mux),
+	SND_SOC_DAPM_MUX("SPK", SND_SOC_NOPM, 0, 0, spkr_mux),
+	SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, hphl_mux),
+	SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0, hphr_mux),
+	SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
+	SND_SOC_DAPM_MUX("WSA Spk Switch", SND_SOC_NOPM, 0, 0, wsa_spk_mux),
+	SND_SOC_DAPM_MUX("Ext Spk Switch", SND_SOC_NOPM, 0, 0, &ext_spk_mux),
+	SND_SOC_DAPM_MUX("LINE_OUT", SND_SOC_NOPM, 0, 0, lo_mux),
+	SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0, &tx_adc2_mux),
+
+	SND_SOC_DAPM_MIXER_E("HPHL DAC",
+		MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
+		0, msm_anlg_cdc_hphl_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("HPHR DAC",
+		MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
+		0, msm_anlg_cdc_hphr_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_DAC("SPK DAC", NULL, MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL,
+			 7, 0),
+	SND_SOC_DAPM_DAC_E("LINEOUT DAC", NULL,
+		SND_SOC_NOPM, 0, 0, msm_anlg_cdc_lo_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SPK("Ext Spk", msm_anlg_cdc_codec_enable_spk_ext_pa),
+
+	SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX3 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+			    2, 0, msm_anlg_cdc_codec_enable_dig_clk,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("CP", MSM89XX_PMIC_ANALOG_NCP_EN, 0, 0,
+			    msm_anlg_cdc_codec_enable_charge_pump,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			    SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("EAR CP", MSM89XX_PMIC_ANALOG_NCP_EN, 4, 0,
+			    msm_anlg_cdc_codec_enable_charge_pump,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			    SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("RX_BIAS", 1, SND_SOC_NOPM,
+		0, 0, msm_anlg_cdc_codec_enable_rx_bias,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("SPK_RX_BIAS", 1, SND_SOC_NOPM, 0, 0,
+		msm_anlg_cdc_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0,
+			    sdm660_wcd_codec_enable_vdd_spkr,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MICBIAS_REGULATOR", SND_SOC_NOPM,
+		ON_DEMAND_MICBIAS, 0,
+		msm_anlg_cdc_codec_enable_on_demand_supply,
+		SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal1",
+		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal2",
+		MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal3",
+		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("ADC1", NULL, MSM89XX_PMIC_ANALOG_TX_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP2",
+		NULL, MSM89XX_PMIC_ANALOG_TX_2_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP3",
+		NULL, MSM89XX_PMIC_ANALOG_TX_3_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS External",
+		MSM89XX_PMIC_ANALOG_MICB_1_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MICBIAS_E("MIC BIAS External2",
+		MSM89XX_PMIC_ANALOG_MICB_2_EN, 7, 0,
+		msm_anlg_cdc_codec_enable_micbias, SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_INPUT("AMIC1"),
+	SND_SOC_DAPM_INPUT("AMIC2"),
+	SND_SOC_DAPM_INPUT("AMIC3"),
+	SND_SOC_DAPM_AIF_IN("PDM_IN_RX1", "PDM Playback",
+		0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PDM_IN_RX2", "PDM Playback",
+		0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PDM_IN_RX3", "PDM Playback",
+		0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_OUTPUT("EAR"),
+	SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
+	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
+	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT"),
+	SND_SOC_DAPM_AIF_OUT("ADC1_OUT", "PDM Capture",
+		0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("ADC2_OUT", "PDM Capture",
+		0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("ADC3_OUT", "PDM Capture",
+		0, SND_SOC_NOPM, 0, 0),
+};
+
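+/* Per-version register defaults applied by msm_anlg_cdc_update_reg_defaults() */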
+static const struct sdm660_cdc_reg_mask_val msm_anlg_cdc_reg_defaults[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+};
+
+static const struct sdm660_cdc_reg_mask_val
+					msm_anlg_cdc_reg_defaults_2_0[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BOOST_EN_CTL, 0x5F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SLOPE_COMP_IP_ZERO, 0x88),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+static const struct sdm660_cdc_reg_mask_val conga_wcd_reg_defaults[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0x28),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE, 0x0A),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+static const struct sdm660_cdc_reg_mask_val cajon_wcd_reg_defaults[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0x82),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+static const struct sdm660_cdc_reg_mask_val cajon2p0_wcd_reg_defaults[] = {
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SEC_ACCESS, 0xA5),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL3, 0x0F),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_TX_1_2_OPAMP_BIAS, 0x4C),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_CURRENT_LIMIT, 0xA2),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_FBCTRL, 0xA8),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_NCP_VCTRL, 0xA4),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_ANA_BIAS_SET, 0x41),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL, 0x69),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DRV_DBG, 0x01),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_OCP_CTL, 0xE1),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_EAR_STATUS, 0x10),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_BYPASS_MODE, 0x18),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_ANALOG_RX_HPH_BIAS_PA, 0xFA),
+	MSM89XX_REG_VAL(MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80),
+};
+
+static void msm_anlg_cdc_update_reg_defaults(struct snd_soc_codec *codec)
+{
+	u32 i, version;
+	struct sdm660_cdc_priv *sdm660_cdc =
+					snd_soc_codec_get_drvdata(codec);
+
+	version = get_codec_version(sdm660_cdc);
+	if (version == TOMBAK_1_0) {
+		for (i = 0; i < ARRAY_SIZE(msm_anlg_cdc_reg_defaults); i++)
+			snd_soc_write(codec, msm_anlg_cdc_reg_defaults[i].reg,
+					msm_anlg_cdc_reg_defaults[i].val);
+	} else if (version == TOMBAK_2_0) {
+		for (i = 0; i < ARRAY_SIZE(msm_anlg_cdc_reg_defaults_2_0); i++)
+			snd_soc_write(codec,
+				msm_anlg_cdc_reg_defaults_2_0[i].reg,
+				msm_anlg_cdc_reg_defaults_2_0[i].val);
+	} else if (version == CONGA) {
+		for (i = 0; i < ARRAY_SIZE(conga_wcd_reg_defaults); i++)
+			snd_soc_write(codec,
+				conga_wcd_reg_defaults[i].reg,
+				conga_wcd_reg_defaults[i].val);
+	} else if (version == CAJON) {
+		for (i = 0; i < ARRAY_SIZE(cajon_wcd_reg_defaults); i++)
+			snd_soc_write(codec,
+				cajon_wcd_reg_defaults[i].reg,
+				cajon_wcd_reg_defaults[i].val);
+	} else if (version == CAJON_2_0 || version == DIANGU
+				|| version == DRAX_CDC) {
+		for (i = 0; i < ARRAY_SIZE(cajon2p0_wcd_reg_defaults); i++)
+			snd_soc_write(codec,
+				cajon2p0_wcd_reg_defaults[i].reg,
+				cajon2p0_wcd_reg_defaults[i].val);
+	}
+}
+
+static const struct sdm660_cdc_reg_mask_val
+	msm_anlg_cdc_codec_reg_init_val[] = {
+
+	/*
+	 * Initialize the current threshold to 350 mA and the
+	 * number of wait and run cycles to 4096.
+	 */
+	{MSM89XX_PMIC_ANALOG_RX_COM_OCP_CTL, 0xFF, 0x12},
+	{MSM89XX_PMIC_ANALOG_RX_COM_OCP_COUNT, 0xFF, 0xFF},
+};
+
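+/* Seed the register cache with POR defaults without touching the hardware */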
+static void msm_anlg_cdc_codec_init_cache(struct snd_soc_codec *codec)
+{
+	u32 i;
+
+	regcache_cache_only(codec->component.regmap, true);
+	/* update cache with POR values */
+	for (i = 0; i < ARRAY_SIZE(msm89xx_pmic_cdc_defaults); i++)
+		snd_soc_write(codec, msm89xx_pmic_cdc_defaults[i].reg,
+			      msm89xx_pmic_cdc_defaults[i].def);
+	regcache_cache_only(codec->component.regmap, false);
+}
+
+static void msm_anlg_cdc_codec_init_reg(struct snd_soc_codec *codec)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(msm_anlg_cdc_codec_reg_init_val); i++)
+		snd_soc_update_bits(codec,
+				    msm_anlg_cdc_codec_reg_init_val[i].reg,
+				    msm_anlg_cdc_codec_reg_init_val[i].mask,
+				    msm_anlg_cdc_codec_reg_init_val[i].val);
+}
+
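+/* Pulse the digital and analog soft resets via SEC_ACCESS + PERPH_RESET_CTL4 */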
+static int msm_anlg_cdc_bringup(struct snd_soc_codec *codec)
+{
+	snd_soc_write(codec,
+		MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x01);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x01);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_DIGITAL_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_PERPH_RESET_CTL4, 0x00);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_SEC_ACCESS,
+		0xA5);
+	snd_soc_write(codec, MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x00);
+
+	return 0;
+}
+
+static struct regulator *msm_anlg_cdc_find_regulator(
+				const struct sdm660_cdc_priv *sdm660_cdc,
+				const char *name)
+{
+	int i;
+
+	for (i = 0; i < sdm660_cdc->num_of_supplies; i++) {
+		if (sdm660_cdc->supplies[i].supply &&
+		    !strcmp(sdm660_cdc->supplies[i].supply, name))
+			return sdm660_cdc->supplies[i].consumer;
+	}
+
+	dev_err(sdm660_cdc->dev, "Error: regulator not found: %s\n",
+		name);
+	return NULL;
+}
+
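+/* SSR teardown: disable TX/PA blocks, discharge boost and mark the bus down */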
+static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
+{
+	struct msm_asoc_mach_data *pdata = NULL;
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+	unsigned int tx_1_en;
+	unsigned int tx_2_en;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	dev_dbg(codec->dev, "%s: device down!\n", __func__);
+
+	tx_1_en = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_TX_1_EN);
+	tx_2_en = snd_soc_read(codec, MSM89XX_PMIC_ANALOG_TX_2_EN);
+	tx_1_en = tx_1_en & 0x7f;
+	tx_2_en = tx_2_en & 0x7f;
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_TX_1_EN, tx_1_en);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_TX_2_EN, tx_2_en);
+	if (sdm660_cdc_priv->boost_option == BOOST_ON_FOREVER) {
+		if ((snd_soc_read(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL)
+			& 0x80) == 0) {
+			msm_anlg_cdc_dig_notifier_call(codec,
+						       DIG_CDC_EVENT_CLK_ON);
+			snd_soc_write(codec,
+				MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL,
+				0x0C, 0x0C);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
+				0x84, 0x84);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL,
+				0x10, 0x10);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
+				0x1F, 0x1F);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC,
+				0x90, 0x90);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_RX_EAR_CTL,
+				0xFF, 0xFF);
+			/* Wait for 20us for boost settings to take effect */
+			usleep_range(20, 21);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_PWRSTG_CTL,
+				0xFF, 0xFF);
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+				0xE9, 0xE9);
+		}
+	}
+	msm_anlg_cdc_boost_off(codec);
+	sdm660_cdc_priv->hph_mode = NORMAL_MODE;
+
+	/* 40ms to allow boost to discharge */
+	msleep(40);
+	/* Disable PA to avoid pop during codec bring up */
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
+			0x30, 0x00);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
+			0x80, 0x00);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x20);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x20);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_RX_EAR_CTL, 0x12);
+	snd_soc_write(codec,
+		MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
+
+	atomic_set(&pdata->int_mclk0_enabled, false);
+	msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_DOWN);
+	set_bit(BUS_DOWN, &sdm660_cdc_priv->status_mask);
+	snd_soc_card_change_online_state(codec->component.card, 0);
+
+	return 0;
+}
+
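+/* SSR bring-up: restore interrupts, boost/micbias settings and re-init MBHC */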
+static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s: device up!\n", __func__);
+
+	msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_UP);
+	clear_bit(BUS_DOWN, &sdm660_cdc_priv->status_mask);
+	snd_soc_card_change_online_state(codec->component.card, 1);
+	/* delay is required to make sure the sound card state is updated */
+	usleep_range(5000, 5100);
+
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
+				MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
+	snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
+				MSM89XX_PMIC_DIGITAL_INT_EN_CLR__POR);
+
+	msm_anlg_cdc_set_boost_v(codec);
+	msm_anlg_cdc_set_micb_v(codec);
+	if (sdm660_cdc_priv->boost_option == BOOST_ON_FOREVER)
+		msm_anlg_cdc_boost_on(codec);
+	else if (sdm660_cdc_priv->boost_option == BYPASS_ALWAYS)
+		msm_anlg_cdc_bypass_on(codec);
+
+	msm_anlg_cdc_configure_cap(codec, false, false);
+	wcd_mbhc_stop(&sdm660_cdc_priv->mbhc);
+	wcd_mbhc_deinit(&sdm660_cdc_priv->mbhc);
+	ret = wcd_mbhc_init(&sdm660_cdc_priv->mbhc, codec, &mbhc_cb,
+			    &intr_ids, wcd_mbhc_registers, true);
+	if (ret)
+		dev_err(codec->dev, "%s: mbhc initialization failed\n",
+			__func__);
+	else
+		wcd_mbhc_start(&sdm660_cdc_priv->mbhc,
+			sdm660_cdc_priv->mbhc.mbhc_cfg);
+
+	return 0;
+}
+
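+/*
+ * ADSP SSR notifier: tear the codec down when the audio service goes down
+ * and bring it back up once the ADSP reports ready, polling for up to
+ * ADSP_STATE_READY_TIMEOUT_MS.
+ */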
+static int sdm660_cdc_notifier_service_cb(struct notifier_block *nb,
+					     unsigned long opcode, void *ptr)
+{
+	struct snd_soc_codec *codec;
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+				container_of(nb, struct sdm660_cdc_priv,
+					     audio_ssr_nb);
+	bool adsp_ready = false;
+	bool timedout;
+	unsigned long timeout;
+
+	codec = sdm660_cdc_priv->codec;
+	dev_dbg(codec->dev, "%s: Service opcode 0x%lx\n", __func__, opcode);
+
+	switch (opcode) {
+	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		dev_dbg(codec->dev,
+			"ADSP is about to power down. Tear down/reset the codec\n");
+		msm_anlg_cdc_device_down(codec);
+		break;
+	case AUDIO_NOTIFIER_SERVICE_UP:
+		dev_dbg(codec->dev,
+			"ADSP is about to power up. Bring up the codec\n");
+
+		if (!q6core_is_adsp_ready()) {
+			dev_dbg(codec->dev,
+				"ADSP isn't ready\n");
+			timeout = jiffies +
+				  msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+			while (!(timedout = time_after(jiffies, timeout))) {
+				if (!q6core_is_adsp_ready()) {
+					dev_dbg(codec->dev,
+						"ADSP isn't ready\n");
+				} else {
+					dev_dbg(codec->dev,
+						"ADSP is ready\n");
+					adsp_ready = true;
+					goto powerup;
+				}
+			}
+		} else {
+			adsp_ready = true;
+			dev_dbg(codec->dev, "%s: DSP is ready\n", __func__);
+		}
+powerup:
+		if (adsp_ready)
+			msm_anlg_cdc_device_up(codec);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+int msm_anlg_cdc_hs_detect(struct snd_soc_codec *codec,
+			   struct wcd_mbhc_config *mbhc_cfg)
+{
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+
+	return wcd_mbhc_start(&sdm660_cdc_priv->mbhc, mbhc_cfg);
+}
+EXPORT_SYMBOL(msm_anlg_cdc_hs_detect);
+
+void msm_anlg_cdc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+		snd_soc_codec_get_drvdata(codec);
+
+	wcd_mbhc_stop(&sdm660_cdc_priv->mbhc);
+}
+EXPORT_SYMBOL(msm_anlg_cdc_hs_detect_exit);
+
+void msm_anlg_cdc_update_int_spk_boost(bool enable)
+{
+	pr_debug("%s: enable = %d\n", __func__, enable);
+	spkr_boost_en = enable;
+}
+EXPORT_SYMBOL(msm_anlg_cdc_update_int_spk_boost);
+
+static void msm_anlg_cdc_set_micb_v(struct snd_soc_codec *codec)
+{
+
+	struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
+	struct sdm660_cdc_pdata *pdata = sdm660_cdc->dev->platform_data;
+	u8 reg_val;
+
+	reg_val = VOLTAGE_CONVERTER(pdata->micbias.cfilt1_mv, MICBIAS_MIN_VAL,
+			MICBIAS_STEP_SIZE);
+	dev_dbg(codec->dev, "cfilt1_mv %d reg_val %x\n",
+			(u32)pdata->micbias.cfilt1_mv, reg_val);
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_VAL,
+			0xF8, (reg_val << 3));
+}
+
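+/* Program the configured speaker boost output voltage */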
+static void msm_anlg_cdc_set_boost_v(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+				snd_soc_codec_get_drvdata(codec);
+
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_OUTPUT_VOLTAGE,
+			0x1F, sdm660_cdc_priv->boost_voltage);
+}
+
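+/* Select the micbias external bypass-cap mode based on the active micbiases */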
+static void msm_anlg_cdc_configure_cap(struct snd_soc_codec *codec,
+				       bool micbias1, bool micbias2)
+{
+
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+
+	pr_debug("%s: micbias1 = %d, micbias2 = %d\n", __func__, micbias1,
+			micbias2);
+	if (micbias1 && micbias2) {
+		if ((pdata->micbias1_cap_mode
+		     == MICBIAS_EXT_BYP_CAP) ||
+		    (pdata->micbias2_cap_mode
+		     == MICBIAS_EXT_BYP_CAP))
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				0x40, (MICBIAS_EXT_BYP_CAP << 6));
+		else
+			snd_soc_update_bits(codec,
+				MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				0x40, (MICBIAS_NO_EXT_BYP_CAP << 6));
+	} else if (micbias2) {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				0x40, (pdata->micbias2_cap_mode << 6));
+	} else if (micbias1) {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				0x40, (pdata->micbias1_cap_mode << 6));
+	} else {
+		snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MICB_1_EN,
+				0x40, 0x00);
+	}
+}
+
+static ssize_t msm_anlg_codec_version_read(struct snd_info_entry *entry,
+					   void *file_private_data,
+					   struct file *file,
+					   char __user *buf, size_t count,
+					   loff_t pos)
+{
+	struct sdm660_cdc_priv *sdm660_cdc_priv;
+	char buffer[MSM_ANLG_CDC_VERSION_ENTRY_SIZE];
+	int len = 0;
+
+	sdm660_cdc_priv = (struct sdm660_cdc_priv *) entry->private_data;
+	if (!sdm660_cdc_priv) {
+		pr_err("%s: sdm660_cdc_priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (get_codec_version(sdm660_cdc_priv)) {
+	case DRAX_CDC:
+		len = snprintf(buffer, sizeof(buffer), "DRAX-CDC_1_0\n");
+		break;
+	default:
+		len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	}
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+static struct snd_info_entry_ops msm_anlg_codec_info_ops = {
+	.read = msm_anlg_codec_version_read,
+};
+
+/*
+ * msm_anlg_codec_info_create_codec_entry - creates pmic_analog module
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates pmic_analog module and version entry under the given
+ * parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					   struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct sdm660_cdc_priv *sdm660_cdc_priv;
+	struct snd_soc_card *card;
+	int ret;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	sdm660_cdc_priv = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	sdm660_cdc_priv->entry = snd_register_module_info(codec_root->module,
+							     "spmi0-03",
+							     codec_root);
+	if (!sdm660_cdc_priv->entry) {
+		dev_dbg(codec->dev, "%s: failed to create pmic_analog entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   sdm660_cdc_priv->entry);
+	if (!version_entry) {
+		dev_dbg(codec->dev, "%s: failed to create pmic_analog version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = sdm660_cdc_priv;
+	version_entry->size = MSM_ANLG_CDC_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &msm_anlg_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	sdm660_cdc_priv->version_entry = version_entry;
+
+	sdm660_cdc_priv->audio_ssr_nb.notifier_call =
+				sdm660_cdc_notifier_service_cb;
+	ret = audio_notifier_register("pmic_analog_cdc",
+				      AUDIO_NOTIFIER_ADSP_DOMAIN,
+				      &sdm660_cdc_priv->audio_ssr_nb);
+	if (ret < 0) {
+		pr_err("%s: Audio notifier register failed ret = %d\n",
+			__func__, ret);
+		return ret;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(msm_anlg_codec_info_create_codec_entry);
+
+static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc;
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	int ret;
+
+	sdm660_cdc = dev_get_drvdata(codec->dev);
+	sdm660_cdc->codec = codec;
+
+	/* codec resmgr module init */
+	sdm660_cdc->spkdrv_reg =
+				msm_anlg_cdc_find_regulator(sdm660_cdc,
+						MSM89XX_VDD_SPKDRV_NAME);
+	sdm660_cdc->pmic_rev =
+				snd_soc_read(codec,
+					     MSM89XX_PMIC_DIGITAL_REVISION1);
+	sdm660_cdc->codec_version =
+				snd_soc_read(codec,
+					MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE);
+	sdm660_cdc->analog_major_rev =
+				snd_soc_read(codec,
+					     MSM89XX_PMIC_ANALOG_REVISION4);
+
+	if (sdm660_cdc->codec_version == CONGA) {
+		dev_dbg(codec->dev, "%s: Conga REV: %d\n", __func__,
+					sdm660_cdc->codec_version);
+		sdm660_cdc->ext_spk_boost_set = true;
+	} else {
+		dev_dbg(codec->dev, "%s: PMIC REV: %d\n", __func__,
+					sdm660_cdc->pmic_rev);
+		if (sdm660_cdc->pmic_rev == TOMBAK_1_0 &&
+			sdm660_cdc->codec_version == CAJON_2_0) {
+			if (sdm660_cdc->analog_major_rev == 0x02) {
+				sdm660_cdc->codec_version = DRAX_CDC;
+				dev_dbg(codec->dev,
+					"%s : Drax codec detected\n", __func__);
+			} else {
+				sdm660_cdc->codec_version = DIANGU;
+				dev_dbg(codec->dev, "%s : Diangu detected\n",
+					__func__);
+			}
+		} else if (sdm660_cdc->pmic_rev == TOMBAK_1_0 &&
+			(snd_soc_read(codec, MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
+			 & 0x80)) {
+			sdm660_cdc->codec_version = CAJON;
+			dev_dbg(codec->dev, "%s : Cajon detected\n", __func__);
+		} else if (sdm660_cdc->pmic_rev == TOMBAK_2_0 &&
+			(snd_soc_read(codec, MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
+			 & 0x80)) {
+			sdm660_cdc->codec_version = CAJON_2_0;
+			dev_dbg(codec->dev, "%s : Cajon 2.0 detected\n",
+						__func__);
+		}
+	}
+	/*
+	 * set to default boost option BOOST_SWITCH, user mixer path can change
+	 * Set the default boost option to BOOST_SWITCH; the user mixer path can
+	 * change it to BOOST_ALWAYS or BOOST_BYPASS based on the solution chosen.
+	sdm660_cdc->boost_option = BOOST_SWITCH;
+	sdm660_cdc->hph_mode = NORMAL_MODE;
+
+	msm_anlg_cdc_dt_parse_boost_info(codec);
+	msm_anlg_cdc_set_boost_v(codec);
+
+	snd_soc_add_codec_controls(codec, impedance_detect_controls,
+				   ARRAY_SIZE(impedance_detect_controls));
+	snd_soc_add_codec_controls(codec, hph_type_detect_controls,
+				  ARRAY_SIZE(hph_type_detect_controls));
+
+	msm_anlg_cdc_bringup(codec);
+	msm_anlg_cdc_codec_init_cache(codec);
+	msm_anlg_cdc_codec_init_reg(codec);
+	msm_anlg_cdc_update_reg_defaults(codec);
+
+	wcd9xxx_spmi_set_codec(codec);
+
+	sdm660_cdc->on_demand_list[ON_DEMAND_MICBIAS].supply =
+				msm_anlg_cdc_find_regulator(
+				sdm660_cdc,
+				on_demand_supply_name[ON_DEMAND_MICBIAS]);
+	atomic_set(&sdm660_cdc->on_demand_list[ON_DEMAND_MICBIAS].ref,
+		   0);
+
+	sdm660_cdc->fw_data = devm_kzalloc(codec->dev,
+					sizeof(*(sdm660_cdc->fw_data)),
+					GFP_KERNEL);
+	if (!sdm660_cdc->fw_data)
+		return -ENOMEM;
+
+	set_bit(WCD9XXX_MBHC_CAL, sdm660_cdc->fw_data->cal_bit);
+	ret = wcd_cal_create_hwdep(sdm660_cdc->fw_data,
+			WCD9XXX_CODEC_HWDEP_NODE, codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	wcd_mbhc_init(&sdm660_cdc->mbhc, codec, &mbhc_cb, &intr_ids,
+		      wcd_mbhc_registers, true);
+
+	sdm660_cdc->int_mclk0_enabled = false;
+	/* Update speaker boost configuration */
+	sdm660_cdc->spk_boost_set = spkr_boost_en;
+	pr_debug("%s: speaker boost configured = %d\n",
+			__func__, sdm660_cdc->spk_boost_set);
+
+	/* Set initial MICBIAS voltage level */
+	msm_anlg_cdc_set_micb_v(codec);
+
+	/* Set initial cap mode */
+	msm_anlg_cdc_configure_cap(codec, false, false);
+
+	snd_soc_dapm_ignore_suspend(dapm, "PDM Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "PDM Capture");
+
+	return 0;
+}
+
+static int msm_anlg_cdc_soc_remove(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc_priv =
+					dev_get_drvdata(codec->dev);
+
+	sdm660_cdc_priv->spkdrv_reg = NULL;
+	sdm660_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].supply = NULL;
+	atomic_set(&sdm660_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].ref,
+		   0);
+	wcd_mbhc_deinit(&sdm660_cdc_priv->mbhc);
+
+	return 0;
+}
+
+static int msm_anlg_cdc_enable_static_supplies_to_optimum(
+				struct sdm660_cdc_priv *sdm660_cdc,
+				struct sdm660_cdc_pdata *pdata)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < sdm660_cdc->num_of_supplies; i++) {
+		if (pdata->regulator[i].ondemand)
+			continue;
+		if (regulator_count_voltages(
+				sdm660_cdc->supplies[i].consumer) <= 0)
+			continue;
+
+		ret = regulator_set_voltage(
+				sdm660_cdc->supplies[i].consumer,
+				pdata->regulator[i].min_uv,
+				pdata->regulator[i].max_uv);
+		if (ret) {
+			dev_err(sdm660_cdc->dev,
+				"Setting volt failed for regulator %s err %d\n",
+				sdm660_cdc->supplies[i].supply, ret);
+		}
+
+		ret = regulator_set_load(sdm660_cdc->supplies[i].consumer,
+			pdata->regulator[i].optimum_ua);
+		dev_dbg(sdm660_cdc->dev, "Regulator %s set optimum mode\n",
+			 sdm660_cdc->supplies[i].supply);
+	}
+
+	return ret;
+}
+
+static int msm_anlg_cdc_disable_static_supplies_to_optimum(
+			struct sdm660_cdc_priv *sdm660_cdc,
+			struct sdm660_cdc_pdata *pdata)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < sdm660_cdc->num_of_supplies; i++) {
+		if (pdata->regulator[i].ondemand)
+			continue;
+		if (regulator_count_voltages(
+				sdm660_cdc->supplies[i].consumer) <= 0)
+			continue;
+		regulator_set_voltage(sdm660_cdc->supplies[i].consumer, 0,
+				pdata->regulator[i].max_uv);
+		regulator_set_load(sdm660_cdc->supplies[i].consumer, 0);
+		dev_dbg(sdm660_cdc->dev, "Regulator %s set optimum mode\n",
+				 sdm660_cdc->supplies[i].supply);
+	}
+
+	return ret;
+}
+
+static int msm_anlg_cdc_suspend(struct snd_soc_codec *codec)
+{
+	struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
+	struct sdm660_cdc_pdata *sdm660_cdc_pdata =
+					sdm660_cdc->dev->platform_data;
+
+	msm_anlg_cdc_disable_static_supplies_to_optimum(sdm660_cdc,
+							sdm660_cdc_pdata);
+	return 0;
+}
+
+static int msm_anlg_cdc_resume(struct snd_soc_codec *codec)
+{
+	struct msm_asoc_mach_data *pdata = NULL;
+	struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
+	struct sdm660_cdc_pdata *sdm660_cdc_pdata =
+					sdm660_cdc->dev->platform_data;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	msm_anlg_cdc_enable_static_supplies_to_optimum(sdm660_cdc,
+						       sdm660_cdc_pdata);
+	return 0;
+}
+
+static struct regmap *msm_anlg_get_regmap(struct device *dev)
+{
+	return dev_get_regmap(dev->parent, NULL);
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_sdm660_cdc = {
+	.probe	= msm_anlg_cdc_soc_probe,
+	.remove	= msm_anlg_cdc_soc_remove,
+	.suspend = msm_anlg_cdc_suspend,
+	.resume = msm_anlg_cdc_resume,
+	.reg_word_size = 1,
+	.controls = msm_anlg_cdc_snd_controls,
+	.num_controls = ARRAY_SIZE(msm_anlg_cdc_snd_controls),
+	.dapm_widgets = msm_anlg_cdc_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(msm_anlg_cdc_dapm_widgets),
+	.dapm_routes = audio_map,
+	.num_dapm_routes = ARRAY_SIZE(audio_map),
+	.get_regmap = msm_anlg_get_regmap,
+};
+
+static int msm_anlg_cdc_init_supplies(struct sdm660_cdc_priv *sdm660_cdc,
+				struct sdm660_cdc_pdata *pdata)
+{
+	int ret;
+	int i;
+
+	sdm660_cdc->supplies = devm_kzalloc(sdm660_cdc->dev,
+					sizeof(struct regulator_bulk_data) *
+					ARRAY_SIZE(pdata->regulator),
+					GFP_KERNEL);
+	if (!sdm660_cdc->supplies) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	sdm660_cdc->num_of_supplies = 0;
+	if (ARRAY_SIZE(pdata->regulator) > MAX_REGULATOR) {
+		dev_err(sdm660_cdc->dev, "%s: Array Size out of bound\n",
+			__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
+		if (pdata->regulator[i].name) {
+			sdm660_cdc->supplies[i].supply =
+						pdata->regulator[i].name;
+			sdm660_cdc->num_of_supplies++;
+		}
+	}
+
+	ret = devm_regulator_bulk_get(sdm660_cdc->dev,
+				      sdm660_cdc->num_of_supplies,
+				      sdm660_cdc->supplies);
+	if (ret != 0) {
+		dev_err(sdm660_cdc->dev,
+			"Failed to get supplies: err = %d\n",
+			ret);
+		goto err_supplies;
+	}
+
+	for (i = 0; i < sdm660_cdc->num_of_supplies; i++) {
+		if (regulator_count_voltages(
+			sdm660_cdc->supplies[i].consumer) <= 0)
+			continue;
+		ret = regulator_set_voltage(sdm660_cdc->supplies[i].consumer,
+					    pdata->regulator[i].min_uv,
+					    pdata->regulator[i].max_uv);
+		if (ret) {
+			dev_err(sdm660_cdc->dev,
+				"Setting regulator voltage failed for regulator %s err = %d\n",
+				sdm660_cdc->supplies[i].supply, ret);
+			goto err_supplies;
+		}
+		ret = regulator_set_load(sdm660_cdc->supplies[i].consumer,
+					 pdata->regulator[i].optimum_ua);
+		if (ret < 0) {
+			dev_err(sdm660_cdc->dev,
+				"Setting regulator optimum mode failed for regulator %s err = %d\n",
+				sdm660_cdc->supplies[i].supply, ret);
+			goto err_supplies;
+		} else {
+			ret = 0;
+		}
+	}
+
+	return ret;
+
+err_supplies:
+	kfree(sdm660_cdc->supplies);
+err:
+	return ret;
+}
+
+static int msm_anlg_cdc_enable_static_supplies(
+					struct sdm660_cdc_priv *sdm660_cdc,
+					struct sdm660_cdc_pdata *pdata)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < sdm660_cdc->num_of_supplies; i++) {
+		if (pdata->regulator[i].ondemand)
+			continue;
+		ret = regulator_enable(sdm660_cdc->supplies[i].consumer);
+		if (ret) {
+			dev_err(sdm660_cdc->dev, "Failed to enable %s\n",
+			       sdm660_cdc->supplies[i].supply);
+			break;
+		}
+		dev_dbg(sdm660_cdc->dev, "Enabled regulator %s\n",
+				 sdm660_cdc->supplies[i].supply);
+	}
+
+	while (ret && i-- > 0)
+		if (!pdata->regulator[i].ondemand)
+			regulator_disable(sdm660_cdc->supplies[i].consumer);
+	return ret;
+}
+
+static void msm_anlg_cdc_disable_supplies(struct sdm660_cdc_priv *sdm660_cdc,
+				     struct sdm660_cdc_pdata *pdata)
+{
+	int i;
+
+	regulator_bulk_disable(sdm660_cdc->num_of_supplies,
+			       sdm660_cdc->supplies);
+	for (i = 0; i < sdm660_cdc->num_of_supplies; i++) {
+		if (regulator_count_voltages(
+				sdm660_cdc->supplies[i].consumer) <= 0)
+			continue;
+		regulator_set_voltage(sdm660_cdc->supplies[i].consumer, 0,
+				pdata->regulator[i].max_uv);
+		regulator_set_load(sdm660_cdc->supplies[i].consumer, 0);
+	}
+	regulator_bulk_free(sdm660_cdc->num_of_supplies,
+			    sdm660_cdc->supplies);
+	kfree(sdm660_cdc->supplies);
+}
+
+static const struct of_device_id sdm660_codec_of_match[] = {
+	{ .compatible = "qcom,pmic-analog-codec", },
+	{},
+};
+
+static void msm_anlg_add_child_devices(struct work_struct *work)
+{
+	struct sdm660_cdc_priv *pdata;
+	struct platform_device *pdev;
+	struct device_node *node;
+	struct msm_dig_ctrl_data *dig_ctrl_data = NULL, *temp;
+	int ret, ctrl_num = 0;
+	struct msm_dig_ctrl_platform_data *platdata;
+	char plat_dev_name[MSM_DIG_CDC_STRING_LEN];
+
+	pdata = container_of(work, struct sdm660_cdc_priv,
+			     msm_anlg_add_child_devices_work);
+	if (!pdata) {
+		pr_err("%s: Memory for pdata does not exist\n",
+			__func__);
+		return;
+	}
+	if (!pdata->dev->of_node) {
+		dev_err(pdata->dev,
+			"%s: DT node for pdata does not exist\n", __func__);
+		return;
+	}
+
+	platdata = &pdata->dig_plat_data;
+
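+	/*
+	 * Walk the analog codec's DT child nodes and register a platform
+	 * device for each msm-dig-codec node, passing dig_plat_data along as
+	 * platform data for the digital codec driver.
+	 */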
+	for_each_child_of_node(pdata->dev->of_node, node) {
+		if (!strcmp(node->name, "msm-dig-codec"))
+			strlcpy(plat_dev_name, "msm_digital_codec",
+				(MSM_DIG_CDC_STRING_LEN - 1));
+		else
+			continue;
+
+		pdev = platform_device_alloc(plat_dev_name, -1);
+		if (!pdev) {
+			dev_err(pdata->dev, "%s: pdev memory alloc failed\n",
+				__func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		pdev->dev.parent = pdata->dev;
+		pdev->dev.of_node = node;
+
+		if (!strcmp(node->name, "msm-dig-codec")) {
+			ret = platform_device_add_data(pdev, platdata,
+						       sizeof(*platdata));
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: cannot add plat data ctrl:%d\n",
+					__func__, ctrl_num);
+				goto fail_pdev_add;
+			}
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"%s: Cannot add platform device\n",
+				__func__);
+			goto fail_pdev_add;
+		}
+
+		if (!strcmp(node->name, "msm-dig-codec")) {
+			temp = krealloc(dig_ctrl_data,
+					(ctrl_num + 1) * sizeof(
+					struct msm_dig_ctrl_data),
+					GFP_KERNEL);
+			if (!temp) {
+				dev_err(&pdev->dev, "out of memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+			dig_ctrl_data = temp;
+			dig_ctrl_data[ctrl_num].dig_pdev = pdev;
+			ctrl_num++;
+			dev_dbg(&pdev->dev,
+				"%s: Added digital codec device(s)\n",
+				__func__);
+			pdata->dig_ctrl_data = dig_ctrl_data;
+		}
+	}
+
+	return;
+fail_pdev_add:
+	platform_device_put(pdev);
+err:
+	return;
+}
+
+static int msm_anlg_cdc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct sdm660_cdc_priv *sdm660_cdc = NULL;
+	struct sdm660_cdc_pdata *pdata;
+	int adsp_state;
+
+	adsp_state = apr_get_subsys_state();
+	if (adsp_state != APR_SUBSYS_LOADED) {
+		dev_err(&pdev->dev, "Adsp is not loaded yet %d\n",
+			adsp_state);
+		return -EPROBE_DEFER;
+	}
+	device_init_wakeup(&pdev->dev, true);
+
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "%s:Platform data from device tree\n",
+			__func__);
+		pdata = msm_anlg_cdc_populate_dt_pdata(&pdev->dev);
+		pdev->dev.platform_data = pdata;
+	} else {
+		dev_dbg(&pdev->dev, "%s:Platform data from board file\n",
+			__func__);
+		pdata = pdev->dev.platform_data;
+	}
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "%s:Platform data failed to populate\n",
+			__func__);
+		ret = -EINVAL;
+		goto rtn;
+	}
+	sdm660_cdc = devm_kzalloc(&pdev->dev, sizeof(struct sdm660_cdc_priv),
+				     GFP_KERNEL);
+	if (sdm660_cdc == NULL) {
+		ret = -ENOMEM;
+		goto rtn;
+	}
+
+	sdm660_cdc->dev = &pdev->dev;
+	ret = msm_anlg_cdc_init_supplies(sdm660_cdc, pdata);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Fail to enable Codec supplies\n",
+			__func__);
+		goto rtn;
+	}
+	ret = msm_anlg_cdc_enable_static_supplies(sdm660_cdc, pdata);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s: Fail to enable Codec pre-reset supplies\n",
+			__func__);
+		goto rtn;
+	}
+	/* Allow supplies to be ready */
+	usleep_range(5, 6);
+
+	wcd9xxx_spmi_set_dev(pdev, 0);
+	wcd9xxx_spmi_set_dev(pdev, 1);
+	if (wcd9xxx_spmi_irq_init()) {
+		dev_err(&pdev->dev,
+			"%s: irq initialization failed\n", __func__);
+	} else {
+		dev_dbg(&pdev->dev,
+			"%s: irq initialization passed\n", __func__);
+	}
+	dev_set_drvdata(&pdev->dev, sdm660_cdc);
+
+	ret = snd_soc_register_codec(&pdev->dev,
+				     &soc_codec_dev_sdm660_cdc,
+				     msm_anlg_cdc_i2s_dai,
+				     ARRAY_SIZE(msm_anlg_cdc_i2s_dai));
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s:snd_soc_register_codec failed with error %d\n",
+			__func__, ret);
+		goto err_supplies;
+	}
+	BLOCKING_INIT_NOTIFIER_HEAD(&sdm660_cdc->notifier);
+	BLOCKING_INIT_NOTIFIER_HEAD(&sdm660_cdc->notifier_mbhc);
+
+	sdm660_cdc->dig_plat_data.handle = (void *) sdm660_cdc;
+	sdm660_cdc->dig_plat_data.update_clkdiv = update_clkdiv;
+	sdm660_cdc->dig_plat_data.get_cdc_version = get_cdc_version;
+	sdm660_cdc->dig_plat_data.register_notifier =
+					msm_anlg_cdc_dig_register_notifier;
+	INIT_WORK(&sdm660_cdc->msm_anlg_add_child_devices_work,
+		  msm_anlg_add_child_devices);
+	schedule_work(&sdm660_cdc->msm_anlg_add_child_devices_work);
+
+	return ret;
+err_supplies:
+	msm_anlg_cdc_disable_supplies(sdm660_cdc, pdata);
+rtn:
+	return ret;
+}
+
+static int msm_anlg_cdc_remove(struct platform_device *pdev)
+{
+	struct sdm660_cdc_priv *sdm660_cdc = dev_get_drvdata(&pdev->dev);
+	struct sdm660_cdc_pdata *pdata = sdm660_cdc->dev->platform_data;
+
+	snd_soc_unregister_codec(&pdev->dev);
+	msm_anlg_cdc_disable_supplies(sdm660_cdc, pdata);
+	return 0;
+}
+
+static struct platform_driver msm_anlg_codec_driver = {
+	.driver		= {
+		.owner          = THIS_MODULE,
+		.name           = DRV_NAME,
+		.of_match_table = of_match_ptr(sdm660_codec_of_match)
+	},
+	.probe          = msm_anlg_cdc_probe,
+	.remove         = msm_anlg_cdc_remove,
+};
+module_platform_driver(msm_anlg_codec_driver);
+
+MODULE_DESCRIPTION("MSM Audio Analog codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
new file mode 100644
index 0000000..0c9e9a6
--- /dev/null
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
@@ -0,0 +1,237 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_ANALOG_CDC_H
+#define MSM_ANALOG_CDC_H
+
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/q6afe-v2.h>
+#include "../wcd-mbhc-v2.h"
+#include "../wcdcal-hwdep.h"
+#include "sdm660-cdc-registers.h"
+
+#define MICBIAS_EXT_BYP_CAP 0x00
+#define MICBIAS_NO_EXT_BYP_CAP 0x01
+
+#define MSM89XX_NUM_IRQ_REGS	2
+#define MAX_REGULATOR		7
+#define MSM89XX_REG_VAL(reg, val)	{reg, 0, val}
+
+#define MSM89XX_VDD_SPKDRV_NAME "cdc-vdd-spkdrv"
+
+#define DEFAULT_MULTIPLIER 800
+#define DEFAULT_GAIN 9
+#define DEFAULT_OFFSET 100
+
+extern const u8 msm89xx_pmic_cdc_reg_readable[MSM89XX_PMIC_CDC_CACHE_SIZE];
+extern const u8 msm89xx_cdc_core_reg_readable[MSM89XX_CDC_CORE_CACHE_SIZE];
+extern struct regmap_config msm89xx_cdc_core_regmap_config;
+extern struct regmap_config msm89xx_pmic_cdc_regmap_config;
+
+enum wcd_curr_ref {
+	I_h4_UA = 0,
+	I_pt5_UA,
+	I_14_UA,
+	I_l4_UA,
+	I_1_UA,
+};
+
+enum wcd_mbhc_imp_det_pin {
+	WCD_MBHC_DET_NONE = 0,
+	WCD_MBHC_DET_HPHL,
+	WCD_MBHC_DET_HPHR,
+	WCD_MBHC_DET_BOTH,
+};
+
+/* Each micbias can be assigned to one of three cfilters
+ * Vbatt_min >= .15V + ldoh_v
+ * ldoh_v >= .15v + cfiltx_mv
+ * If ldoh_v = 1.95 160 mv < cfiltx_mv < 1800 mv
+ * If ldoh_v = 2.35 200 mv < cfiltx_mv < 2200 mv
+ * If ldoh_v = 2.75 240 mv < cfiltx_mv < 2600 mv
+ * If ldoh_v = 2.85 250 mv < cfiltx_mv < 2700 mv
+ */
+
+struct wcd_micbias_setting {
+	u8 ldoh_v;
+	u32 cfilt1_mv; /* in mv */
+	u32 cfilt2_mv; /* in mv */
+	u32 cfilt3_mv; /* in mv */
+	/* Different WCD9xxx series codecs may not
+	 * have 4 mic biases. If a codec has fewer
+	 * mic biases, some of these properties will
+	 * not be used.
+	 */
+	u8 bias1_cfilt_sel;
+	u8 bias2_cfilt_sel;
+	u8 bias3_cfilt_sel;
+	u8 bias4_cfilt_sel;
+	u8 bias1_cap_mode;
+	u8 bias2_cap_mode;
+	u8 bias3_cap_mode;
+	u8 bias4_cap_mode;
+	bool bias2_is_headset_only;
+};
+
+enum sdm660_cdc_pid_current {
+	MSM89XX_PID_MIC_2P5_UA,
+	MSM89XX_PID_MIC_5_UA,
+	MSM89XX_PID_MIC_10_UA,
+	MSM89XX_PID_MIC_20_UA,
+};
+
+struct sdm660_cdc_reg_mask_val {
+	u16	reg;
+	u8	mask;
+	u8	val;
+};
+
+enum {
+	/* INTR_REG 0 - Digital Periph */
+	MSM89XX_IRQ_SPKR_CNP = 0,
+	MSM89XX_IRQ_SPKR_CLIP,
+	MSM89XX_IRQ_SPKR_OCP,
+	MSM89XX_IRQ_MBHC_INSREM_DET1,
+	MSM89XX_IRQ_MBHC_RELEASE,
+	MSM89XX_IRQ_MBHC_PRESS,
+	MSM89XX_IRQ_MBHC_INSREM_DET,
+	MSM89XX_IRQ_MBHC_HS_DET,
+	/* INTR_REG 1 - Analog Periph */
+	MSM89XX_IRQ_EAR_OCP,
+	MSM89XX_IRQ_HPHR_OCP,
+	MSM89XX_IRQ_HPHL_OCP,
+	MSM89XX_IRQ_EAR_CNP,
+	MSM89XX_IRQ_HPHR_CNP,
+	MSM89XX_IRQ_HPHL_CNP,
+	MSM89XX_NUM_IRQS,
+};
+
+enum {
+	ON_DEMAND_MICBIAS = 0,
+	ON_DEMAND_SPKDRV,
+	ON_DEMAND_SUPPLIES_MAX,
+};
+
+/*
+ * The delay list is per codec HW specification.
+ * Please add delay in the list in the future instead
+ * of magic number
+ */
+enum {
+	CODEC_DELAY_1_MS = 1000,
+	CODEC_DELAY_1_1_MS  = 1100,
+};
+
+struct sdm660_cdc_regulator {
+	const char *name;
+	int min_uv;
+	int max_uv;
+	int optimum_ua;
+	bool ondemand;
+	struct regulator *regulator;
+};
+
+struct on_demand_supply {
+	struct regulator *supply;
+	atomic_t ref;
+};
+
+struct wcd_imped_i_ref {
+	enum wcd_curr_ref curr_ref;
+	int min_val;
+	int multiplier;
+	int gain_adj;
+	int offset;
+};
+
+enum sdm660_cdc_micbias_num {
+	MSM89XX_MICBIAS1 = 0,
+};
+
+/* Hold instance to digital codec platform device */
+struct msm_dig_ctrl_data {
+	struct platform_device *dig_pdev;
+};
+
+struct msm_dig_ctrl_platform_data {
+	void *handle;
+	void (*update_clkdiv)(void *handle, int val);
+	int (*get_cdc_version)(void *handle);
+	int (*register_notifier)(void *handle,
+				 struct notifier_block *nblock,
+				 bool enable);
+};
+
+struct sdm660_cdc_priv {
+	struct device *dev;
+	u32 num_of_supplies;
+	struct regulator_bulk_data *supplies;
+	struct snd_soc_codec *codec;
+	struct work_struct msm_anlg_add_child_devices_work;
+	struct msm_dig_ctrl_platform_data dig_plat_data;
+	/* digital codec data structure */
+	struct msm_dig_ctrl_data *dig_ctrl_data;
+	struct blocking_notifier_head notifier;
+	u16 pmic_rev;
+	u16 codec_version;
+	u16 analog_major_rev;
+	u32 boost_voltage;
+	u32 adc_count;
+	u32 rx_bias_count;
+	bool int_mclk0_enabled;
+	u16 boost_option;
+	/* mode to select hd2 */
+	u32 hph_mode;
+	/* compander used for each rx chain */
+	bool spk_boost_set;
+	bool ear_pa_boost_set;
+	bool ext_spk_boost_set;
+	struct on_demand_supply on_demand_list[ON_DEMAND_SUPPLIES_MAX];
+	struct regulator *spkdrv_reg;
+	struct blocking_notifier_head notifier_mbhc;
+	/* mbhc module */
+	struct wcd_mbhc mbhc;
+	/* cal info for codec */
+	struct fw_info *fw_data;
+	struct notifier_block audio_ssr_nb;
+	int (*codec_spk_ext_pa_cb)(struct snd_soc_codec *codec, int enable);
+	unsigned long status_mask;
+	struct wcd_imped_i_ref imped_i_ref;
+	enum wcd_mbhc_imp_det_pin imped_det_pin;
+	/* Entry for version info */
+	struct snd_info_entry *entry;
+	struct snd_info_entry *version_entry;
+};
+
+struct sdm660_cdc_pdata {
+	struct wcd_micbias_setting micbias;
+	struct sdm660_cdc_regulator regulator[MAX_REGULATOR];
+};
+
+extern int msm_anlg_cdc_mclk_enable(struct snd_soc_codec *codec,
+				    int mclk_enable, bool dapm);
+
+extern int msm_anlg_cdc_hs_detect(struct snd_soc_codec *codec,
+		    struct wcd_mbhc_config *mbhc_cfg);
+
+extern void msm_anlg_cdc_hs_detect_exit(struct snd_soc_codec *codec);
+
+extern void sdm660_cdc_update_int_spk_boost(bool enable);
+
+extern void msm_anlg_cdc_spk_ext_pa_cb(
+		int (*codec_spk_ext_pa)(struct snd_soc_codec *codec,
+		int enable), struct snd_soc_codec *codec);
+int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					   struct snd_soc_codec *codec);
+#endif
diff --git a/sound/soc/codecs/sdm660_cdc/msm-cdc-common.h b/sound/soc/codecs/sdm660_cdc/msm-cdc-common.h
new file mode 100644
index 0000000..95dbc76
--- /dev/null
+++ b/sound/soc/codecs/sdm660_cdc/msm-cdc-common.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/regmap.h>
+#include "sdm660-cdc-registers.h"
+
+extern struct reg_default
+		msm89xx_cdc_core_defaults[MSM89XX_CDC_CORE_CACHE_SIZE];
+extern struct reg_default
+		msm89xx_pmic_cdc_defaults[MSM89XX_PMIC_CDC_CACHE_SIZE];
+
+bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg);
+bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg);
+
+enum {
+	AIF1_PB = 0,
+	AIF1_CAP,
+	AIF2_VIFEED,
+	AIF3_SVA,
+	NUM_CODEC_DAIS,
+};
+
+enum codec_versions {
+	TOMBAK_1_0,
+	TOMBAK_2_0,
+	CONGA,
+	CAJON,
+	CAJON_2_0,
+	DIANGU,
+	DRAX_CDC,
+	UNSUPPORTED,
+};
+
+/* Support different hph modes */
+enum {
+	NORMAL_MODE = 0,
+	HD2_MODE,
+};
+
+enum dig_cdc_notify_event {
+	DIG_CDC_EVENT_INVALID,
+	DIG_CDC_EVENT_CLK_ON,
+	DIG_CDC_EVENT_CLK_OFF,
+	DIG_CDC_EVENT_RX1_MUTE_ON,
+	DIG_CDC_EVENT_RX1_MUTE_OFF,
+	DIG_CDC_EVENT_RX2_MUTE_ON,
+	DIG_CDC_EVENT_RX2_MUTE_OFF,
+	DIG_CDC_EVENT_RX3_MUTE_ON,
+	DIG_CDC_EVENT_RX3_MUTE_OFF,
+	DIG_CDC_EVENT_PRE_RX1_INT_ON,
+	DIG_CDC_EVENT_PRE_RX2_INT_ON,
+	DIG_CDC_EVENT_POST_RX1_INT_OFF,
+	DIG_CDC_EVENT_POST_RX2_INT_OFF,
+	DIG_CDC_EVENT_SSR_DOWN,
+	DIG_CDC_EVENT_SSR_UP,
+	DIG_CDC_EVENT_LAST,
+};
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
new file mode 100644
index 0000000..f140b19
--- /dev/null
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -0,0 +1,2143 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/workqueue.h>
+#include <linux/regmap.h>
+#include <sound/q6afe-v2.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include "sdm660-cdc-registers.h"
+#include "msm-digital-cdc.h"
+#include "msm-cdc-common.h"
+#include "../../msm/sdm660-common.h"
+
+#define DRV_NAME "msm_digital_codec"
+#define MCLK_RATE_9P6MHZ        9600000
+#define MCLK_RATE_12P288MHZ     12288000
+#define TX_MUX_CTL_CUT_OFF_FREQ_MASK	0x30
+#define CF_MIN_3DB_4HZ			0x0
+#define CF_MIN_3DB_75HZ			0x1
+#define CF_MIN_3DB_150HZ		0x2
+
+#define MSM_DIG_CDC_VERSION_ENTRY_SIZE 32
+
+static unsigned long rx_digital_gain_reg[] = {
+	MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
+	MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
+	MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
+};
+
+static unsigned long tx_digital_gain_reg[] = {
+	MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
+	MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
+	MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN,
+	MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN,
+	MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN,
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+
+struct snd_soc_codec *registered_digcodec;
+struct hpf_work tx_hpf_work[NUM_DECIMATORS];
+
+/* Codec supports 2 IIR filters */
+enum {
+	IIR1 = 0,
+	IIR2,
+	IIR_MAX,
+};
+
+static int msm_digcdc_clock_control(bool flag)
+{
+	int ret = -EINVAL;
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+
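+	/*
+	 * MCLK is only enabled here; disabling is deferred to the
+	 * disable_int_mclk0_work delayed work scheduled below rather than
+	 * being done inline.
+	 */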
+	mutex_lock(&pdata->cdc_int_mclk0_mutex);
+	if (flag) {
+		if (atomic_read(&pdata->int_mclk0_enabled) == false) {
+			pdata->digital_cdc_core_clk.enable = 1;
+			ret = afe_set_lpass_clock_v2(
+						AFE_PORT_ID_INT0_MI2S_RX,
+						&pdata->digital_cdc_core_clk);
+			if (ret < 0) {
+				pr_err("%s:failed to enable the MCLK\n",
+				       __func__);
+				mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+				return ret;
+			}
+			pr_debug("enabled digital codec core clk\n");
+			atomic_set(&pdata->int_mclk0_enabled, true);
+			schedule_delayed_work(&pdata->disable_int_mclk0_work,
+					      50);
+		}
+	} else {
+		dev_dbg(registered_digcodec->dev,
+			"disable MCLK, workq to disable set already\n");
+	}
+	mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+	return 0;
+}
+
+static void enable_digital_callback(void *flag)
+{
+	msm_digcdc_clock_control(true);
+}
+
+static void disable_digital_callback(void *flag)
+{
+	pr_debug("disable mclk happens in workq\n");
+}
+
+static int msm_dig_cdc_put_dec_enum(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+			dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *w = wlist->widgets[0];
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	unsigned int dec_mux, decimator;
+	char *dec_name = NULL;
+	char *widget_name = NULL;
+	char *temp;
+	u16 tx_mux_ctl_reg;
+	u8 adc_dmic_sel = 0x0;
+	int ret = 0;
+	char *dec_num;
+
+	if (ucontrol->value.enumerated.item[0] >= e->items) {
+		dev_err(codec->dev, "%s: Invalid enum value: %d\n",
+			__func__, ucontrol->value.enumerated.item[0]);
+		return -EINVAL;
+	}
+	dec_mux = ucontrol->value.enumerated.item[0];
+
+	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+	if (!widget_name) {
+		dev_err(codec->dev, "%s: failed to copy string\n",
+			__func__);
+		return -ENOMEM;
+	}
+	temp = widget_name;
+
+	dec_name = strsep(&widget_name, " ");
+	widget_name = temp;
+	if (!dec_name) {
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, w->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dec_num = strpbrk(dec_name, "12345");
+	if (dec_num == NULL) {
+		dev_err(codec->dev, "%s: Invalid DEC selected\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = kstrtouint(dec_num, 10, &decimator);
+	if (ret < 0) {
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, dec_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_dbg(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n"
+		, __func__, w->name, decimator, dec_mux);
+
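+	/* DEC mux inputs 4-7 are DMIC sources; other inputs select an ADC */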
+	switch (decimator) {
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		if ((dec_mux == 4) || (dec_mux == 5) ||
+		    (dec_mux == 6) || (dec_mux == 7))
+			adc_dmic_sel = 0x1;
+		else
+			adc_dmic_sel = 0x0;
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid Decimator = %u\n",
+			__func__, decimator);
+		ret = -EINVAL;
+		goto out;
+	}
+
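+	/* TX path register blocks are spaced 32 registers apart per decimator */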
+	tx_mux_ctl_reg =
+		MSM89XX_CDC_CORE_TX1_MUX_CTL + 32 * (decimator - 1);
+
+	snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel);
+
+	ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
+
+out:
+	kfree(widget_name);
+	return ret;
+}
+
+static int msm_dig_cdc_codec_config_compander(struct snd_soc_codec *codec,
+					      int interp_n, int event)
+{
+	struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s: event %d shift %d, enabled %d\n",
+		__func__, event, interp_n,
+		dig_cdc->comp_enabled[interp_n]);
+
+	/* compander is not enabled */
+	if (!dig_cdc->comp_enabled[interp_n])
+		return 0;
+
+	switch (dig_cdc->comp_enabled[interp_n]) {
+	case COMPANDER_1:
+		if (SND_SOC_DAPM_EVENT_ON(event)) {
+			/* Enable Compander Clock */
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B1_CTL,
+				1 << interp_n, 1 << interp_n);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0x50);
+			/* add sleep for compander to settle */
+			usleep_range(1000, 1100);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x28);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0xB0);
+
+			/* Enable Compander GPIO */
+			if (dig_cdc->codec_hph_comp_gpio)
+				dig_cdc->codec_hph_comp_gpio(1, codec);
+		} else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+			/* Disable Compander GPIO */
+			if (dig_cdc->codec_hph_comp_gpio)
+				dig_cdc->codec_hph_comp_gpio(0, codec);
+
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x05);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_COMP0_B1_CTL,
+				1 << interp_n, 0);
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x00);
+		}
+		break;
+	default:
+		dev_dbg(codec->dev, "%s: Invalid compander %d\n", __func__,
+				dig_cdc->comp_enabled[interp_n]);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * msm_dig_cdc_hph_comp_cb - registers the HPH compander GPIO callback.
+ *
+ * @codec_hph_comp_gpio: function pointer to set comp gpio at machine driver
+ * @codec: codec pointer
+ *
+ */
+void msm_dig_cdc_hph_comp_cb(
+	int (*codec_hph_comp_gpio)(bool enable, struct snd_soc_codec *codec),
+	struct snd_soc_codec *codec)
+{
+	struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+
+	pr_debug("%s: Enter\n", __func__);
+	dig_cdc->codec_hph_comp_gpio = codec_hph_comp_gpio;
+}
+EXPORT_SYMBOL(msm_dig_cdc_hph_comp_cb);
+
+static int msm_dig_cdc_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+						 struct snd_kcontrol *kcontrol,
+						 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_dig_priv *msm_dig_cdc = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
+
+	if (w->shift >= MSM89XX_RX_MAX || w->shift < 0) {
+		dev_err(codec->dev, "%s: wrong RX index: %d\n",
+			__func__, w->shift);
+		return -EINVAL;
+	}
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		msm_dig_cdc_codec_config_compander(codec, w->shift, event);
+		/* apply the digital gain after the interpolator is enabled */
+		if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
+			snd_soc_write(codec,
+				  rx_digital_gain_reg[w->shift],
+				  snd_soc_read(codec,
+				  rx_digital_gain_reg[w->shift])
+				  );
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		msm_dig_cdc_codec_config_compander(codec, w->shift, event);
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
+			1 << w->shift, 1 << w->shift);
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_CLK_RX_RESET_CTL,
+			1 << w->shift, 0x0);
+		/*
+		 * disable the mute enabled during the PMD of this device
+		 */
+		if ((w->shift == 0) &&
+			(msm_dig_cdc->mute_mask & HPHL_PA_DISABLE)) {
+			pr_debug("disabling HPHL mute\n");
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
+			msm_dig_cdc->mute_mask &= ~(HPHL_PA_DISABLE);
+		} else if ((w->shift == 1) &&
+				(msm_dig_cdc->mute_mask & HPHR_PA_DISABLE)) {
+			pr_debug("disabling HPHR mute\n");
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
+			msm_dig_cdc->mute_mask &= ~(HPHR_PA_DISABLE);
+		} else if ((w->shift == 2) &&
+				(msm_dig_cdc->mute_mask & SPKR_PA_DISABLE)) {
+			pr_debug("disabling SPKR mute\n");
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
+			msm_dig_cdc->mute_mask &= ~(SPKR_PA_DISABLE);
+		}
+	}
+	return 0;
+}
+
+static int msm_dig_cdc_get_iir_enable_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	ucontrol->value.integer.value[0] =
+		(snd_soc_read(codec,
+			    (MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
+		(1 << band_idx)) != 0;
+
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
+		iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_dig_cdc_put_iir_enable_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	int value = ucontrol->value.integer.value[0];
+
+	/* Mask first 5 bits, 6-8 are reserved */
+	snd_soc_update_bits(codec,
+		(MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx),
+			    (1 << band_idx), (value << band_idx));
+
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
+	  iir_idx, band_idx,
+		((snd_soc_read(codec,
+		(MSM89XX_CDC_CORE_IIR1_CTL + 64 * iir_idx)) &
+	  (1 << band_idx)) != 0));
+
+	return 0;
+}
+
+static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
+				   int iir_idx, int band_idx,
+				   int coeff_idx)
+{
+	uint32_t value = 0;
+
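+	/*
+	 * Each 32-bit coefficient is accessed through an 8-bit window:
+	 * B1_CTL holds the byte address and B2_CTL holds the data, so the
+	 * four bytes are read one at a time and assembled little-endian,
+	 * with the top two bits of the last byte masked off as reserved.
+	 */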
+	/* Address does not automatically update if reading */
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t)) & 0x7F);
+
+	value |= snd_soc_read(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx));
+
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 1) & 0x7F);
+
+	value |= (snd_soc_read(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 8);
+
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 2) & 0x7F);
+
+	value |= (snd_soc_read(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx)) << 16);
+
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+		((band_idx * BAND_MAX + coeff_idx)
+		* sizeof(uint32_t) + 3) & 0x7F);
+
+	/* Mask top 2 bits since they are reserved */
+	value |= ((snd_soc_read(codec, (MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL
+		+ 64 * iir_idx)) & 0x3f) << 24);
+
+	return value;
+
+}
+
+static void set_iir_band_coeff(struct snd_soc_codec *codec,
+			       int iir_idx, int band_idx,
+			       uint32_t value)
+{
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+		(value & 0xFF));
+
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+		(value >> 8) & 0xFF);
+
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+		(value >> 16) & 0xFF);
+
+	/* Mask top 2 bits, 7-8 are reserved */
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL + 64 * iir_idx),
+		(value >> 24) & 0x3F);
+
+}
+
+static int msm_dig_cdc_get_iir_band_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	ucontrol->value.integer.value[0] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 0);
+	ucontrol->value.integer.value[1] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 1);
+	ucontrol->value.integer.value[2] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 2);
+	ucontrol->value.integer.value[3] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 3);
+	ucontrol->value.integer.value[4] =
+		get_iir_band_coeff(codec, iir_idx, band_idx, 4);
+
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
+		"%s: IIR #%d band #%d b1 = 0x%x\n"
+		"%s: IIR #%d band #%d b2 = 0x%x\n"
+		"%s: IIR #%d band #%d a1 = 0x%x\n"
+		"%s: IIR #%d band #%d a2 = 0x%x\n",
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[0],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[1],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[2],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[3],
+		__func__, iir_idx, band_idx,
+		(uint32_t)ucontrol->value.integer.value[4]);
+	return 0;
+}
+
+static int msm_dig_cdc_put_iir_band_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int iir_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	/* Mask top bit, it is reserved */
+	/* The address updates automatically for each B2 write */
+	snd_soc_write(codec,
+		(MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL + 64 * iir_idx),
+		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
+
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+			   ucontrol->value.integer.value[0]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+			   ucontrol->value.integer.value[1]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+			   ucontrol->value.integer.value[2]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+			   ucontrol->value.integer.value[3]);
+	set_iir_band_coeff(codec, iir_idx, band_idx,
+			   ucontrol->value.integer.value[4]);
+
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
+		"%s: IIR #%d band #%d b1 = 0x%x\n"
+		"%s: IIR #%d band #%d b2 = 0x%x\n"
+		"%s: IIR #%d band #%d a1 = 0x%x\n"
+		"%s: IIR #%d band #%d a2 = 0x%x\n",
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 0),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 1),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 2),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 3),
+		__func__, iir_idx, band_idx,
+		get_iir_band_coeff(codec, iir_idx, band_idx, 4));
+	return 0;
+}
+
+static void tx_hpf_corner_freq_callback(struct work_struct *work)
+{
+	struct delayed_work *hpf_delayed_work;
+	struct hpf_work *hpf_work;
+	struct snd_soc_codec *codec;
+	struct msm_dig_priv *msm_dig_cdc;
+	u16 tx_mux_ctl_reg;
+	u8 hpf_cut_of_freq;
+
+	hpf_delayed_work = to_delayed_work(work);
+	hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
+	codec = hpf_work->dig_cdc->codec;
+	msm_dig_cdc = hpf_work->dig_cdc;
+	hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
+
+	tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
+			(hpf_work->decimator - 1) * 32;
+
+	dev_dbg(codec->dev, "%s(): decimator %u hpf_cut_of_freq 0x%x\n",
+		 __func__, hpf_work->decimator, (unsigned int)hpf_cut_of_freq);
+	msm_dig_cdc->update_clkdiv(msm_dig_cdc->handle, 0x51);
+
+	snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30, hpf_cut_of_freq << 4);
+}
+
+static int msm_dig_cdc_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int value = 0, reg;
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (w->shift == 0)
+			reg = MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL;
+		else if (w->shift == 1)
+			reg = MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL;
+		else
+			goto ret;
+		value = snd_soc_read(codec, reg);
+		snd_soc_write(codec, reg, value);
+		break;
+	default:
+		pr_err("%s: event = %d not expected\n", __func__, event);
+	}
+ret:
+	return 0;
+}
+
+static int msm_dig_cdc_compander_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+	int comp_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int rx_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	dev_dbg(codec->dev, "%s: msm_dig_cdc->comp[%d]_enabled[%d] = %d\n",
+			__func__, comp_idx, rx_idx,
+			dig_cdc->comp_enabled[rx_idx]);
+
+	ucontrol->value.integer.value[0] = dig_cdc->comp_enabled[rx_idx];
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_dig_cdc_compander_set(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+	int comp_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int rx_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
+
+	if (dig_cdc->version >= DIANGU) {
+		if (!value)
+			dig_cdc->comp_enabled[rx_idx] = 0;
+		else
+			dig_cdc->comp_enabled[rx_idx] = comp_idx;
+	}
+
+	dev_dbg(codec->dev, "%s: msm_dig_cdc->comp[%d]_enabled[%d] = %d\n",
+		__func__, comp_idx, rx_idx,
+		dig_cdc->comp_enabled[rx_idx]);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new compander_kcontrols[] = {
+	SOC_SINGLE_EXT("COMP0 RX1", COMPANDER_1, MSM89XX_RX1, 1, 0,
+	msm_dig_cdc_compander_get, msm_dig_cdc_compander_set),
+
+	SOC_SINGLE_EXT("COMP0 RX2", COMPANDER_1, MSM89XX_RX2, 1, 0,
+	msm_dig_cdc_compander_get, msm_dig_cdc_compander_set),
+
+};
+
+static int msm_dig_cdc_set_interpolator_rate(struct snd_soc_dai *dai,
+					     u8 rx_fs_rate_reg_val,
+					     u32 sample_rate)
+{
+	snd_soc_update_bits(dai->codec,
+			MSM89XX_CDC_CORE_RX1_B5_CTL, 0xF0, rx_fs_rate_reg_val);
+	snd_soc_update_bits(dai->codec,
+			MSM89XX_CDC_CORE_RX2_B5_CTL, 0xF0, rx_fs_rate_reg_val);
+	return 0;
+}
+
+static int msm_dig_cdc_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	u8 tx_fs_rate, rx_fs_rate, rx_clk_fs_rate;
+	int ret;
+
+	dev_dbg(dai->codec->dev,
+		"%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
+		__func__, dai->name, dai->id, params_rate(params),
+		params_channels(params), params_format(params));
+
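+	/*
+	 * Map the PCM rate to register encodings: rx_fs_rate lands in the
+	 * upper nibble of the RX B5 controls via the interpolator-rate
+	 * helper and rx_clk_fs_rate programs CLK_RX_I2S_CTL[3:0].
+	 */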
+	switch (params_rate(params)) {
+	case 8000:
+		tx_fs_rate = 0x00;
+		rx_fs_rate = 0x00;
+		rx_clk_fs_rate = 0x00;
+		break;
+	case 16000:
+		tx_fs_rate = 0x20;
+		rx_fs_rate = 0x20;
+		rx_clk_fs_rate = 0x01;
+		break;
+	case 32000:
+		tx_fs_rate = 0x40;
+		rx_fs_rate = 0x40;
+		rx_clk_fs_rate = 0x02;
+		break;
+	case 44100:
+	case 48000:
+		tx_fs_rate = 0x60;
+		rx_fs_rate = 0x60;
+		rx_clk_fs_rate = 0x03;
+		break;
+	case 96000:
+		tx_fs_rate = 0x80;
+		rx_fs_rate = 0x80;
+		rx_clk_fs_rate = 0x04;
+		break;
+	case 192000:
+		tx_fs_rate = 0xA0;
+		rx_fs_rate = 0xA0;
+		rx_clk_fs_rate = 0x05;
+		break;
+	default:
+		dev_err(dai->codec->dev,
+			"%s: Invalid sampling rate %d\n", __func__,
+			params_rate(params));
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(dai->codec,
+			MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x0F, rx_clk_fs_rate);
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_CAPTURE:
+		break;
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		ret = msm_dig_cdc_set_interpolator_rate(dai, rx_fs_rate,
+						  params_rate(params));
+		if (ret < 0) {
+			dev_err(dai->codec->dev,
+				"%s: set decimator rate failed %d\n", __func__,
+				ret);
+			return ret;
+		}
+		break;
+	default:
+		dev_err(dai->codec->dev,
+			"%s: Invalid stream type %d\n", __func__,
+			substream->stream);
+		return -EINVAL;
+	}
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		snd_soc_update_bits(dai->codec,
+				MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x20);
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		snd_soc_update_bits(dai->codec,
+				MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 0x20, 0x00);
+		break;
+	default:
+		dev_err(dai->codec->dev, "%s: wrong format selected\n",
+				__func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int msm_dig_cdc_codec_enable_dmic(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
+	u8  dmic_clk_en;
+	u16 dmic_clk_reg;
+	s32 *dmic_clk_cnt;
+	unsigned int dmic;
+	int ret;
+	char *dmic_num = strpbrk(w->name, "1234");
+
+	if (dmic_num == NULL) {
+		dev_err(codec->dev, "%s: Invalid DMIC\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = kstrtouint(dmic_num, 10, &dmic);
+	if (ret < 0) {
+		dev_err(codec->dev,
+			"%s: Invalid DMIC line on the codec\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (dmic) {
+	case 1:
+	case 2:
+		dmic_clk_en = 0x01;
+		dmic_clk_cnt = &(dig_cdc->dmic_1_2_clk_cnt);
+		dmic_clk_reg = MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL;
+		dev_dbg(codec->dev,
+			"%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
+			__func__, event,  dmic, *dmic_clk_cnt);
+		break;
+	case 3:
+	case 4:
+		dmic_clk_en = 0x01;
+		dmic_clk_cnt = &(dig_cdc->dmic_3_4_clk_cnt);
+		dmic_clk_reg = MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL;
+		dev_dbg(codec->dev,
+			"%s() event %d DMIC%d dmic_3_4_clk_cnt %d\n",
+			__func__, event,  dmic, *dmic_clk_cnt);
+		break;
+	default:
+		dev_err(codec->dev, "%s: Invalid DMIC Selection\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		(*dmic_clk_cnt)++;
+		if (*dmic_clk_cnt == 1) {
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					0x0E, 0x04);
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					dmic_clk_en, dmic_clk_en);
+		}
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_TX1_DMIC_CTL + (dmic - 1) * 0x20,
+			0x07, 0x02);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		(*dmic_clk_cnt)--;
+		if (*dmic_clk_cnt  == 0)
+			snd_soc_update_bits(codec, dmic_clk_reg,
+					dmic_clk_en, 0);
+		break;
+	}
+	return 0;
+}
+
+static int msm_dig_cdc_codec_enable_dec(struct snd_soc_dapm_widget *w,
+					struct snd_kcontrol *kcontrol,
+					int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct msm_asoc_mach_data *pdata = NULL;
+	unsigned int decimator;
+	struct msm_dig_priv *msm_dig_cdc = snd_soc_codec_get_drvdata(codec);
+	char *dec_name = NULL;
+	char *widget_name = NULL;
+	char *temp;
+	int ret = 0, i;
+	u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
+	u8 dec_hpf_cut_of_freq;
+	int offset;
+	char *dec_num;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+	if (!widget_name)
+		return -ENOMEM;
+	temp = widget_name;
+
+	dec_name = strsep(&widget_name, " ");
+	widget_name = temp;
+	if (!dec_name) {
+		dev_err(codec->dev,
+			"%s: Invalid decimator = %s\n", __func__, w->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dec_num = strpbrk(dec_name, "12345");
+	if (dec_num == NULL) {
+		dev_err(codec->dev, "%s: Invalid Decimator\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = kstrtouint(dec_num, 10, &decimator);
+	if (ret < 0) {
+		dev_err(codec->dev,
+			"%s: Invalid decimator = %s\n", __func__, dec_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_dbg(codec->dev,
+		"%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
+		w->name, dec_name, decimator);
+
+	if (w->reg == MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL) {
+		dec_reset_reg = MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL;
+		offset = 0;
+	} else {
+		dev_err(codec->dev, "%s: Error, incorrect dec\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	tx_vol_ctl_reg = MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
+			 32 * (decimator - 1);
+	tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
+			  32 * (decimator - 1);
+	if (decimator == 5) {
+		tx_vol_ctl_reg = MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG;
+		tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX5_MUX_CTL;
+	}
+
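+	/*
+	 * While the decimator is brought up, the TX HPF cut-off is forced to
+	 * 150 Hz and the configured cut-off is restored roughly 300 ms later
+	 * by tx_hpf_corner_freq_callback.
+	 */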
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable TX digital mute */
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+		for (i = 0; i < NUM_DECIMATORS; i++) {
+			if (decimator == i + 1)
+				msm_dig_cdc->dec_active[i] = true;
+		}
+
+		dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg);
+
+		dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
+
+		tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
+			dec_hpf_cut_of_freq;
+
+		if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) {
+
+			/* set cut-off freq to CF_MIN_3DB_150HZ (0x2) */
+			snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
+					    CF_MIN_3DB_150HZ << 4);
+		}
+		msm_dig_cdc->update_clkdiv(msm_dig_cdc->handle, 0x42);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* enable HPF */
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00);
+
+		if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
+				CF_MIN_3DB_150HZ) {
+
+			schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
+					msecs_to_jiffies(300));
+		}
+		/* apply the digital gain after the decimator is enabled */
+		if ((w->shift) < ARRAY_SIZE(tx_digital_gain_reg))
+			snd_soc_write(codec,
+				  tx_digital_gain_reg[w->shift + offset],
+				  snd_soc_read(codec,
+				  tx_digital_gain_reg[w->shift + offset])
+				  );
+		if (pdata->lb_mode) {
+			pr_debug("%s: loopback mode unmute the DEC\n",
+							__func__);
+			snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+		}
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+		msleep(20);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
+		cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
+			1 << w->shift);
+		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
+			(tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
+		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+		for (i = 0; i < NUM_DECIMATORS; i++) {
+			if (decimator == i + 1)
+				msm_dig_cdc->dec_active[i] = false;
+		}
+		break;
+	}
+out:
+	kfree(widget_name);
+	return ret;
+}
+
+static int msm_dig_cdc_event_notify(struct notifier_block *block,
+				    unsigned long val,
+				    void *data)
+{
+	enum dig_cdc_notify_event event = (enum dig_cdc_notify_event)val;
+	struct snd_soc_codec *codec = registered_digcodec;
+	struct msm_dig_priv *msm_dig_cdc = snd_soc_codec_get_drvdata(codec);
+	struct msm_asoc_mach_data *pdata = NULL;
+
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
+
+	switch (event) {
+	case DIG_CDC_EVENT_CLK_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x03);
+		if (pdata->mclk_freq == MCLK_RATE_12P288MHZ ||
+		    pdata->native_clk_set)
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x00);
+		else if (pdata->mclk_freq == MCLK_RATE_9P6MHZ)
+			snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_TOP_CTL, 0x01, 0x01);
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x01, 0x01);
+		break;
+	case DIG_CDC_EVENT_CLK_OFF:
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x03, 0x00);
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x01, 0x00);
+		break;
+	case DIG_CDC_EVENT_RX1_MUTE_ON:
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x01);
+		msm_dig_cdc->mute_mask |= HPHL_PA_DISABLE;
+		break;
+	case DIG_CDC_EVENT_RX1_MUTE_OFF:
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_RX1_B6_CTL, 0x01, 0x00);
+		msm_dig_cdc->mute_mask &= (~HPHL_PA_DISABLE);
+		break;
+	case DIG_CDC_EVENT_RX2_MUTE_ON:
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x01);
+		msm_dig_cdc->mute_mask |= HPHR_PA_DISABLE;
+		break;
+	case DIG_CDC_EVENT_RX2_MUTE_OFF:
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_RX2_B6_CTL, 0x01, 0x00);
+		msm_dig_cdc->mute_mask &= (~HPHR_PA_DISABLE);
+		break;
+	case DIG_CDC_EVENT_RX3_MUTE_ON:
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x01);
+		msm_dig_cdc->mute_mask |= SPKR_PA_DISABLE;
+		break;
+	case DIG_CDC_EVENT_RX3_MUTE_OFF:
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_RX3_B6_CTL, 0x01, 0x00);
+		msm_dig_cdc->mute_mask &= (~SPKR_PA_DISABLE);
+		break;
+	case DIG_CDC_EVENT_PRE_RX1_INT_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x14);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0x10);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x80);
+		break;
+	case DIG_CDC_EVENT_PRE_RX2_INT_ON:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x14);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0x10);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x80);
+		break;
+	case DIG_CDC_EVENT_POST_RX1_INT_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x00);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0xFF);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x80, 0x00);
+		break;
+	case DIG_CDC_EVENT_POST_RX2_INT_OFF:
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x00);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0xFF);
+		snd_soc_update_bits(codec,
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x80, 0x00);
+		break;
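+	/*
+	 * Across SSR the register cache is put in cache-only mode while the
+	 * subsystem is down, then marked dirty and synced back to hardware
+	 * once it comes back up.
+	 */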
+	case DIG_CDC_EVENT_SSR_DOWN:
+		regcache_cache_only(msm_dig_cdc->regmap, true);
+		break;
+	case DIG_CDC_EVENT_SSR_UP:
+		regcache_cache_only(msm_dig_cdc->regmap, false);
+		regcache_mark_dirty(msm_dig_cdc->regmap);
+		regcache_sync(msm_dig_cdc->regmap);
+		break;
+	case DIG_CDC_EVENT_INVALID:
+	default:
+		break;
+	}
+	return 0;
+}
+
+static ssize_t msm_dig_codec_version_read(struct snd_info_entry *entry,
+					  void *file_private_data,
+					  struct file *file,
+					  char __user *buf, size_t count,
+					  loff_t pos)
+{
+	struct msm_dig_priv *msm_dig;
+	char buffer[MSM_DIG_CDC_VERSION_ENTRY_SIZE];
+	int len = 0;
+
+	msm_dig = (struct msm_dig_priv *) entry->private_data;
+	if (!msm_dig) {
+		pr_err("%s: msm_dig priv is null\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (msm_dig->version) {
+	case DRAX_CDC:
+		len = snprintf(buffer, sizeof(buffer), "SDM660-CDC_1_0\n");
+		break;
+	default:
+		len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
+	}
+
+	return simple_read_from_buffer(buf, count, &pos, buffer, len);
+}
+
+static struct snd_info_entry_ops msm_dig_codec_info_ops = {
+	.read = msm_dig_codec_version_read,
+};
+
+/*
+ * msm_dig_codec_info_create_codec_entry - creates msm_dig module
+ * @codec_root: The parent directory
+ * @codec: Codec instance
+ *
+ * Creates msm_dig module and version entry under the given
+ * parent directory.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int msm_dig_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					  struct snd_soc_codec *codec)
+{
+	struct snd_info_entry *version_entry;
+	struct msm_dig_priv *msm_dig;
+	struct snd_soc_card *card;
+
+	if (!codec_root || !codec)
+		return -EINVAL;
+
+	msm_dig = snd_soc_codec_get_drvdata(codec);
+	card = codec->component.card;
+	msm_dig->entry = snd_register_module_info(codec_root->module,
+						  "msm_digital_codec",
+						  codec_root);
+	if (!msm_dig->entry) {
+		dev_dbg(codec->dev, "%s: failed to create msm_digital entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry = snd_info_create_card_entry(card->snd_card,
+						   "version",
+						   msm_dig->entry);
+	if (!version_entry) {
+		dev_dbg(codec->dev, "%s: failed to create msm_digital version entry\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	version_entry->private_data = msm_dig;
+	version_entry->size = MSM_DIG_CDC_VERSION_ENTRY_SIZE;
+	version_entry->content = SNDRV_INFO_CONTENT_DATA;
+	version_entry->c.ops = &msm_dig_codec_info_ops;
+
+	if (snd_info_register(version_entry) < 0) {
+		snd_info_free_entry(version_entry);
+		return -ENOMEM;
+	}
+	msm_dig->version_entry = version_entry;
+	if (msm_dig->get_cdc_version)
+		msm_dig->version = msm_dig->get_cdc_version(msm_dig->handle);
+	else
+		msm_dig->version = DRAX_CDC;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_dig_codec_info_create_codec_entry);
+
+static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
+{
+	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	int i, ret;
+
+	msm_dig_cdc->codec = codec;
+
+	snd_soc_add_codec_controls(codec, compander_kcontrols,
+			ARRAY_SIZE(compander_kcontrols));
+
+	for (i = 0; i < NUM_DECIMATORS; i++) {
+		tx_hpf_work[i].dig_cdc = msm_dig_cdc;
+		tx_hpf_work[i].decimator = i + 1;
+		INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
+			tx_hpf_corner_freq_callback);
+	}
+
+	for (i = 0; i < MSM89XX_RX_MAX; i++)
+		msm_dig_cdc->comp_enabled[i] = COMPANDER_NONE;
+
+	/* Register event notifier */
+	msm_dig_cdc->nblock.notifier_call = msm_dig_cdc_event_notify;
+	if (msm_dig_cdc->register_notifier) {
+		ret = msm_dig_cdc->register_notifier(msm_dig_cdc->handle,
+						     &msm_dig_cdc->nblock,
+						     true);
+		if (ret) {
+			pr_err("%s: Failed to register notifier %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+	registered_digcodec = codec;
+
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1 Capture");
+	snd_soc_dapm_ignore_suspend(dapm, "ADC1_IN");
+	snd_soc_dapm_ignore_suspend(dapm, "ADC2_IN");
+	snd_soc_dapm_ignore_suspend(dapm, "ADC3_IN");
+	snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX1");
+	snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX2");
+	snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX3");
+
+	return 0;
+}
+
+static int msm_dig_cdc_soc_remove(struct snd_soc_codec *codec)
+{
+	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+	if (msm_dig_cdc->register_notifier)
+		msm_dig_cdc->register_notifier(msm_dig_cdc->handle,
+					       &msm_dig_cdc->nblock,
+					       false);
+	iounmap(msm_dig_cdc->dig_base);
+	return 0;
+}
+
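+/* DAPM routes below are {sink, control mux/switch text, source} triples */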
+static const struct snd_soc_dapm_route audio_dig_map[] = {
+	{"RX_I2S_CLK", NULL, "CDC_CONN"},
+	{"I2S RX1", NULL, "RX_I2S_CLK"},
+	{"I2S RX2", NULL, "RX_I2S_CLK"},
+	{"I2S RX3", NULL, "RX_I2S_CLK"},
+
+	{"I2S TX1", NULL, "TX_I2S_CLK"},
+	{"I2S TX2", NULL, "TX_I2S_CLK"},
+	{"I2S TX3", NULL, "TX_I2S_CLK"},
+	{"I2S TX4", NULL, "TX_I2S_CLK"},
+	{"I2S TX5", NULL, "TX_I2S_CLK"},
+	{"I2S TX6", NULL, "TX_I2S_CLK"},
+
+	{"I2S TX1", NULL, "DEC1 MUX"},
+	{"I2S TX2", NULL, "DEC2 MUX"},
+	{"I2S TX3", NULL, "I2S TX2 INP1"},
+	{"I2S TX4", NULL, "I2S TX2 INP2"},
+	{"I2S TX5", NULL, "DEC3 MUX"},
+	{"I2S TX6", NULL, "I2S TX3 INP2"},
+
+	{"I2S TX2 INP1", "RX_MIX1", "RX1 MIX2"},
+	{"I2S TX2 INP1", "DEC3", "DEC3 MUX"},
+	{"I2S TX2 INP2", "RX_MIX2", "RX2 MIX2"},
+	{"I2S TX2 INP2", "RX_MIX3", "RX3 MIX1"},
+	{"I2S TX2 INP2", "DEC4", "DEC4 MUX"},
+	{"I2S TX3 INP2", "DEC4", "DEC4 MUX"},
+	{"I2S TX3 INP2", "DEC5", "DEC5 MUX"},
+
+	{"PDM_OUT_RX1", NULL, "RX1 CHAIN"},
+	{"PDM_OUT_RX2", NULL, "RX2 CHAIN"},
+	{"PDM_OUT_RX3", NULL, "RX3 CHAIN"},
+
+	{"RX1 CHAIN", NULL, "RX1 MIX2"},
+	{"RX2 CHAIN", NULL, "RX2 MIX2"},
+	{"RX3 CHAIN", NULL, "RX3 MIX1"},
+
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
+	{"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
+	{"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
+	{"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
+	{"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
+	{"RX1 MIX2", NULL, "RX1 MIX1"},
+	{"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
+	{"RX2 MIX2", NULL, "RX2 MIX1"},
+	{"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
+
+	{"RX1 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP1", "RX3", "I2S RX3"},
+	{"RX1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX1 MIX1 INP1", "IIR2", "IIR2"},
+	{"RX1 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP2", "RX3", "I2S RX3"},
+	{"RX1 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX1 MIX1 INP2", "IIR2", "IIR2"},
+	{"RX1 MIX1 INP3", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP3", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP3", "RX3", "I2S RX3"},
+
+	{"RX2 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX2 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX2 MIX1 INP1", "RX3", "I2S RX3"},
+	{"RX2 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX2 MIX1 INP1", "IIR2", "IIR2"},
+	{"RX2 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX2 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX2 MIX1 INP2", "RX3", "I2S RX3"},
+	{"RX2 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX2 MIX1 INP2", "IIR2", "IIR2"},
+
+	{"RX3 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX3 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX3 MIX1 INP1", "RX3", "I2S RX3"},
+	{"RX3 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX3 MIX1 INP1", "IIR2", "IIR2"},
+	{"RX3 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX3 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX3 MIX1 INP2", "RX3", "I2S RX3"},
+	{"RX3 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX3 MIX1 INP2", "IIR2", "IIR2"},
+
+	{"RX1 MIX2 INP1", "IIR1", "IIR1"},
+	{"RX2 MIX2 INP1", "IIR1", "IIR1"},
+	{"RX1 MIX2 INP1", "IIR2", "IIR2"},
+	{"RX2 MIX2 INP1", "IIR2", "IIR2"},
+
+	/* Decimator Inputs */
+	{"DEC1 MUX", "DMIC1", "DMIC1"},
+	{"DEC1 MUX", "DMIC2", "DMIC2"},
+	{"DEC1 MUX", "DMIC3", "DMIC3"},
+	{"DEC1 MUX", "DMIC4", "DMIC4"},
+	{"DEC1 MUX", "ADC1", "ADC1_IN"},
+	{"DEC1 MUX", "ADC2", "ADC2_IN"},
+	{"DEC1 MUX", "ADC3", "ADC3_IN"},
+	{"DEC1 MUX", NULL, "CDC_CONN"},
+
+	{"DEC2 MUX", "DMIC1", "DMIC1"},
+	{"DEC2 MUX", "DMIC2", "DMIC2"},
+	{"DEC2 MUX", "DMIC3", "DMIC3"},
+	{"DEC2 MUX", "DMIC4", "DMIC4"},
+	{"DEC2 MUX", "ADC1", "ADC1_IN"},
+	{"DEC2 MUX", "ADC2", "ADC2_IN"},
+	{"DEC2 MUX", "ADC3", "ADC3_IN"},
+	{"DEC2 MUX", NULL, "CDC_CONN"},
+
+	{"DEC3 MUX", "DMIC1", "DMIC1"},
+	{"DEC3 MUX", "DMIC2", "DMIC2"},
+	{"DEC3 MUX", "DMIC3", "DMIC3"},
+	{"DEC3 MUX", "DMIC4", "DMIC4"},
+	{"DEC3 MUX", "ADC1", "ADC1_IN"},
+	{"DEC3 MUX", "ADC2", "ADC2_IN"},
+	{"DEC3 MUX", "ADC3", "ADC3_IN"},
+	{"DEC3 MUX", NULL, "CDC_CONN"},
+
+	{"DEC4 MUX", "DMIC1", "DMIC1"},
+	{"DEC4 MUX", "DMIC2", "DMIC2"},
+	{"DEC4 MUX", "DMIC3", "DMIC3"},
+	{"DEC4 MUX", "DMIC4", "DMIC4"},
+	{"DEC4 MUX", "ADC1", "ADC1_IN"},
+	{"DEC4 MUX", "ADC2", "ADC2_IN"},
+	{"DEC4 MUX", "ADC3", "ADC3_IN"},
+	{"DEC4 MUX", NULL, "CDC_CONN"},
+
+	{"DEC5 MUX", "DMIC1", "DMIC1"},
+	{"DEC5 MUX", "DMIC2", "DMIC2"},
+	{"DEC5 MUX", "DMIC3", "DMIC3"},
+	{"DEC5 MUX", "DMIC4", "DMIC4"},
+	{"DEC5 MUX", "ADC1", "ADC1_IN"},
+	{"DEC5 MUX", "ADC2", "ADC2_IN"},
+	{"DEC5 MUX", "ADC3", "ADC3_IN"},
+	{"DEC5 MUX", NULL, "CDC_CONN"},
+
+	{"IIR1", NULL, "IIR1 INP1 MUX"},
+	{"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2", NULL, "IIR2 INP1 MUX"},
+	{"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
+};
+
+static const char * const i2s_tx2_inp1_text[] = {
+	"ZERO", "RX_MIX1", "DEC3"
+};
+
+static const char * const i2s_tx2_inp2_text[] = {
+	"ZERO", "RX_MIX2", "RX_MIX3", "DEC4"
+};
+
+static const char * const i2s_tx3_inp2_text[] = {
+	"DEC4", "DEC5"
+};
+
+static const char * const rx_mix1_text[] = {
+	"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
+};
+
+static const char * const rx_mix2_text[] = {
+	"ZERO", "IIR1", "IIR2"
+};
+
+static const char * const dec_mux_text[] = {
+	"ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2", "DMIC3", "DMIC4"
+};
+
+static const char * const iir_inp1_text[] = {
+	"ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3", "DEC3", "DEC4"
+};
+
+/* I2S TX MUXes */
+static const struct soc_enum i2s_tx2_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL,
+		2, 3, i2s_tx2_inp1_text);
+
+static const struct soc_enum i2s_tx2_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL,
+		0, 4, i2s_tx2_inp2_text);
+
+static const struct soc_enum i2s_tx3_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL,
+		4, 2, i2s_tx3_inp2_text);
+
+/* RX1 MIX1 */
+static const struct soc_enum rx_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B1_CTL,
+		3, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B2_CTL,
+		0, 6, rx_mix1_text);
+
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX1_B3_CTL,
+		0, 3, rx_mix2_text);
+
+/* RX2 MIX1 */
+static const struct soc_enum rx2_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx2_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
+		3, 6, rx_mix1_text);
+
+static const struct soc_enum rx2_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B1_CTL,
+		0, 6, rx_mix1_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX2_B3_CTL,
+		0, 3, rx_mix2_text);
+
+/* RX3 MIX1 */
+static const struct soc_enum rx3_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx3_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
+		3, 6, rx_mix1_text);
+
+static const struct soc_enum rx3_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_RX3_B1_CTL,
+		0, 6, rx_mix1_text);
+
+/* DEC */
+static const struct soc_enum dec1_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
+		0, 8, dec_mux_text);
+
+static const struct soc_enum dec2_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B1_CTL,
+		3, 8, dec_mux_text);
+
+static const struct soc_enum dec3_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B2_CTL,
+		0, 8, dec_mux_text);
+
+static const struct soc_enum dec4_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B2_CTL,
+		3, 8, dec_mux_text);
+
+static const struct soc_enum decsva_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_TX_B3_CTL,
+		0, 8, dec_mux_text);
+
+static const struct soc_enum iir1_inp1_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL,
+		0, 8, iir_inp1_text);
+
+static const struct soc_enum iir2_inp1_mux_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL,
+		0, 8, iir_inp1_text);
+
+/* cut-off frequency for the high pass filter */
+static const char * const cf_text[] = {
+	"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
+};
+
+static const struct soc_enum cf_rxmix1_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX1_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix2_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX2_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix3_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_RX3_B4_CTL, 0, 3, cf_text);
+
+static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
+
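+/*
+ * Decimator mux kcontrols reuse the standard DAPM enum info/get handlers but
+ * route put through msm_dig_cdc_put_dec_enum so the driver can apply its own
+ * sequencing when the decimator source selection changes.
+ */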
+#define MSM89XX_DEC_ENUM(xname, xenum) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_enum_double, \
+	.get = snd_soc_dapm_get_enum_double, \
+	.put = msm_dig_cdc_put_dec_enum, \
+	.private_value = (unsigned long)&xenum }
+
+static const struct snd_kcontrol_new dec1_mux =
+	MSM89XX_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum);
+
+static const struct snd_kcontrol_new dec2_mux =
+	MSM89XX_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum);
+
+static const struct snd_kcontrol_new dec3_mux =
+	MSM89XX_DEC_ENUM("DEC3 MUX Mux", dec3_mux_enum);
+
+static const struct snd_kcontrol_new dec4_mux =
+	MSM89XX_DEC_ENUM("DEC4 MUX Mux", dec4_mux_enum);
+
+static const struct snd_kcontrol_new decsva_mux =
+	MSM89XX_DEC_ENUM("DEC5 MUX Mux", decsva_mux_enum);
+
+static const struct snd_kcontrol_new i2s_tx2_inp1_mux =
+	SOC_DAPM_ENUM("I2S TX2 INP1 Mux", i2s_tx2_inp1_chain_enum);
+
+static const struct snd_kcontrol_new i2s_tx2_inp2_mux =
+	SOC_DAPM_ENUM("I2S TX2 INP2 Mux", i2s_tx2_inp2_chain_enum);
+
+static const struct snd_kcontrol_new i2s_tx3_inp2_mux =
+	SOC_DAPM_ENUM("I2S TX3 INP2 Mux", i2s_tx3_inp2_chain_enum);
+
+static const struct snd_kcontrol_new iir1_inp1_mux =
+	SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp1_mux =
+	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP3 Mux", rx2_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP3 Mux", rx3_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
+	SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
+	SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
+
+static const struct snd_soc_dapm_widget msm_dig_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN("I2S RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("I2S RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("I2S RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_OUT("I2S TX1", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX2", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX3", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX4", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX5", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX6", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_MIXER_E("RX1 MIX2", MSM89XX_CDC_CORE_CLK_RX_B1_CTL,
+			     MSM89XX_RX1, 0, NULL, 0,
+			     msm_dig_cdc_codec_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX2 MIX2", MSM89XX_CDC_CORE_CLK_RX_B1_CTL,
+			     MSM89XX_RX2, 0, NULL, 0,
+			     msm_dig_cdc_codec_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX3 MIX1", MSM89XX_CDC_CORE_CLK_RX_B1_CTL,
+			     MSM89XX_RX3, 0, NULL, 0,
+			     msm_dig_cdc_codec_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER("RX1 CHAIN", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX2 CHAIN", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX3 CHAIN", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+		&rx_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx2_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx2_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+		&rx2_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx3_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx3_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+		&rx3_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+		&rx1_mix2_inp1_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+		&rx2_mix2_inp1_mux),
+
+	SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, MSM89XX_CDC_CORE_CLK_OTHR_CTL,
+		2, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX_E("DEC1 MUX",
+		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 0, 0,
+		&dec1_mux, msm_dig_cdc_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC2 MUX",
+		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 1, 0,
+		&dec2_mux, msm_dig_cdc_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC3 MUX",
+		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 2, 0,
+		&dec3_mux, msm_dig_cdc_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC4 MUX",
+		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 3, 0,
+		&dec4_mux, msm_dig_cdc_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC5 MUX",
+		MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL, 4, 0,
+		&decsva_mux, msm_dig_cdc_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	/* Sidetone */
+	SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
+	SND_SOC_DAPM_PGA_E("IIR1", MSM89XX_CDC_CORE_CLK_SD_CTL, 0, 0, NULL, 0,
+		msm_dig_cdc_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
+	SND_SOC_DAPM_PGA_E("IIR2", MSM89XX_CDC_CORE_CLK_SD_CTL, 1, 0, NULL, 0,
+		msm_dig_cdc_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
+
+	SND_SOC_DAPM_SUPPLY("RX_I2S_CLK",
+		MSM89XX_CDC_CORE_CLK_RX_I2S_CTL, 4, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("TX_I2S_CLK",
+		MSM89XX_CDC_CORE_CLK_TX_I2S_CTL, 4, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX("I2S TX2 INP1", SND_SOC_NOPM, 0, 0,
+			&i2s_tx2_inp1_mux),
+	SND_SOC_DAPM_MUX("I2S TX2 INP2", SND_SOC_NOPM, 0, 0,
+			&i2s_tx2_inp2_mux),
+	SND_SOC_DAPM_MUX("I2S TX3 INP2", SND_SOC_NOPM, 0, 0,
+			&i2s_tx3_inp2_mux),
+
+	/* Digital Mic Inputs */
+	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
+		msm_dig_cdc_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_INPUT("ADC1_IN"),
+	SND_SOC_DAPM_INPUT("ADC2_IN"),
+	SND_SOC_DAPM_INPUT("ADC3_IN"),
+	SND_SOC_DAPM_OUTPUT("PDM_OUT_RX1"),
+	SND_SOC_DAPM_OUTPUT("PDM_OUT_RX2"),
+	SND_SOC_DAPM_OUTPUT("PDM_OUT_RX3"),
+};
+
+static const struct soc_enum cf_dec1_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX1_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec2_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX2_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec3_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX3_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec4_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX4_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_decsva_enum =
+	SOC_ENUM_SINGLE(MSM89XX_CDC_CORE_TX5_MUX_CTL, 4, 3, cf_text);
+
+static const struct snd_kcontrol_new msm_dig_snd_controls[] = {
+	SOC_SINGLE_SX_TLV("DEC1 Volume",
+		MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC2 Volume",
+		  MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC3 Volume",
+		  MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC4 Volume",
+		  MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("DEC5 Volume",
+		  MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN,
+		0, -84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("IIR1 INP1 Volume",
+			  MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL,
+			0,  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP2 Volume",
+			  MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL,
+			0,  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP3 Volume",
+			  MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL,
+			0,  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR1 INP4 Volume",
+			  MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL,
+			0,  -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("IIR2 INP1 Volume",
+			  MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL,
+			0,  -84, 40, digital_gain),
+
+	SOC_SINGLE_SX_TLV("RX1 Digital Volume",
+		MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX2 Digital Volume",
+		MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL,
+		0, -84, 40, digital_gain),
+	SOC_SINGLE_SX_TLV("RX3 Digital Volume",
+		MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL,
+		0, -84, 40, digital_gain),
+
+	SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+
+	SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+	SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0,
+		msm_dig_cdc_get_iir_enable_audio_mixer,
+		msm_dig_cdc_put_iir_enable_audio_mixer),
+
+	SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+
+	SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
+		msm_dig_cdc_get_iir_band_audio_mixer,
+		msm_dig_cdc_put_iir_band_audio_mixer),
+
+	SOC_SINGLE("RX1 HPF Switch",
+		MSM89XX_CDC_CORE_RX1_B5_CTL, 2, 1, 0),
+	SOC_SINGLE("RX2 HPF Switch",
+		MSM89XX_CDC_CORE_RX2_B5_CTL, 2, 1, 0),
+	SOC_SINGLE("RX3 HPF Switch",
+		MSM89XX_CDC_CORE_RX3_B5_CTL, 2, 1, 0),
+
+	SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
+	SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
+	SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
+
+	SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
+	SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
+	SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
+	SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
+	SOC_ENUM("TX5 HPF cut off", cf_decsva_enum),
+	SOC_SINGLE("TX1 HPF Switch",
+		MSM89XX_CDC_CORE_TX1_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX2 HPF Switch",
+		MSM89XX_CDC_CORE_TX2_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX3 HPF Switch",
+		MSM89XX_CDC_CORE_TX3_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX4 HPF Switch",
+		MSM89XX_CDC_CORE_TX4_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX5 HPF Switch",
+		MSM89XX_CDC_CORE_TX5_MUX_CTL, 3, 1, 0),
+};
+
+static int msm_dig_cdc_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = NULL;
+	u16 tx_vol_ctl_reg = 0;
+	u8 decimator = 0, i;
+	struct msm_dig_priv *dig_cdc;
+
+	pr_debug("%s: Digital Mute val = %d\n", __func__, mute);
+
+	if (!dai || !dai->codec) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	codec = dai->codec;
+	dig_cdc = snd_soc_codec_get_drvdata(codec);
+
+	if (dai->id == AIF1_PB) {
+		dev_dbg(codec->dev, "%s: not a capture use case, skipping\n",
+			__func__);
+		return 0;
+	}
+
+	mute = (mute) ? 1 : 0;
+	if (!mute) {
+		/*
+		 * 15 ms is an empirical value for the mute time,
+		 * arrived at by checking that the pop level is
+		 * inaudible.
+		 */
+		usleep_range(15000, 15010);
+	}
+
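+	/* AIF3 (SVA) capture is routed through the dedicated TX5 path */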
+	if (dai->id == AIF3_SVA) {
+		snd_soc_update_bits(codec,
+			MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG, 0x01, mute);
+		goto ret;
+	}
+	for (i = 0; i < (NUM_DECIMATORS - 1); i++) {
+		if (dig_cdc->dec_active[i])
+			decimator = i + 1;
+		if (decimator && decimator < NUM_DECIMATORS) {
+			/* mute/unmute decimators corresponding to Tx DAI's */
+			tx_vol_ctl_reg =
+			MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
+					32 * (decimator - 1);
+			snd_soc_update_bits(codec, tx_vol_ctl_reg,
+					    0x01, mute);
+		}
+		decimator = 0;
+	}
+ret:
+	return 0;
+}
+
+static struct snd_soc_dai_ops msm_dig_dai_ops = {
+	.hw_params = msm_dig_cdc_hw_params,
+	.digital_mute = msm_dig_cdc_digital_mute,
+};
+
+static struct snd_soc_dai_driver msm_codec_dais[] = {
+	{
+		.name = "msm_dig_cdc_dai_rx1",
+		.id = AIF1_PB,
+		.playback = { /* Support maximum range */
+			.stream_name = "AIF1 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		 .ops = &msm_dig_dai_ops,
+	},
+	{
+		.name = "msm_dig_cdc_dai_tx1",
+		.id = AIF1_CAP,
+		.capture = { /* Support maximum range */
+			.stream_name = "AIF1 Capture",
+			.channels_min = 1,
+			.channels_max = 4,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		 .ops = &msm_dig_dai_ops,
+	},
+	{
+		.name = "msm_dig_cdc_dai_tx2",
+		.id = AIF3_SVA,
+		.capture = { /* Support maximum range */
+			.stream_name = "AIF2 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		 .ops = &msm_dig_dai_ops,
+	},
+	{
+		.name = "msm_dig_cdc_dai_vifeed",
+		.id = AIF2_VIFEED,
+		.capture = { /* Support maximum range */
+			.stream_name = "AIF2 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		 .ops = &msm_dig_dai_ops,
+	},
+};
+
+static struct regmap *msm_digital_get_regmap(struct device *dev)
+{
+	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
+
+	return msm_dig_cdc->regmap;
+}
+
+static int msm_dig_cdc_suspend(struct snd_soc_codec *codec)
+{
+	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+	msm_dig_cdc->dapm_bias_off = 1;
+	return 0;
+}
+
+static int msm_dig_cdc_resume(struct snd_soc_codec *codec)
+{
+	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+	msm_dig_cdc->dapm_bias_off = 0;
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_msm_dig_codec = {
+	.probe  = msm_dig_cdc_soc_probe,
+	.remove = msm_dig_cdc_soc_remove,
+	.suspend = msm_dig_cdc_suspend,
+	.resume = msm_dig_cdc_resume,
+	.controls = msm_dig_snd_controls,
+	.num_controls = ARRAY_SIZE(msm_dig_snd_controls),
+	.dapm_widgets = msm_dig_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(msm_dig_dapm_widgets),
+	.dapm_routes = audio_dig_map,
+	.num_dapm_routes = ARRAY_SIZE(audio_dig_map),
+	.get_regmap = msm_digital_get_regmap,
+};
+
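+/*
+ * Codec core registers are 32-bit MMIO words at a 4-byte stride; a flat
+ * register cache is kept so state can be restored with regcache_sync()
+ * after subsystem restart.
+ */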
+const struct regmap_config msm_digital_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.lock = enable_digital_callback,
+	.unlock = disable_digital_callback,
+	.cache_type = REGCACHE_FLAT,
+	.reg_defaults = msm89xx_cdc_core_defaults,
+	.num_reg_defaults = MSM89XX_CDC_CORE_MAX_REGISTER,
+	.readable_reg = msm89xx_cdc_core_readable_reg,
+	.volatile_reg = msm89xx_cdc_core_volatile_reg,
+	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian = REGMAP_ENDIAN_NATIVE,
+	.max_register = MSM89XX_CDC_CORE_MAX_REGISTER,
+};
+
+static int msm_dig_cdc_probe(struct platform_device *pdev)
+{
+	int ret;
+	u32 dig_cdc_addr;
+	struct msm_dig_priv *msm_dig_cdc;
+	struct dig_ctrl_platform_data *pdata;
+
+	msm_dig_cdc = devm_kzalloc(&pdev->dev, sizeof(struct msm_dig_priv),
+			      GFP_KERNEL);
+	if (!msm_dig_cdc)
+		return -ENOMEM;
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s: pdata from parent is NULL\n",
+			__func__);
+		ret = -EINVAL;
+		goto rtn;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "reg",
+					&dig_cdc_addr);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: could not find %s entry in dt\n",
+			__func__, "reg");
+		return ret;
+	}
+
+	msm_dig_cdc->dig_base = ioremap(dig_cdc_addr,
+					MSM89XX_CDC_CORE_MAX_REGISTER);
+	if (msm_dig_cdc->dig_base == NULL) {
+		dev_err(&pdev->dev, "%s ioremap failed\n", __func__);
+		return -ENOMEM;
+	}
+	msm_dig_cdc->regmap =
+		devm_regmap_init_mmio_clk(&pdev->dev, NULL,
+			msm_dig_cdc->dig_base, &msm_digital_regmap_config);
+
+	msm_dig_cdc->update_clkdiv = pdata->update_clkdiv;
+	msm_dig_cdc->get_cdc_version = pdata->get_cdc_version;
+	msm_dig_cdc->handle = pdata->handle;
+	msm_dig_cdc->register_notifier = pdata->register_notifier;
+
+	dev_set_drvdata(&pdev->dev, msm_dig_cdc);
+	snd_soc_register_codec(&pdev->dev, &soc_msm_dig_codec,
+				msm_codec_dais, ARRAY_SIZE(msm_codec_dais));
+	dev_dbg(&pdev->dev, "%s: registered DIG CODEC 0x%x\n",
+			__func__, dig_cdc_addr);
+rtn:
+	return ret;
+}
+
+static int msm_dig_cdc_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_dig_suspend(struct device *dev)
+{
+	struct msm_asoc_mach_data *pdata =
+	snd_soc_card_get_drvdata(registered_digcodec->component.card);
+	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
+
+	if (msm_dig_cdc->dapm_bias_off) {
+		pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
+			__func__, atomic_read(&pdata->int_mclk0_rsc_ref),
+			atomic_read(&pdata->int_mclk0_enabled));
+
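+		/*
+		 * If the internal MCLK is still enabled, cancel the deferred
+		 * disable work and switch the clock off now before suspend.
+		 */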
+		if (atomic_read(&pdata->int_mclk0_enabled) == true) {
+			cancel_delayed_work_sync(
+				&pdata->disable_int_mclk0_work);
+			mutex_lock(&pdata->cdc_int_mclk0_mutex);
+			pdata->digital_cdc_core_clk.enable = 0;
+			afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
+						&pdata->digital_cdc_core_clk);
+			atomic_set(&pdata->int_mclk0_enabled, false);
+			mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+		}
+	}
+
+	return 0;
+}
+
+static int msm_dig_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops msm_dig_pm_ops = {
+	.suspend = msm_dig_suspend,
+	.resume = msm_dig_resume,
+};
+#endif
+
+static const struct of_device_id msm_dig_cdc_of_match[] = {
+	{.compatible = "qcom,msm-digital-codec"},
+	{},
+};
+
+static struct platform_driver msm_digcodec_driver = {
+	.driver                 = {
+		.owner          = THIS_MODULE,
+		.name           = DRV_NAME,
+		.of_match_table = msm_dig_cdc_of_match,
+#ifdef CONFIG_PM
+	.pm = &msm_dig_pm_ops,
+#endif
+	},
+	.probe                  = msm_dig_cdc_probe,
+	.remove                 = msm_dig_cdc_remove,
+};
+module_platform_driver(msm_digcodec_driver);
+
+MODULE_DESCRIPTION("MSM Audio Digital codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
new file mode 100644
index 0000000..f0e7a9c
--- /dev/null
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
@@ -0,0 +1,91 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_DIGITAL_CDC_H
+#define MSM_DIGITAL_CDC_H
+
+#define HPHL_PA_DISABLE (0x01 << 1)
+#define HPHR_PA_DISABLE (0x01 << 2)
+#define SPKR_PA_DISABLE (0x01 << 3)
+
+#define NUM_DECIMATORS	5
+/* Codec supports 1 compander */
+enum {
+	COMPANDER_NONE = 0,
+	COMPANDER_1, /* HPHL/R */
+	COMPANDER_MAX,
+};
+
+/* Number of output I2S ports */
+enum {
+	MSM89XX_RX1 = 0,
+	MSM89XX_RX2,
+	MSM89XX_RX3,
+	MSM89XX_RX_MAX,
+};
+
+struct msm_dig_priv {
+	struct snd_soc_codec *codec;
+	u32 comp_enabled[MSM89XX_RX_MAX];
+	int (*codec_hph_comp_gpio)(bool enable, struct snd_soc_codec *codec);
+	s32 dmic_1_2_clk_cnt;
+	s32 dmic_3_4_clk_cnt;
+	bool dec_active[NUM_DECIMATORS];
+	int version;
+	/* Entry for version info */
+	struct snd_info_entry *entry;
+	struct snd_info_entry *version_entry;
+	char __iomem *dig_base;
+	struct regmap *regmap;
+	struct notifier_block nblock;
+	u32 mute_mask;
+	int dapm_bias_off;
+	void *handle;
+	void (*update_clkdiv)(void *handle, int val);
+	int (*get_cdc_version)(void *handle);
+	int (*register_notifier)(void *handle,
+				 struct notifier_block *nblock,
+				 bool enable);
+};
+
+struct dig_ctrl_platform_data {
+	void *handle;
+	void (*update_clkdiv)(void *handle, int val);
+	int (*get_cdc_version)(void *handle);
+	int (*register_notifier)(void *handle,
+				 struct notifier_block *nblock,
+				 bool enable);
+};
+
+struct hpf_work {
+	struct msm_dig_priv *dig_cdc;
+	u32 decimator;
+	u8 tx_hpf_cut_of_freq;
+	struct delayed_work dwork;
+};
+
+/* Codec supports 5 bands */
+enum {
+	BAND1 = 0,
+	BAND2,
+	BAND3,
+	BAND4,
+	BAND5,
+	BAND_MAX,
+};
+
+extern void msm_dig_cdc_hph_comp_cb(
+		int (*codec_hph_comp_gpio)(
+			bool enable, struct snd_soc_codec *codec),
+		struct snd_soc_codec *codec);
+int msm_dig_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
+					  struct snd_soc_codec *codec);
+#endif
diff --git a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.c b/sound/soc/codecs/sdm660_cdc/sdm660-cdc-irq.c
similarity index 88%
rename from sound/soc/codecs/msm8x16/msm8916-wcd-irq.c
rename to sound/soc/codecs/sdm660_cdc/sdm660-cdc-irq.c
index a722842..ee4ec34 100644
--- a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.c
+++ b/sound/soc/codecs/sdm660_cdc/sdm660-cdc-irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/of_irq.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
@@ -25,9 +26,9 @@
 #include <linux/pm_qos.h>
 #include <soc/qcom/pm.h>
 #include <sound/soc.h>
-#include "msm8x16-wcd.h"
-#include "msm8916-wcd-irq.h"
-#include "msm8x16_wcd_registers.h"
+#include "msm-analog-cdc.h"
+#include "sdm660-cdc-irq.h"
+#include "sdm660-cdc-registers.h"
 
 #define MAX_NUM_IRQS 14
 #define NUM_IRQ_REGS 2
@@ -83,7 +84,7 @@
 	uint8_t mask[NUM_IRQ_REGS];
 	int linuxirq[MAX_NUM_IRQS];
 	irq_handler_t handler[MAX_NUM_IRQS];
-	struct spmi_device *spmi[NUM_IRQ_REGS];
+	struct platform_device *spmi[NUM_IRQ_REGS];
 	struct snd_soc_codec *codec;
 
 	enum wcd9xxx_spmi_pm_state pm_state;
@@ -99,22 +100,6 @@
 void wcd9xxx_spmi_enable_irq(int irq)
 {
 	pr_debug("%s: irqno =%d\n", __func__, irq);
-	if ((irq >= 0) && (irq <= 7)) {
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
-				(0x01 << irq), 0x00);
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_DIGITAL_INT_EN_SET,
-				(0x01 << irq), (0x01 << irq));
-	}
-	if ((irq > 7) && (irq <= 15)) {
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_ANALOG_INT_EN_CLR,
-				(0x01 << (irq - 8)), 0x00);
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_ANALOG_INT_EN_SET,
-				(0x01 << (irq - 8)), (0x01 << (irq - 8)));
-	}
 
 	if (!(map.mask[BIT_BYTE(irq)] & (BYTE_BIT_MASK(irq))))
 		return;
@@ -128,23 +113,6 @@
 void wcd9xxx_spmi_disable_irq(int irq)
 {
 	pr_debug("%s: irqno =%d\n", __func__, irq);
-	if ((irq >= 0) && (irq <= 7)) {
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_DIGITAL_INT_EN_SET,
-				(0x01 << (irq)), 0x00);
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
-				(0x01 << irq), (0x01 << irq));
-	}
-
-	if ((irq > 7) && (irq <= 15)) {
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_ANALOG_INT_EN_SET,
-				(0x01 << (irq - 8)), 0x00);
-		snd_soc_update_bits(map.codec,
-				MSM89XX_PMIC_ANALOG_INT_EN_CLR,
-				(0x01 << (irq - 8)), (0x01 << (irq - 8)));
-	}
 
 	if (map.mask[BIT_BYTE(irq)] & (BYTE_BIT_MASK(irq)))
 		return;
@@ -161,6 +129,10 @@
 	int rc;
 	unsigned long irq_flags;
 
+	map.linuxirq[irq] =
+		platform_get_irq_byname(map.spmi[BIT_BYTE(irq)],
+					irq_names[irq]);
+
 	if (strcmp(name, "mbhc sw intr"))
 		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
 			IRQF_ONESHOT;
@@ -414,7 +386,7 @@
 	map.codec = codec;
 }
 
-void wcd9xxx_spmi_set_dev(struct spmi_device *spmi, int i)
+void wcd9xxx_spmi_set_dev(struct platform_device *spmi, int i)
 {
 	if (i < NUM_IRQ_REGS)
 		map.spmi[i] = spmi;
diff --git a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.h b/sound/soc/codecs/sdm660_cdc/sdm660-cdc-irq.h
similarity index 89%
rename from sound/soc/codecs/msm8x16/msm8916-wcd-irq.h
rename to sound/soc/codecs/sdm660_cdc/sdm660-cdc-irq.h
index 3862865..d0f48d0 100644
--- a/sound/soc/codecs/msm8x16/msm8916-wcd-irq.h
+++ b/sound/soc/codecs/sdm660_cdc/sdm660-cdc-irq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,7 +24,7 @@
 				const char *name, void *priv);
 extern int wcd9xxx_spmi_free_irq(int irq, void *priv);
 extern void wcd9xxx_spmi_set_codec(struct snd_soc_codec *codec);
-extern void wcd9xxx_spmi_set_dev(struct spmi_device *spmi, int i);
+extern void wcd9xxx_spmi_set_dev(struct platform_device *spmi, int i);
 extern int wcd9xxx_spmi_irq_init(void);
 extern int wcd9xxx_spmi_suspend(pm_message_t pmesg);
 extern int wcd9xxx_spmi_resume(void);
diff --git a/sound/soc/codecs/msm8x16/msm8x16_wcd_registers.h b/sound/soc/codecs/sdm660_cdc/sdm660-cdc-registers.h
similarity index 82%
rename from sound/soc/codecs/msm8x16/msm8x16_wcd_registers.h
rename to sound/soc/codecs/sdm660_cdc/sdm660-cdc-registers.h
index ec26ef39..1317ce1 100644
--- a/sound/soc/codecs/msm8x16/msm8x16_wcd_registers.h
+++ b/sound/soc/codecs/sdm660_cdc/sdm660-cdc-registers.h
@@ -9,8 +9,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#ifndef MSM8X16_WCD_REGISTERS_H
-#define MSM8X16_WCD_REGISTERS_H
+#ifndef SDM660_WCD_REGISTERS_H
+#define SDM660_WCD_REGISTERS_H
 
 #define CDC_DIG_BASE		0xF000
 #define CDC_ANA_BASE		0xF100
@@ -335,20 +335,20 @@
 		MSM89XX_PMIC_CDC_NUM_REGISTERS
 
 
-#define MSM89XX_CDC_CORE_CLK_RX_RESET_CTL		(0x00)
+#define MSM89XX_CDC_CORE_CLK_RX_RESET_CTL	(0x00)
 #define MSM89XX_CDC_CORE_CLK_RX_RESET_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL		(0x04)
-#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL		(0x08)
+#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL	(0x04)
+#define MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL	(0x08)
 #define MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL__POR		(0x00)
 #define MSM89XX_CDC_CORE_CLK_RX_I2S_CTL		(0x0C)
 #define MSM89XX_CDC_CORE_CLK_RX_I2S_CTL__POR		(0x13)
 #define MSM89XX_CDC_CORE_CLK_TX_I2S_CTL		(0x10)
 #define MSM89XX_CDC_CORE_CLK_TX_I2S_CTL__POR		(0x13)
-#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL		(0x14)
-#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL		(0x18)
-#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL__POR		(0x00)
+#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL	(0x14)
+#define MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL	(0x18)
+#define MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL__POR	(0x00)
 #define MSM89XX_CDC_CORE_CLK_OTHR_CTL		(0x1C)
 #define MSM89XX_CDC_CORE_CLK_OTHR_CTL__POR		(0x04)
 #define MSM89XX_CDC_CORE_CLK_RX_B1_CTL		(0x20)
@@ -359,10 +359,12 @@
 #define MSM89XX_CDC_CORE_CLK_PDM_CTL__POR		(0x00)
 #define MSM89XX_CDC_CORE_CLK_SD_CTL		(0x2C)
 #define MSM89XX_CDC_CORE_CLK_SD_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL		(0x30)
-#define MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL__POR		(0x00)
+#define MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL	(0x30)
+#define MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL__POR		(0x00)
 #define MSM89XX_CDC_CORE_CLK_RX_B2_CTL		(0x34)
 #define MSM89XX_CDC_CORE_CLK_RX_B2_CTL__POR		(0x00)
+#define MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL	(0x38)
+#define MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL__POR		(0x13)
 #define MSM89XX_CDC_CORE_RX1_B1_CTL		(0x40)
 #define MSM89XX_CDC_CORE_RX1_B1_CTL__POR		(0x00)
 #define MSM89XX_CDC_CORE_RX2_B1_CTL		(0x60)
@@ -399,19 +401,19 @@
 #define MSM89XX_CDC_CORE_RX2_B6_CTL__POR		(0x00)
 #define MSM89XX_CDC_CORE_RX3_B6_CTL		(0x94)
 #define MSM89XX_CDC_CORE_RX3_B6_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL		(0x58)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL		(0x78)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL		(0x98)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL		(0x5C)
-#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL		(0x7C)
-#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL		(0x9C)
-#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_TOP_GAIN_UPDATE		(0xA0)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL	(0x58)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL	(0x78)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL	(0x98)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL	(0x5C)
+#define MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL	(0x7C)
+#define MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL	(0x9C)
+#define MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_TOP_GAIN_UPDATE	(0xA0)
 #define MSM89XX_CDC_CORE_TOP_GAIN_UPDATE__POR		(0x00)
 #define MSM89XX_CDC_CORE_TOP_CTL		(0xA4)
 #define MSM89XX_CDC_CORE_TOP_CTL__POR			(0x01)
@@ -427,129 +429,145 @@
 #define MSM89XX_CDC_CORE_COMP0_B5_CTL__POR		(0x7F)
 #define MSM89XX_CDC_CORE_COMP0_B6_CTL		(0xC4)
 #define MSM89XX_CDC_CORE_COMP0_B6_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS		(0xC8)
-#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS__POR		(0x03)
+#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS	(0xC8)
+#define MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS__POR	(0x03)
 #define MSM89XX_CDC_CORE_COMP0_FS_CFG		(0xCC)
 #define MSM89XX_CDC_CORE_COMP0_FS_CFG__POR		(0x03)
-#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL		(0xD0)
-#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL__POR		(0x02)
-#define MSM89XX_CDC_CORE_DEBUG_DESER1_CTL		(0xE0)
+#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL	(0xD0)
+#define MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL__POR	(0x02)
+#define MSM89XX_CDC_CORE_DEBUG_DESER1_CTL	(0xE0)
 #define MSM89XX_CDC_CORE_DEBUG_DESER1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_DEBUG_DESER2_CTL		(0xE4)
+#define MSM89XX_CDC_CORE_DEBUG_DESER2_CTL	(0xE4)
 #define MSM89XX_CDC_CORE_DEBUG_DESER2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG		(0xE8)
+#define MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG	(0xE8)
 #define MSM89XX_CDC_CORE_DEBUG_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG		(0xEC)
+#define MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG	(0xEC)
 #define MSM89XX_CDC_CORE_DEBUG_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG		(0xF0)
+#define MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG	(0xF0)
 #define MSM89XX_CDC_CORE_DEBUG_B3_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL		(0x100)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL	(0x100)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL		(0x140)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL	(0x140)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL		(0x104)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL	(0x104)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL		(0x144)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL	(0x144)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL		(0x108)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL	(0x108)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL		(0x148)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL	(0x148)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL		(0x10C)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL	(0x10C)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL		(0x14C)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL	(0x14C)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL		(0x110)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL	(0x110)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL		(0x150)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL	(0x150)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL		(0x114)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL	(0x114)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL		(0x154)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL	(0x154)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL		(0x118)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL	(0x118)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL		(0x158)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL	(0x158)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL		(0x11C)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL	(0x11C)
 #define MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL		(0x15C)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL	(0x15C)
 #define MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL__POR		(0x00)
 #define MSM89XX_CDC_CORE_IIR1_CTL		(0x120)
-#define MSM89XX_CDC_CORE_IIR1_CTL__POR		(0x40)
+#define MSM89XX_CDC_CORE_IIR1_CTL__POR			(0x40)
 #define MSM89XX_CDC_CORE_IIR2_CTL		(0x160)
-#define MSM89XX_CDC_CORE_IIR2_CTL__POR		(0x40)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL		(0x124)
-#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL		(0x164)
-#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL		(0x128)
+#define MSM89XX_CDC_CORE_IIR2_CTL__POR			(0x40)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL	(0x124)
+#define MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL	(0x164)
+#define MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL	(0x128)
 #define MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL		(0x168)
+#define MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL	(0x168)
 #define MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL		(0x12C)
+#define MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL	(0x12C)
 #define MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL		(0x16C)
+#define MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL	(0x16C)
 #define MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX1_B1_CTL		(0x180)
+#define MSM89XX_CDC_CORE_CONN_RX1_B1_CTL	(0x180)
 #define MSM89XX_CDC_CORE_CONN_RX1_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX1_B2_CTL		(0x184)
+#define MSM89XX_CDC_CORE_CONN_RX1_B2_CTL	(0x184)
 #define MSM89XX_CDC_CORE_CONN_RX1_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX1_B3_CTL		(0x188)
+#define MSM89XX_CDC_CORE_CONN_RX1_B3_CTL	(0x188)
 #define MSM89XX_CDC_CORE_CONN_RX1_B3_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX2_B1_CTL		(0x18C)
+#define MSM89XX_CDC_CORE_CONN_RX2_B1_CTL	(0x18C)
 #define MSM89XX_CDC_CORE_CONN_RX2_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX2_B2_CTL		(0x190)
+#define MSM89XX_CDC_CORE_CONN_RX2_B2_CTL	(0x190)
 #define MSM89XX_CDC_CORE_CONN_RX2_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX2_B3_CTL		(0x194)
+#define MSM89XX_CDC_CORE_CONN_RX2_B3_CTL	(0x194)
 #define MSM89XX_CDC_CORE_CONN_RX2_B3_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX3_B1_CTL		(0x198)
+#define MSM89XX_CDC_CORE_CONN_RX3_B1_CTL	(0x198)
 #define MSM89XX_CDC_CORE_CONN_RX3_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_RX3_B2_CTL		(0x19C)
+#define MSM89XX_CDC_CORE_CONN_RX3_B2_CTL	(0x19C)
 #define MSM89XX_CDC_CORE_CONN_RX3_B2_CTL__POR		(0x00)
 #define MSM89XX_CDC_CORE_CONN_TX_B1_CTL		(0x1A0)
 #define MSM89XX_CDC_CORE_CONN_TX_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL		(0x1A8)
+#define MSM89XX_CDC_CORE_CONN_TX_B2_CTL		(0x1A4)
+#define MSM89XX_CDC_CORE_CONN_TX_B2_CTL__POR		(0x00)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL	(0x1A8)
 #define MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL		(0x1AC)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL	(0x1AC)
 #define MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL		(0x1B0)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL	(0x1B0)
 #define MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL		(0x1B4)
+#define MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL	(0x1B4)
 #define MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL		(0x1B8)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL	(0x1B8)
 #define MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL		(0x1BC)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL	(0x1BC)
 #define MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL		(0x1C0)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL	(0x1C0)
 #define MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL		(0x1C4)
+#define MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL	(0x1C4)
 #define MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL		(0x1C8)
-#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER		(0x280)
+#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL	(0x1C8)
+#define MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL__POR	(0x00)
+#define MSM89XX_CDC_CORE_CONN_TX_B3_CTL		(0x1CC)
+#define MSM89XX_CDC_CORE_CONN_TX_B3_CTL__POR		(0x00)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER	(0x1E0)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER__POR		(0x00)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN	(0x1E4)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN__POR		(0x00)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG	(0x1E8)
+#define MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG__POR		(0x00)
+#define MSM89XX_CDC_CORE_TX5_MUX_CTL		(0x1EC)
+#define MSM89XX_CDC_CORE_TX5_MUX_CTL__POR		(0x00)
+#define MSM89XX_CDC_CORE_TX5_CLK_FS_CTL		(0x1F0)
+#define MSM89XX_CDC_CORE_TX5_CLK_FS_CTL__POR		(0x03)
+#define MSM89XX_CDC_CORE_TX5_DMIC_CTL		(0x1F4)
+#define MSM89XX_CDC_CORE_TX5_DMIC_CTL__POR		(0x00)
+#define MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER	(0x280)
 #define MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER		(0x2A0)
+#define MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER	(0x2A0)
 #define MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER		(0x2C0)
+#define MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER	(0x2C0)
 #define MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER		(0x2E0)
+#define MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER	(0x2E0)
 #define MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN		(0x284)
+#define MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN	(0x284)
 #define MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN		(0x2A4)
+#define MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN	(0x2A4)
 #define MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN		(0x2C4)
+#define MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN	(0x2C4)
 #define MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN		(0x2E4)
+#define MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN	(0x2E4)
 #define MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG		(0x288)
+#define MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG	(0x288)
 #define MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG		(0x2A8)
+#define MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG	(0x2A8)
 #define MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG		(0x2C8)
+#define MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG	(0x2C8)
 #define MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG__POR		(0x00)
-#define MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG		(0x2E8)
+#define MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG	(0x2E8)
 #define MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG__POR		(0x00)
 #define MSM89XX_CDC_CORE_TX1_MUX_CTL		(0x28C)
 #define MSM89XX_CDC_CORE_TX1_MUX_CTL__POR		(0x00)
diff --git a/sound/soc/codecs/msm8x16/msm89xx-regmap.c b/sound/soc/codecs/sdm660_cdc/sdm660-regmap.c
similarity index 67%
rename from sound/soc/codecs/msm8x16/msm89xx-regmap.c
rename to sound/soc/codecs/sdm660_cdc/sdm660-regmap.c
index 007b74c..fff1fdc 100644
--- a/sound/soc/codecs/msm8x16/msm89xx-regmap.c
+++ b/sound/soc/codecs/sdm660_cdc/sdm660-regmap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,8 +12,7 @@
  */
 
 #include <linux/regmap.h>
-#include <linux/device.h>
-#include "msm8x16-wcd.h"
+#include "sdm660-cdc-registers.h"
 
 /*
  * Default register reset values that are common across different versions
@@ -21,7 +20,7 @@
  * then remove it from this structure and add it in version specific
  * structures.
  */
-static struct reg_default
+struct reg_default
 	msm89xx_cdc_core_defaults[MSM89XX_CDC_CORE_CACHE_SIZE] = {
 	{MSM89XX_CDC_CORE_CLK_RX_RESET_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL, 0x00},
@@ -35,8 +34,9 @@
 	{MSM89XX_CDC_CORE_CLK_MCLK_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CLK_PDM_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CLK_SD_CTL, 0x00},
-	{MSM89XX_CDC_CORE_CLK_WSA_VI_B1_CTL, 0x00},
+	{MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x00},
+	{MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL, 0x13},
 	{MSM89XX_CDC_CORE_RX1_B1_CTL, 0x00},
 	{MSM89XX_CDC_CORE_RX2_B1_CTL, 0x00},
 	{MSM89XX_CDC_CORE_RX3_B1_CTL, 0x00},
@@ -78,8 +78,9 @@
 	{MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG, 0x00},
 	{MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG, 0x00},
 	{MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL, 0x00},
-	{MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL, 0x00},
+	{MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL, 0x00},
 	{MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL, 0x00},
+	{MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL, 0x00},
 	{MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL, 0x00},
 	{MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL, 0x00},
 	{MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL, 0x00},
@@ -92,7 +93,6 @@
 	{MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL, 0x00},
 	{MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL, 0x00},
 	{MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL, 0x00},
-	{MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL, 0x00},
 	{MSM89XX_CDC_CORE_IIR1_CTL, 0x40},
 	{MSM89XX_CDC_CORE_IIR2_CTL, 0x40},
 	{MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL, 0x00},
@@ -110,6 +110,7 @@
 	{MSM89XX_CDC_CORE_CONN_RX3_B1_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CONN_RX3_B2_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CONN_TX_B1_CTL, 0x00},
+	{MSM89XX_CDC_CORE_CONN_TX_B2_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL, 0x00},
@@ -119,6 +120,13 @@
 	{MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL, 0x00},
 	{MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL, 0x00},
+	{MSM89XX_CDC_CORE_CONN_TX_B3_CTL, 0x00},
+	{MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER, 0x00},
+	{MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN, 0x00},
+	{MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG, 0x00},
+	{MSM89XX_CDC_CORE_TX5_MUX_CTL, 0x00},
+	{MSM89XX_CDC_CORE_TX5_CLK_FS_CTL, 0x03},
+	{MSM89XX_CDC_CORE_TX5_DMIC_CTL, 0x00},
 	{MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER, 0x00},
 	{MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER, 0x00},
 	{MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER, 0x00},
@@ -145,7 +153,7 @@
 	{MSM89XX_CDC_CORE_TX4_DMIC_CTL, 0x00},
 };
 
-static struct reg_default
+struct reg_default
 	msm89xx_pmic_cdc_defaults[MSM89XX_PMIC_CDC_CACHE_SIZE] = {
 	{MSM89XX_PMIC_DIGITAL_REVISION1, 0x00},
 	{MSM89XX_PMIC_DIGITAL_REVISION2, 0x00},
@@ -304,114 +312,148 @@
 	{MSM89XX_PMIC_ANALOG_TRIM_CTRL4, 0x00},
 };
 
-static bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg)
+static const u8 msm89xx_cdc_core_reg_readable[MSM89XX_CDC_CORE_CACHE_SIZE] = {
+		[MSM89XX_CDC_CORE_CLK_RX_RESET_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_RX_I2S_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX_I2S_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_OTHR_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_RX_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_MCLK_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_PDM_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_SD_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_RX_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_TOP_GAIN_UPDATE] = 1,
+		[MSM89XX_CDC_CORE_TOP_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_SHUT_DOWN_STATUS] = 1,
+		[MSM89XX_CDC_CORE_COMP0_FS_CFG] = 1,
+		[MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_DESER1_CTL] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_DESER2_CTL] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX1_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX1_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX1_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX2_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX2_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX2_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX3_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX3_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX1_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX2_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX3_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX4_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX1_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX2_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX3_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX4_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX5_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX5_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX5_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX1_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX2_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX3_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX4_DMIC_CTL] = 1,
+};
+
+bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg)
 {
 	return msm89xx_cdc_core_reg_readable[reg];
 }
 
-static bool msm89xx_pmic_cdc_readable_reg(struct device *dev, unsigned int reg)
-{
-	return msm89xx_pmic_cdc_reg_readable[reg];
-}
-
-static bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg)
+bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case MSM89XX_CDC_CORE_RX1_B1_CTL:
-	case MSM89XX_CDC_CORE_RX2_B1_CTL:
-	case MSM89XX_CDC_CORE_RX3_B1_CTL:
-	case MSM89XX_CDC_CORE_RX1_B6_CTL:
-	case MSM89XX_CDC_CORE_RX2_B6_CTL:
-	case MSM89XX_CDC_CORE_RX3_B6_CTL:
-	case MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG:
-	case MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG:
-	case MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL:
-	case MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL:
-	case MSM89XX_CDC_CORE_CLK_MCLK_CTL:
-	case MSM89XX_CDC_CORE_CLK_PDM_CTL:
-	case MSM89XX_PMIC_ANALOG_BYPASS_MODE:
-	case MSM89XX_PMIC_ANALOG_BOOST_EN_CTL:
-	case MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL:
-	case MSM89XX_PMIC_ANALOG_CURRENT_LIMIT:
-	case MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL:
-	case MSM89XX_PMIC_ANALOG_NCP_FBCTRL:
-	case MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1:
-		return true;
+	/* cache bypass for initial version */
 	default:
-		return false;
+		return true;
 	}
 }
-
-static bool msm89xx_pmic_cdc_volatile_reg(struct device *dev, unsigned int reg)
-{
-	switch (reg) {
-	case MSM89XX_PMIC_DIGITAL_REVISION1:
-	case MSM89XX_PMIC_DIGITAL_REVISION2:
-	case MSM89XX_PMIC_DIGITAL_PERPH_TYPE:
-	case MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE:
-	case MSM89XX_PMIC_DIGITAL_INT_RT_STS:
-	case MSM89XX_PMIC_DIGITAL_INT_SET_TYPE:
-	case MSM89XX_PMIC_DIGITAL_INT_POLARITY_HIGH:
-	case MSM89XX_PMIC_DIGITAL_INT_POLARITY_LOW:
-	case MSM89XX_PMIC_DIGITAL_INT_LATCHED_STS:
-	case MSM89XX_PMIC_DIGITAL_INT_PENDING_STS:
-	case MSM89XX_PMIC_DIGITAL_PIN_STATUS:
-	case MSM89XX_PMIC_DIGITAL_SEC_ACCESS:
-	case MSM89XX_PMIC_ANALOG_SEC_ACCESS:
-	case MSM89XX_PMIC_ANALOG_REVISION1:
-	case MSM89XX_PMIC_ANALOG_REVISION2:
-	case MSM89XX_PMIC_ANALOG_REVISION3:
-	case MSM89XX_PMIC_ANALOG_REVISION4:
-	case MSM89XX_PMIC_ANALOG_PERPH_TYPE:
-	case MSM89XX_PMIC_ANALOG_PERPH_SUBTYPE:
-	case MSM89XX_PMIC_ANALOG_INT_RT_STS:
-	case MSM89XX_PMIC_ANALOG_INT_SET_TYPE:
-	case MSM89XX_PMIC_ANALOG_INT_POLARITY_HIGH:
-	case MSM89XX_PMIC_ANALOG_INT_POLARITY_LOW:
-	case MSM89XX_PMIC_ANALOG_INT_LATCHED_STS:
-	case MSM89XX_PMIC_ANALOG_INT_PENDING_STS:
-	case MSM89XX_PMIC_ANALOG_MBHC_BTN_RESULT:
-	case MSM89XX_PMIC_ANALOG_MBHC_ZDET_ELECT_RESULT:
-	case MSM89XX_PMIC_ANALOG_RX_HPH_STATUS:
-	case MSM89XX_PMIC_ANALOG_RX_EAR_STATUS:
-	case MSM89XX_PMIC_ANALOG_SPKR_SAR_STATUS:
-	case MSM89XX_PMIC_ANALOG_SPKR_DRV_STATUS:
-		return true;
-	default:
-		return false;
-	}
-}
-
-struct regmap_config msm89xx_pmic_cdc_regmap_config = {
-	.reg_bits	= 16,
-	.val_bits	= 8,
-	.max_register	= MSM89XX_PMIC_CDC_CACHE_SIZE,
-	.fast_io	= true,
-	.reg_defaults = msm89xx_pmic_cdc_defaults,
-	.num_reg_defaults = ARRAY_SIZE(msm89xx_pmic_cdc_defaults),
-	.readable_reg = msm89xx_pmic_cdc_readable_reg,
-	.volatile_reg = msm89xx_pmic_cdc_volatile_reg,
-	.cache_type = REGCACHE_RBTREE,
-	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
-	.val_format_endian = REGMAP_ENDIAN_NATIVE,
-	.can_multi_write = true,
-	.lock = enable_digital_callback,
-	.unlock = disable_digital_callback,
-
-};
-
-struct regmap_config msm89xx_cdc_core_regmap_config = {
-	.reg_bits = 32,
-	.reg_stride = 4,
-	.val_bits = 32,
-
-	.max_register = MSM89XX_CDC_CORE_CACHE_SIZE,
-	.reg_defaults = msm89xx_cdc_core_defaults,
-	.num_reg_defaults = ARRAY_SIZE(msm89xx_cdc_core_defaults),
-	.readable_reg = msm89xx_cdc_core_readable_reg,
-	.volatile_reg = msm89xx_cdc_core_volatile_reg,
-	.cache_type = REGCACHE_RBTREE,
-	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
-	.val_format_endian = REGMAP_ENDIAN_NATIVE,
-	.can_multi_write = true,
-};
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 5c27f10..ae53294 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -881,12 +881,50 @@
 
 static int wdsp_suspend(struct device *wdsp_dev)
 {
-	return 0;
+	struct wdsp_mgr_priv *wdsp;
+	int rc = 0, i;
+
+	if (!wdsp_dev) {
+		pr_err("%s: Invalid handle to device\n", __func__);
+		return -EINVAL;
+	}
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+
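+	/* Suspend components in reverse enumeration order */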
+	for (i = WDSP_CMPNT_TYPE_MAX - 1; i >= 0; i--) {
+		rc = wdsp_unicast_event(wdsp, i, WDSP_EVENT_SUSPEND, NULL);
+		if (rc < 0) {
+			WDSP_ERR(wdsp, "component %s failed to suspend\n",
+				WDSP_GET_CMPNT_TYPE_STR(i));
+			break;
+		}
+	}
+
+	return rc;
 }
 
 static int wdsp_resume(struct device *wdsp_dev)
 {
-	return 0;
+	struct wdsp_mgr_priv *wdsp;
+	int rc = 0, i;
+
+	if (!wdsp_dev) {
+		pr_err("%s: Invalid handle to device\n", __func__);
+		return -EINVAL;
+	}
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+
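+	/* Resume components in forward enumeration order, mirroring suspend */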
+	for (i = 0; i < WDSP_CMPNT_TYPE_MAX; i++) {
+		rc = wdsp_unicast_event(wdsp, i, WDSP_EVENT_RESUME, NULL);
+		if (rc < 0) {
+			WDSP_ERR(wdsp, "component %s failed to resume\n",
+				WDSP_GET_CMPNT_TYPE_STR(i));
+			break;
+		}
+	}
+
+	return rc;
 }
 
 static struct wdsp_mgr_ops wdsp_ops = {
diff --git a/sound/soc/codecs/wcd-mbhc-adc.c b/sound/soc/codecs/wcd-mbhc-adc.c
new file mode 100644
index 0000000..2c7d667
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-adc.c
@@ -0,0 +1,907 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "wcd-mbhc-adc.h"
+#include "wcd-mbhc-v2.h"
+
+#define WCD_MBHC_ADC_HS_THRESHOLD_MV    1700
+#define WCD_MBHC_ADC_HPH_THRESHOLD_MV   75
+#define WCD_MBHC_ADC_MICBIAS_MV         1800
+
+static int wcd_mbhc_get_micbias(struct wcd_mbhc *mbhc)
+{
+	int micbias = 0;
+	u8 vout_ctl = 0;
+
+	/* Read MBHC Micbias (Mic Bias2) voltage */
+	WCD_MBHC_REG_READ(WCD_MBHC_MICB2_VOUT, vout_ctl);
+
+	/* Formula for getting micbias from vout
+	 * micbias = 1.0V + VOUT_CTL * 50mV
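+	 * e.g. VOUT_CTL = 16 (0x10) -> 1000 + 16 * 50 = 1800mV (illustrative)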
+	 */
+	micbias = 1000 + (vout_ctl * 50);
+	pr_debug("%s: vout_ctl: %d, micbias: %d\n",
+		 __func__, vout_ctl, micbias);
+
+	return micbias;
+}
+
+static int wcd_get_voltage_from_adc(u8 val, int micbias)
+{
+	/* Formula for calculating voltage from ADC
+	 * Voltage = ADC_RESULT*12.5mV*V_MICBIAS/1.8
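+	 * e.g. ADC_RESULT = 128, V_MICBIAS = 1800mV -> 128 * 12.5mV = 1600mV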
+	 */
+	return ((val * 125 * micbias)/(WCD_MBHC_ADC_MICBIAS_MV * 10));
+}
+
+static int wcd_measure_adc_continuous(struct wcd_mbhc *mbhc)
+{
+	u8 adc_result = 0;
+	int output_mv = 0;
+	int retry = 3;
+	u8 adc_en = 0;
+
+	pr_debug("%s: enter\n", __func__);
+
+	/* Pre-requisites for ADC continuous measurement */
+	/* Read legacy electrical detection and disable */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0x00);
+	/* Set ADC to continuous measurement */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 1);
+	/* Read ADC Enable bit to restore after adc measurement */
+	WCD_MBHC_REG_READ(WCD_MBHC_ADC_EN, adc_en);
+	/* Disable ADC_ENABLE bit */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+	/* Disable MBHC FSM */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	/* Set the MUX selection to IN2P */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, MUX_CTL_IN2P);
+	/* Enable MBHC FSM */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	/* Enable ADC_ENABLE bit */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 1);
+
+	while (retry--) {
+		/* wait for 3 msec before reading ADC result */
+		usleep_range(3000, 3100);
+
+		/* Read ADC result */
+		WCD_MBHC_REG_READ(WCD_MBHC_ADC_RESULT, adc_result);
+	}
+
+	/* Restore ADC Enable */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, adc_en);
+	/* Get voltage from ADC result */
+	output_mv = wcd_get_voltage_from_adc(adc_result,
+					     wcd_mbhc_get_micbias(mbhc));
+	pr_debug("%s: adc_result: 0x%x, output_mv: %d\n",
+		 __func__, adc_result, output_mv);
+
+	return output_mv;
+}
+
+static int wcd_measure_adc_once(struct wcd_mbhc *mbhc, int mux_ctl)
+{
+	u8 adc_timeout = 0;
+	u8 adc_complete = 0;
+	u8 adc_result = 0;
+	int retry = 6;
+	int ret = 0;
+	int output_mv = 0;
+	u8 adc_en = 0;
+
+	pr_debug("%s: enter\n", __func__);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+	/* Read ADC Enable bit to restore after adc measurement */
+	WCD_MBHC_REG_READ(WCD_MBHC_ADC_EN, adc_en);
+	/* Trigger ADC one time measurement */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	/* Set the appropriate MUX selection */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, mux_ctl);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 1);
+
+	while (retry--) {
+		/* wait for 600usec to get adc results */
+		usleep_range(600, 610);
+
+		/* check for ADC Timeout */
+		WCD_MBHC_REG_READ(WCD_MBHC_ADC_TIMEOUT, adc_timeout);
+		if (adc_timeout)
+			continue;
+
+		/* Read ADC complete bit */
+		WCD_MBHC_REG_READ(WCD_MBHC_ADC_COMPLETE, adc_complete);
+		if (!adc_complete)
+			continue;
+
+		/* Read ADC result */
+		WCD_MBHC_REG_READ(WCD_MBHC_ADC_RESULT, adc_result);
+
+		pr_debug("%s: ADC result: 0x%x\n", __func__, adc_result);
+		/* Get voltage from ADC result */
+		output_mv = wcd_get_voltage_from_adc(adc_result,
+						wcd_mbhc_get_micbias(mbhc));
+		break;
+	}
+
+	/* Restore ADC Enable */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, adc_en);
+
+	if (retry <= 0) {
+		pr_err("%s: adc complete: %d, adc timeout: %d\n",
+			__func__, adc_complete, adc_timeout);
+		ret = -EINVAL;
+	} else {
+		pr_debug("%s: adc complete: %d, adc timeout: %d output_mV: %d\n",
+			__func__, adc_complete, adc_timeout, output_mv);
+		ret = output_mv;
+	}
+
+	pr_debug("%s: leave\n", __func__);
+
+	return ret;
+}
+
+static bool wcd_mbhc_adc_detect_anc_plug_type(struct wcd_mbhc *mbhc)
+{
+	bool anc_mic_found = false;
+	u16 fsm_en = 0;
+	u8 det = 0;
+	unsigned long retry = 0;
+	int valid_plug_cnt = 0, invalid_plug_cnt = 0;
+	int ret = 0;
+	u8 elect_ctl = 0;
+	u8 adc_mode = 0;
+	u8 vref = 0;
+	int vref_mv[] = {1650, 1500, 1600, 1700};
+
+	if (mbhc->mbhc_cfg->anc_micbias < MIC_BIAS_1 ||
+	    mbhc->mbhc_cfg->anc_micbias > MIC_BIAS_4)
+		return false;
+
+	if (!mbhc->mbhc_cb->mbhc_micbias_control)
+		return false;
+
+	/* Disable Detection done for ADC operation */
+	WCD_MBHC_REG_READ(WCD_MBHC_DETECTION_DONE, det);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+
+	/* Mask ADC COMPLETE interrupt */
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+					    mbhc->mbhc_cfg->anc_micbias,
+					    MICB_ENABLE);
+
+	/* Read legacy electrical detection and disable */
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0x00);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 1);
+	WCD_MBHC_REG_READ(WCD_MBHC_ADC_MODE, adc_mode);
+
+	/*
+	 * Wait for the 20ms button debounce time. If a 4-pole plug is
+	 * inserted into a 5-pole jack, a button press interrupt will fire
+	 * during ANC plug detection. In that case, even though Hs_comp_res
+	 * is 0, it should not be declared as an ANC plug type.
+	 */
+	usleep_range(20000, 20100);
+
+	/*
+	 * After enabling FSM, to handle slow insertion scenarios,
+	 * check IN3 voltage is below the Vref
+	 */
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_VREF, vref);
+
+	do {
+		if (wcd_swch_level_remove(mbhc)) {
+			pr_debug("%s: Switch level is low\n", __func__);
+			goto done;
+		}
+		pr_debug("%s: Retry attempt %lu\n", __func__, retry + 1);
+		ret = wcd_measure_adc_once(mbhc, MUX_CTL_IN3P);
+		/* TODO - check the logic */
+		if (ret && (ret < vref_mv[vref]))
+			valid_plug_cnt++;
+		else
+			invalid_plug_cnt++;
+		retry++;
+	} while (retry < ANC_DETECT_RETRY_CNT);
+
+	pr_debug("%s: valid: %d, invalid: %d\n", __func__, valid_plug_cnt,
+		 invalid_plug_cnt);
+
+	/* decision logic */
+	if (valid_plug_cnt > invalid_plug_cnt)
+		anc_mic_found = true;
+done:
+	/* Restore ADC mode */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, adc_mode);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	/* Set the MUX selection to AUTO */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, MUX_CTL_AUTO);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, fsm_en);
+	/* Restore detection done */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, det);
+
+	/* Restore electrical detection */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+
+	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+					    mbhc->mbhc_cfg->anc_micbias,
+					    MICB_DISABLE);
+	pr_debug("%s: anc mic %sfound\n", __func__,
+		 anc_mic_found ? "" : "not ");
+
+	return anc_mic_found;
+}
+
+/* To determine if cross connection occurred */
+static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
+{
+	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_NONE;
+	int hphl_adc_res = 0, hphr_adc_res = 0;
+	u8 fsm_en = 0;
+	int ret = 0;
+	u8 adc_mode = 0;
+	u8 elect_ctl = 0;
+	u8 adc_en = 0;
+
+	pr_debug("%s: enter\n", __func__);
+	/* Check for button press and plug detection */
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low\n", __func__);
+		return -EINVAL;
+	}
+
+	/* If PA is enabled, dont check for cross-connection */
+	if (mbhc->mbhc_cb->hph_pa_on_status)
+		if (mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec))
+			return -EINVAL;
+
+	/* Read legacy electrical detection and disable */
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0x00);
+
+	/* Read and set ADC to single measurement */
+	WCD_MBHC_REG_READ(WCD_MBHC_ADC_MODE, adc_mode);
+	/* Read ADC Enable bit to restore after adc measurement */
+	WCD_MBHC_REG_READ(WCD_MBHC_ADC_EN, adc_en);
+	/* Read FSM status */
+	WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+
+	/* Get adc result for HPH L */
+	hphl_adc_res = wcd_measure_adc_once(mbhc, MUX_CTL_HPH_L);
+	if (hphl_adc_res < 0) {
+		pr_err("%s: hphl_adc_res adc measurement failed\n", __func__);
+		ret = hphl_adc_res;
+		goto done;
+	}
+
+	/* Get adc result for HPH R in mV */
+	hphr_adc_res = wcd_measure_adc_once(mbhc, MUX_CTL_HPH_R);
+	if (hphr_adc_res < 0) {
+		pr_err("%s: hphr_adc_res adc measurement failed\n", __func__);
+		ret = hphr_adc_res;
+		goto done;
+	}
+
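+	/*
+	 * With GND and MIC swapped, both headphone pins sit well above
+	 * ground, so 100mV on both HPHL and HPHR is used here as the
+	 * cross-connection decision threshold.
+	 */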
+	if (hphl_adc_res > 100 && hphr_adc_res > 100) {
+		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+		pr_debug("%s: Cross connection identified\n", __func__);
+	} else {
+		pr_debug("%s: No Cross connection found\n", __func__);
+	}
+
+done:
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	/* Set the MUX selection to Auto */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, MUX_CTL_AUTO);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+
+	/* Restore ADC Enable */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, adc_en);
+
+	/* Restore ADC mode */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, adc_mode);
+
+	/* Restore FSM state */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, fsm_en);
+
+	/* Restore electrical detection */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+
+	pr_debug("%s: leave, plug type: %d\n", __func__,  plug_type);
+
+	return (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP) ? true : false;
+}
+
+static bool wcd_mbhc_adc_check_for_spl_headset(struct wcd_mbhc *mbhc,
+					   int *spl_hs_cnt)
+{
+	bool spl_hs = false;
+	int output_mv = 0;
+	int adc_threshold = 0;
+
+	pr_debug("%s: enter\n", __func__);
+	if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+		goto exit;
+
+	/* Bump up MB2 to 2.7V */
+	mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+				mbhc->mbhc_cfg->mbhc_micbias, true);
+	usleep_range(10000, 10100);
+
+	/*
+	 * Use ADC single mode to minimize the chance of missing out
+	 * btn press/release for HEADSET type during correct work.
+	 */
+	output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
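+	/*
+	 * Scale the 1700mV headset threshold by the actual micbias,
+	 * e.g. with micbias bumped to 2700mV the effective threshold is
+	 * 1700 * 2700 / 1800 = 2550mV (illustrative).
+	 */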
+	adc_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
+			  wcd_mbhc_get_micbias(mbhc))/WCD_MBHC_ADC_MICBIAS_MV);
+
+	if (output_mv > adc_threshold) {
+		spl_hs = false;
+	} else {
+		spl_hs = true;
+		if (spl_hs_cnt)
+			*spl_hs_cnt += 1;
+	}
+
+	/* MB2 back to 1.8v if the type is not special headset */
+	if (!spl_hs) {
+		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+				mbhc->mbhc_cfg->mbhc_micbias, false);
+		/* Add 10ms delay for micbias to settle */
+		usleep_range(10000, 10100);
+	} else {
+		pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
+	}
+
+exit:
+	pr_debug("%s: leave\n", __func__);
+	return spl_hs;
+}
+
+static bool wcd_is_special_headset(struct wcd_mbhc *mbhc)
+{
+	int delay = 0;
+	bool is_spl_hs = false;
+	int spl_hs_count = 0;
+
+	while (!is_spl_hs) {
+		delay += 50;
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			break;
+		}
+		/* Wait for 50ms for FSM to update result */
+		msleep(50);
+		is_spl_hs = wcd_mbhc_adc_check_for_spl_headset(mbhc,
+							       &spl_hs_count);
+		if (is_spl_hs)
+			pr_debug("%s: Spl headset detected in %d msecs\n",
+					__func__, delay);
+		if (delay == SPECIAL_HS_DETECT_TIME_MS) {
+			pr_debug("%s: Spl headset not found in 2 sec\n",
+				 __func__);
+			break;
+		}
+	}
+	pr_debug("%s: leave, micb_enable: %d\n", __func__,
+		  mbhc->micbias_enable);
+
+	return is_spl_hs;
+}
+
+static void wcd_mbhc_adc_update_fsm_source(struct wcd_mbhc *mbhc,
+				       enum wcd_mbhc_plug_type plug_type)
+{
+	bool micbias2;
+
+	micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+							MIC_BIAS_2);
+	switch (plug_type) {
+	case MBHC_PLUG_TYPE_HEADPHONE:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		break;
+	case MBHC_PLUG_TYPE_HEADSET:
+	case MBHC_PLUG_TYPE_ANC_HEADPHONE:
+		if (!mbhc->is_hs_recording && !micbias2)
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		break;
+	default:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+		break;
+	}
+}
+
+/* should be called under interrupt context that hold suspend */
+static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
+					    struct work_struct *work)
+{
+	pr_debug("%s: scheduling correct_swch_plug\n", __func__);
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+	mbhc->hs_detect_work_stop = false;
+	mbhc->mbhc_cb->lock_sleep(mbhc, true);
+	schedule_work(work);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
+					 struct work_struct *work)
+{
+	pr_debug("%s: Canceling correct_plug_swch\n", __func__);
+	mbhc->hs_detect_work_stop = true;
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	if (cancel_work_sync(work)) {
+		pr_debug("%s: correct_plug_swch is canceled\n",
+			 __func__);
+		mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	}
+	WCD_MBHC_RSC_LOCK(mbhc);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_mbhc_adc_detect_plug_type(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, false);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+
+	if (mbhc->mbhc_cb->mbhc_micbias_control) {
+		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+						    MICB_ENABLE);
+	} else {
+		pr_err("%s: Mic Bias is not enabled\n", __func__);
+		return;
+	}
+
+	/* Re-initialize button press completion object */
+	reinit_completion(&mbhc->btn_press_compl);
+	wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd_micbias_disable(struct wcd_mbhc *mbhc)
+{
+	if (mbhc->micbias_enable) {
+		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+			mbhc->codec, MIC_BIAS_2, false);
+		if (mbhc->mbhc_cb->set_micbias_value)
+			mbhc->mbhc_cb->set_micbias_value(
+					mbhc->codec);
+		mbhc->micbias_enable = false;
+	}
+}
+
+static int wcd_mbhc_get_plug_from_adc(int adc_result)
+{
+	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+
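+	/*
+	 * Classify from the mic-pin voltage: below the 75mV HPH threshold
+	 * report a headphone, above the 1700mV HS threshold report a
+	 * high/open mic line (extension cable), otherwise a headset.
+	 */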
+	if (adc_result < WCD_MBHC_ADC_HPH_THRESHOLD_MV)
+		plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+	else if (adc_result > WCD_MBHC_ADC_HS_THRESHOLD_MV)
+		plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+	else
+		plug_type = MBHC_PLUG_TYPE_HEADSET;
+	pr_debug("%s: plug type is %d found\n", __func__, plug_type);
+
+	return plug_type;
+}
+
+static int wcd_mbhc_get_plug_type(struct wcd_mbhc *mbhc)
+{
+	int result_mv = 0;
+
+	/*
+	 * Use ADC single mode to minimize the chance of missing out
+	 * btn press/release for HEADSET type during correct work.
+	 */
+	result_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+
+	return wcd_mbhc_get_plug_from_adc(result_mv);
+}
+
+static void wcd_correct_swch_plug(struct work_struct *work)
+{
+	struct wcd_mbhc *mbhc;
+	struct snd_soc_codec *codec;
+	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+	unsigned long timeout;
+	bool wrk_complete = false;
+	int gnd_mic_swap_cnt = 0;
+	bool is_pa_on = false, spl_hs = false;
+	int ret = 0;
+	int spl_hs_count = 0;
+	int output_mv = 0;
+	int cross_conn;
+	int try = 0;
+
+	pr_debug("%s: enter\n", __func__);
+
+	mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
+	codec = mbhc->codec;
+
+	WCD_MBHC_RSC_LOCK(mbhc);
+	/* Mask ADC COMPLETE interrupt */
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+
+	/* Check for cross connection */
+	do {
+		cross_conn = wcd_check_cross_conn(mbhc);
+		try++;
+	} while (try < GND_MIC_SWAP_THRESHOLD);
+
+	if (cross_conn > 0) {
+		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+		pr_debug("%s: cross connection found, Plug type %d\n",
+			 __func__, plug_type);
+		goto correct_plug_type;
+	}
+	/* Find plug type */
+	output_mv = wcd_measure_adc_continuous(mbhc);
+	plug_type = wcd_mbhc_get_plug_from_adc(output_mv);
+
+	/*
+	 * Report plug type if it is either headset or headphone
+	 * else start the 3 sec loop
+	 */
+	if ((plug_type == MBHC_PLUG_TYPE_HEADSET ||
+	     plug_type == MBHC_PLUG_TYPE_HEADPHONE) &&
+	    (!wcd_swch_level_remove(mbhc))) {
+		WCD_MBHC_RSC_LOCK(mbhc);
+		wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
+
+	/*
+	 * Set DETECTION_DONE bit for HEADSET and ANC_HEADPHONE,
+	 * so that btn press/release interrupt can be generated.
+	 */
+	if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET ||
+		mbhc->current_plug == MBHC_PLUG_TYPE_ANC_HEADPHONE) {
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 1);
+	}
+
+correct_plug_type:
+	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+	while (!time_after(jiffies, timeout)) {
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			wcd_micbias_disable(mbhc);
+			goto exit;
+		}
+
+		/* allow some time and re-check whether stop was requested */
+		msleep(20);
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			wcd_micbias_disable(mbhc);
+			goto exit;
+		}
+
+		msleep(180);
+		/*
+		 * Use ADC single mode to minimize the chance of missing out
+		 * btn press/release for HEADSET type during correct work.
+		 */
+		output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+
+		/*
+		 * Instead of hogging the system with continuous polling, wait
+		 * for some time and re-check the stop request again.
+		 */
+		plug_type = wcd_mbhc_get_plug_from_adc(output_mv);
+
+		if ((output_mv > WCD_MBHC_ADC_HS_THRESHOLD_MV) &&
+		    (spl_hs_count < WCD_MBHC_SPL_HS_CNT)) {
+			spl_hs = wcd_mbhc_adc_check_for_spl_headset(mbhc,
+								&spl_hs_count);
+
+			if (spl_hs_count == WCD_MBHC_SPL_HS_CNT)
+				mbhc->micbias_enable = true;
+		}
+
+		if (mbhc->mbhc_cb->hph_pa_on_status)
+			is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec);
+
+		if ((output_mv <= WCD_MBHC_ADC_HS_THRESHOLD_MV) &&
+		    (!is_pa_on)) {
+			/* Check for cross connection*/
+			ret = wcd_check_cross_conn(mbhc);
+			if (ret < 0)
+				continue;
+			if (ret > 0) {
+				/* Found cross connection, swap mic/gnd */
+				if (gnd_mic_swap_cnt > GND_MIC_SWAP_THRESHOLD) {
+					/*
+					 * The GND/MIC switch didn't work;
+					 * report an unsupported plug.
+					 */
+					pr_debug("%s: switch did not work\n",
+						 __func__);
+					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+					goto report;
+				}
+				gnd_mic_swap_cnt++;
+				if (mbhc->mbhc_cfg->swap_gnd_mic &&
+					mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+					pr_debug("%s: US_EU gpio present, flip switch\n"
+						, __func__);
+					continue;
+				}
+			} else {
+				gnd_mic_swap_cnt++;
+				plug_type = wcd_mbhc_get_plug_type(mbhc);
+				if ((gnd_mic_swap_cnt <=
+				    GND_MIC_SWAP_THRESHOLD) &&
+				    (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
+					continue;
+				} else {
+					gnd_mic_swap_cnt = 0;
+				}
+			}
+		}
+
+		if (!spl_hs && (plug_type == MBHC_PLUG_TYPE_HIGH_HPH)) {
+			pr_debug("%s: cable is extension cable\n", __func__);
+			wrk_complete = true;
+		} else {
+			if (plug_type != MBHC_PLUG_TYPE_GND_MIC_SWAP) {
+				if (!spl_hs)
+					plug_type =
+						wcd_mbhc_get_plug_type(mbhc);
+				else
+					plug_type = MBHC_PLUG_TYPE_HEADSET;
+				/*
+				 * Report headset only if not already reported
+				 * and if there is no button press without
+				 * release
+				 */
+				if ((mbhc->current_plug !=
+				      MBHC_PLUG_TYPE_HEADSET) &&
+				     (mbhc->current_plug !=
+				     MBHC_PLUG_TYPE_ANC_HEADPHONE)) {
+					if (plug_type == MBHC_PLUG_TYPE_HEADSET)
+						pr_debug("%s: cable is %s headset\n",
+							__func__,
+							((spl_hs) ?
+							 "special ":""));
+					goto report;
+				}
+			}
+			wrk_complete = false;
+		}
+	}
+	if (!wrk_complete) {
+		/*
+		 * If plug_type is headset, we might have already reported it
+		 * either in detect_plug_type or in the above while loop; no
+		 * need to report it again.
+		 */
+		if ((plug_type == MBHC_PLUG_TYPE_HEADSET) ||
+		    (plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE)) {
+			pr_debug("%s: plug_type:0x%x already reported\n",
+				 __func__, mbhc->current_plug);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+			goto enable_supply;
+		}
+	}
+	if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH) {
+		if (wcd_is_special_headset(mbhc)) {
+			pr_debug("%s: Special headset found %d\n",
+					__func__, plug_type);
+			plug_type = MBHC_PLUG_TYPE_HEADSET;
+		} else {
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_ISRC_EN, 1);
+		}
+	}
+
+report:
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low\n", __func__);
+		goto exit;
+	}
+
+	pr_debug("%s: Valid plug found, plug type %d wrk_cmpt %d btn_intr %d\n",
+			__func__, plug_type, wrk_complete,
+			mbhc->btn_press_intr);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+
+	WCD_MBHC_RSC_LOCK(mbhc);
+	wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+enable_supply:
+	/*
+	 * Set DETECTION_DONE bit for HEADSET and ANC_HEADPHONE,
+	 * so that btn press/release interrupt can be generated.
+	 * For other plug type, clear the bit.
+	 */
+	if (plug_type == MBHC_PLUG_TYPE_HEADSET ||
+	    plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE)
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 1);
+	else
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+
+	if (mbhc->mbhc_cb->mbhc_micbias_control)
+		wcd_mbhc_adc_update_fsm_source(mbhc, plug_type);
+exit:
+	if (mbhc->mbhc_cb->mbhc_micbias_control &&
+	    !mbhc->micbias_enable)
+		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+						    MICB_DISABLE);
+	if (mbhc->mbhc_cfg->detect_extn_cable &&
+	    ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
+	     (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
+	    !mbhc->hs_detect_work_stop) {
+		WCD_MBHC_RSC_LOCK(mbhc);
+		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, true);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
+
+	/*
+	 * Enable ADC COMPLETE interrupt for HEADPHONE.
+	 * Btn release may happen after the correct work, ADC COMPLETE
+	 * interrupt needs to be captured to correct plug type.
+	 */
+	if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
+		WCD_MBHC_RSC_LOCK(mbhc);
+		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+				     true);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
+
+	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, true);
+
+	mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static irqreturn_t wcd_mbhc_adc_hs_rem_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD_MBHC_RSC_LOCK(mbhc);
+	/*
+	 * ADC COMPLETE and ELEC_REM interrupts are both enabled for HEADPHONE,
+	 * need to reject the ADC COMPLETE interrupt which follows ELEC_REM one
+	 * when HEADPHONE is removed.
+	 */
+	if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE)
+		mbhc->extn_cable_hph_rem = true;
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+	wcd_mbhc_elec_hs_report_unplug(mbhc);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_adc_hs_ins_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+
+	pr_debug("%s: enter\n", __func__);
+
+	/*
+	 * ADC COMPLETE and ELEC_REM interrupts are both enabled for HEADPHONE,
+	 * need to reject the ADC COMPLETE interrupt which follows ELEC_REM one
+	 * when HEADPHONE is removed.
+	 */
+	if (mbhc->extn_cable_hph_rem == true) {
+		mbhc->extn_cable_hph_rem = false;
+		pr_debug("%s: leave\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	WCD_MBHC_RSC_LOCK(mbhc);
+	/*
+	 * If current plug is headphone then there is no chance to
+	 * get ADC complete interrupt, so connected cable should be
+	 * headset not headphone.
+	 */
+	if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
+		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 1);
+		wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+		return IRQ_HANDLED;
+	}
+
+	if (!mbhc->mbhc_cfg->detect_extn_cable) {
+		pr_debug("%s: Returning as Extension cable feature not enabled\n",
+			__func__);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+		return IRQ_HANDLED;
+	}
+
+	pr_debug("%s: Disable electrical headset insertion interrupt\n",
+		 __func__);
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_ISRC_EN, 0);
+	mbhc->is_extn_cable = true;
+	mbhc->btn_press_intr = false;
+	wcd_mbhc_adc_detect_plug_type(mbhc);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+}
+
+static struct wcd_mbhc_fn mbhc_fn = {
+	.wcd_mbhc_hs_ins_irq = wcd_mbhc_adc_hs_ins_irq,
+	.wcd_mbhc_hs_rem_irq = wcd_mbhc_adc_hs_rem_irq,
+	.wcd_mbhc_detect_plug_type = wcd_mbhc_adc_detect_plug_type,
+	.wcd_mbhc_detect_anc_plug_type = wcd_mbhc_adc_detect_anc_plug_type,
+	.wcd_cancel_hs_detect_plug = wcd_cancel_hs_detect_plug,
+};
+
+/* Function: wcd_mbhc_adc_init
+ * @mbhc: MBHC function pointer
+ * Description: Initialize MBHC ADC related function pointers to MBHC structure
+ */
+void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc)
+{
+	if (!mbhc) {
+		pr_err("%s: mbhc is NULL\n", __func__);
+		return;
+	}
+	mbhc->mbhc_fn = &mbhc_fn;
+	INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
+}
+EXPORT_SYMBOL(wcd_mbhc_adc_init);
diff --git a/sound/soc/codecs/wcd-mbhc-adc.h b/sound/soc/codecs/wcd-mbhc-adc.h
new file mode 100644
index 0000000..112d508
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-adc.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD_MBHC_ADC_H__
+#define __WCD_MBHC_ADC_H__
+
+#include "wcd-mbhc-v2.h"
+
+enum wcd_mbhc_adc_mux_ctl {
+	MUX_CTL_AUTO = 0,
+	MUX_CTL_IN2P,
+	MUX_CTL_IN3P,
+	MUX_CTL_IN4P,
+	MUX_CTL_HPH_L,
+	MUX_CTL_HPH_R,
+	MUX_CTL_NONE,
+};
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
+void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc);
+#else
+static inline void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc)
+{
+
+}
+#endif
+#endif /* __WCD_MBHC_ADC_H__ */
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.c b/sound/soc/codecs/wcd-mbhc-legacy.c
new file mode 100644
index 0000000..ffba7f6
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-legacy.c
@@ -0,0 +1,941 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "wcd-mbhc-legacy.h"
+#include "wcd-mbhc-v2.h"
+
+static int det_extn_cable_en;
+module_param(det_extn_cable_en, int, 0664);
+MODULE_PARM_DESC(det_extn_cable_en, "enable/disable extn cable detect");
+
+static bool wcd_mbhc_detect_anc_plug_type(struct wcd_mbhc *mbhc)
+{
+	bool anc_mic_found = false;
+	u16 val, hs_comp_res, btn_status = 0;
+	unsigned long retry = 0;
+	int valid_plug_cnt = 0, invalid_plug_cnt = 0;
+	int btn_status_cnt = 0;
+	bool is_check_btn_press = false;
+
+	if (mbhc->mbhc_cfg->anc_micbias < MIC_BIAS_1 ||
+	    mbhc->mbhc_cfg->anc_micbias > MIC_BIAS_4)
+		return false;
+
+	if (!mbhc->mbhc_cb->mbhc_micbias_control)
+		return false;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, val);
+
+	if (val)
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+
+	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+					    mbhc->mbhc_cfg->anc_micbias,
+					    MICB_ENABLE);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x2);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 1);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	/*
+	 * Wait for the 20ms button debounce time. If a 4-pole plug is
+	 * inserted into a 5-pole jack, a button press interrupt will fire
+	 * during ANC plug detection. In that case, even though Hs_comp_res
+	 * is 0, it should not be declared as an ANC plug type.
+	 */
+	usleep_range(20000, 20100);
+
+	/*
+	 * After enabling FSM, to handle slow insertion scenarios,
+	 * check hs_comp_result for few times to see if the IN3 voltage
+	 * is below the Vref
+	 */
+	do {
+		if (wcd_swch_level_remove(mbhc)) {
+			pr_debug("%s: Switch level is low\n", __func__);
+			goto exit;
+		}
+		pr_debug("%s: Retry attempt %lu\n", __func__, retry + 1);
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+		if (!hs_comp_res) {
+			valid_plug_cnt++;
+			is_check_btn_press = true;
+		} else
+			invalid_plug_cnt++;
+		/* Wait 1ms before taking another reading */
+		usleep_range(1000, 1100);
+
+		WCD_MBHC_REG_READ(WCD_MBHC_FSM_STATUS, btn_status);
+		if (btn_status)
+			btn_status_cnt++;
+
+		retry++;
+	} while (retry < ANC_DETECT_RETRY_CNT);
+
+	pr_debug("%s: valid: %d, invalid: %d, btn_status_cnt: %d\n",
+		 __func__, valid_plug_cnt, invalid_plug_cnt, btn_status_cnt);
+
+	/* decision logic */
+	if ((valid_plug_cnt > invalid_plug_cnt) && is_check_btn_press &&
+	    (btn_status_cnt == 0))
+		anc_mic_found = true;
+exit:
+	if (!val)
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 0);
+
+	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+					    mbhc->mbhc_cfg->anc_micbias,
+					    MICB_DISABLE);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x0);
+	pr_debug("%s: anc mic %sfound\n", __func__,
+		 anc_mic_found ? "" : "not ");
+	return anc_mic_found;
+}
+
+/* To determine if cross connection occurred */
+static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
+{
+	u16 swap_res = 0;
+	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_NONE;
+	s16 reg1 = 0;
+	bool hphl_sch_res = 0, hphr_sch_res = 0;
+
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low\n", __func__);
+		return -EINVAL;
+	}
+
+	/* If PA is enabled, dont check for cross-connection */
+	if (mbhc->mbhc_cb->hph_pa_on_status)
+		if (mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec))
+			return false;
+
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
+	/*
+	 * Check if there is any cross connection,
+	 * Micbias and schmitt trigger (HPHL-HPHR)
+	 * needs to be enabled. For some codecs like wcd9335,
+	 * pull-up will already be enabled when this function
+	 * is called for cross-connection identification. No
+	 * need to enable micbias in that case.
+	 */
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 2);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, swap_res);
+	pr_debug("%s: swap_res%x\n", __func__, swap_res);
+
+	/*
+	 * Read reg hphl and hphr schmitt result with cross connection
+	 * bit. These bits will both be "0" in case of cross connection
+	 * otherwise, they stay at 1
+	 */
+	WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch_res);
+	WCD_MBHC_REG_READ(WCD_MBHC_HPHR_SCHMT_RESULT, hphr_sch_res);
+	if (!(hphl_sch_res || hphr_sch_res)) {
+		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+		pr_debug("%s: Cross connection identified\n", __func__);
+	} else {
+		pr_debug("%s: No Cross connection found\n", __func__);
+	}
+
+	/* Disable schmitt trigger and restore micbias */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
+	pr_debug("%s: leave, plug type: %d\n", __func__,  plug_type);
+
+	return (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP) ? true : false;
+}
+
+static bool wcd_is_special_headset(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	int delay = 0, rc;
+	bool ret = false;
+	u16 hs_comp_res;
+	bool is_spl_hs = false;
+
+	/*
+	 * Increase micbias to 2.7V to detect headsets with
+	 * threshold on microphone
+	 */
+	if (mbhc->mbhc_cb->mbhc_micbias_control &&
+	    !mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+		pr_debug("%s: callback fn micb_ctrl_thr_mic not defined\n",
+			 __func__);
+		return false;
+	} else if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+		rc = mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec,
+							MIC_BIAS_2, true);
+		if (rc) {
+			pr_err("%s: Micbias control for thr mic failed, rc: %d\n",
+				__func__, rc);
+			return false;
+		}
+	}
+
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+	pr_debug("%s: special headset, start register writes\n", __func__);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+	while (!is_spl_hs)  {
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			break;
+		}
+		delay = delay + 50;
+		if (mbhc->mbhc_cb->mbhc_common_micb_ctrl) {
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_PRECHARGE,
+					true);
+			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+					MBHC_COMMON_MICB_SET_VAL,
+					true);
+		}
+		/* Wait for 50msec for MICBIAS to settle down */
+		msleep(50);
+		if (mbhc->mbhc_cb->set_auto_zeroing)
+			mbhc->mbhc_cb->set_auto_zeroing(codec, true);
+		/* Wait for 50msec for FSM to update result values */
+		msleep(50);
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+		if (!(hs_comp_res)) {
+			pr_debug("%s: Special headset detected in %d msecs\n",
+					__func__, (delay * 2));
+			is_spl_hs = true;
+		}
+		if (delay == SPECIAL_HS_DETECT_TIME_MS) {
+			pr_debug("%s: Spl headset didn't get detect in 4 sec\n",
+					__func__);
+			break;
+		}
+	}
+	if (is_spl_hs) {
+		pr_debug("%s: Headset with threshold found\n",  __func__);
+		mbhc->micbias_enable = true;
+		ret = true;
+	}
+	if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
+		mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+				MBHC_COMMON_MICB_PRECHARGE,
+				false);
+	if (mbhc->mbhc_cb->set_micbias_value && !mbhc->micbias_enable)
+		mbhc->mbhc_cb->set_micbias_value(codec);
+	if (mbhc->mbhc_cb->set_auto_zeroing)
+		mbhc->mbhc_cb->set_auto_zeroing(codec, false);
+
+	if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
+	    !mbhc->micbias_enable)
+		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec, MIC_BIAS_2,
+						      false);
+
+	pr_debug("%s: leave, micb_enable: %d\n", __func__,
+		  mbhc->micbias_enable);
+	return ret;
+}
+
+static void wcd_mbhc_update_fsm_source(struct wcd_mbhc *mbhc,
+				       enum wcd_mbhc_plug_type plug_type)
+{
+	bool micbias2;
+
+	micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+							MIC_BIAS_2);
+	switch (plug_type) {
+	case MBHC_PLUG_TYPE_HEADPHONE:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		break;
+	case MBHC_PLUG_TYPE_HEADSET:
+	case MBHC_PLUG_TYPE_ANC_HEADPHONE:
+		if (!mbhc->is_hs_recording && !micbias2)
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+		break;
+	default:
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+		break;
+	}
+}
+
+static void wcd_enable_mbhc_supply(struct wcd_mbhc *mbhc,
+			enum wcd_mbhc_plug_type plug_type)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	/*
+	 * Do not disable micbias if recording is going on or
+	 * headset is inserted on the other side of the extn
+	 * cable. If headset has been detected current source
+	 * needs to be kept enabled for button detection to work.
+	 * If the accessory type is invalid or unsupported, we
+	 * dont need to enable either of them.
+	 */
+	if (det_extn_cable_en && mbhc->is_extn_cable &&
+		mbhc->mbhc_cb && mbhc->mbhc_cb->extn_use_mb &&
+		mbhc->mbhc_cb->extn_use_mb(codec)) {
+		if (plug_type == MBHC_PLUG_TYPE_HEADPHONE ||
+		    plug_type == MBHC_PLUG_TYPE_HEADSET)
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+	} else {
+		if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
+			if (mbhc->is_hs_recording || mbhc->micbias_enable) {
+				wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+			} else if ((test_bit(WCD_MBHC_EVENT_PA_HPHL,
+					     &mbhc->event_state)) ||
+				   (test_bit(WCD_MBHC_EVENT_PA_HPHR,
+					     &mbhc->event_state))) {
+				wcd_enable_curr_micbias(mbhc,
+						WCD_MBHC_EN_PULLUP);
+			} else {
+				wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+			}
+		} else if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+		} else {
+			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
+		}
+	}
+}
+
+static bool wcd_mbhc_check_for_spl_headset(struct wcd_mbhc *mbhc,
+					   int *spl_hs_cnt)
+{
+	u16 hs_comp_res_1_8v = 0, hs_comp_res_2_7v = 0;
+	bool spl_hs = false;
+
+	if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+		goto done;
+
+	if (!spl_hs_cnt) {
+		pr_err("%s: spl_hs_cnt is NULL\n", __func__);
+		goto done;
+	}
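+	/*
+	 * A special (high-threshold mic) headset still trips the comparator
+	 * at the default 1.8V micbias but clears once micbias is bumped to
+	 * 2.7V; the two hs_comp_res reads below check exactly that.
+	 */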
+	/* Read back hs_comp_res @ 1.8v Micbias */
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_1_8v);
+	if (!hs_comp_res_1_8v) {
+		spl_hs = false;
+		goto done;
+	}
+
+	/* Bump up MB2 to 2.7v */
+	mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+				mbhc->mbhc_cfg->mbhc_micbias, true);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	usleep_range(10000, 10100);
+
+	/* Read back HS_COMP_RESULT */
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_2_7v);
+	if (!hs_comp_res_2_7v && hs_comp_res_1_8v)
+		spl_hs = true;
+
+	if (spl_hs)
+		*spl_hs_cnt += 1;
+
+	/* MB2 back to 1.8v */
+	if (*spl_hs_cnt != WCD_MBHC_SPL_HS_CNT) {
+		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+				mbhc->mbhc_cfg->mbhc_micbias, false);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+		usleep_range(10000, 10100);
+	}
+
+	if (spl_hs)
+		pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
+
+done:
+	return spl_hs;
+}
+
+/* should be called under interrupt context that hold suspend */
+static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
+					    struct work_struct *work)
+{
+	pr_debug("%s: scheduling correct_swch_plug\n", __func__);
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+	mbhc->hs_detect_work_stop = false;
+	mbhc->mbhc_cb->lock_sleep(mbhc, true);
+	schedule_work(work);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
+					 struct work_struct *work)
+{
+	pr_debug("%s: Canceling correct_plug_swch\n", __func__);
+	mbhc->hs_detect_work_stop = true;
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	if (cancel_work_sync(work)) {
+		pr_debug("%s: correct_plug_swch is canceled\n",
+			 __func__);
+		mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	}
+	WCD_MBHC_RSC_LOCK(mbhc);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_mbhc_detect_plug_type(struct wcd_mbhc *mbhc)
+{
+	struct snd_soc_codec *codec = mbhc->codec;
+	bool micbias1 = false;
+
+	pr_debug("%s: enter\n", __func__);
+	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, false);
+
+	if (mbhc->mbhc_cb->micbias_enable_status)
+		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_1);
+
+	if (mbhc->mbhc_cb->set_cap_mode)
+		mbhc->mbhc_cb->set_cap_mode(codec, micbias1, true);
+
+	if (mbhc->mbhc_cb->mbhc_micbias_control)
+		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+						    MICB_ENABLE);
+	else
+		wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+	/* Re-initialize button press completion object */
+	reinit_completion(&mbhc->btn_press_compl);
+	wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+	pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd_correct_swch_plug(struct work_struct *work)
+{
+	struct wcd_mbhc *mbhc;
+	struct snd_soc_codec *codec;
+	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+	unsigned long timeout;
+	u16 hs_comp_res = 0, hphl_sch = 0, mic_sch = 0, btn_result = 0;
+	bool wrk_complete = false;
+	int pt_gnd_mic_swap_cnt = 0;
+	int no_gnd_mic_swap_cnt = 0;
+	bool is_pa_on = false, spl_hs = false;
+	bool micbias2 = false;
+	bool micbias1 = false;
+	int ret = 0;
+	int rc, spl_hs_count = 0;
+	int cross_conn;
+	int try = 0;
+
+	pr_debug("%s: enter\n", __func__);
+
+	mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
+	codec = mbhc->codec;
+
+	/*
+	 * Enable micbias/pullup for detection in correct work.
+	 * This work will get scheduled from detect_plug_type which
+	 * will already request for pullup/micbias. If the pullup/micbias
+	 * is handled with ref-counts by individual codec drivers, there is
+	 * no need to enable micbias/pullup here.
+	 */
+
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+	/* Enable HW FSM */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	/*
+	 * Check for any button press interrupts before starting 3-sec
+	 * loop.
+	 */
+	rc = wait_for_completion_timeout(&mbhc->btn_press_compl,
+			msecs_to_jiffies(WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS));
+
+	WCD_MBHC_REG_READ(WCD_MBHC_BTN_RESULT, btn_result);
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+	if (!rc) {
+		pr_debug("%s No btn press interrupt\n", __func__);
+		if (!btn_result && !hs_comp_res)
+			plug_type = MBHC_PLUG_TYPE_HEADSET;
+		else if (!btn_result && hs_comp_res)
+			plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+		else
+			plug_type = MBHC_PLUG_TYPE_INVALID;
+	} else {
+		if (!btn_result && !hs_comp_res)
+			plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+		else
+			plug_type = MBHC_PLUG_TYPE_INVALID;
+	}
+
+	do {
+		cross_conn = wcd_check_cross_conn(mbhc);
+		try++;
+	} while (try < GND_MIC_SWAP_THRESHOLD);
+
+	/*
+	 * Check for cross connection 4 times.
+	 * Consider the result of the fourth iteration.
+	 */
+	if (cross_conn > 0) {
+		pr_debug("%s: cross con found, start polling\n",
+			 __func__);
+		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+		pr_debug("%s: Plug found, plug type is %d\n",
+			 __func__, plug_type);
+		goto correct_plug_type;
+	}
+
+	if ((plug_type == MBHC_PLUG_TYPE_HEADSET ||
+	     plug_type == MBHC_PLUG_TYPE_HEADPHONE) &&
+	    (!wcd_swch_level_remove(mbhc))) {
+		WCD_MBHC_RSC_LOCK(mbhc);
+		if (mbhc->current_plug ==  MBHC_PLUG_TYPE_HIGH_HPH)
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
+						 0);
+		wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
+
+correct_plug_type:
+
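+	/*
+	 * Poll the detection FSM for up to HS_DETECT_PLUG_TIME_MS (3 sec),
+	 * re-evaluating the results until the plug type settles or a stop
+	 * request is seen.
+	 */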
+	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+	while (!time_after(jiffies, timeout)) {
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			wcd_enable_curr_micbias(mbhc,
+						WCD_MBHC_EN_NONE);
+			if (mbhc->micbias_enable) {
+				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+					mbhc->codec, MIC_BIAS_2, false);
+				if (mbhc->mbhc_cb->set_micbias_value)
+					mbhc->mbhc_cb->set_micbias_value(
+							mbhc->codec);
+				mbhc->micbias_enable = false;
+			}
+			goto exit;
+		}
+		if (mbhc->btn_press_intr) {
+			wcd_cancel_btn_work(mbhc);
+			mbhc->btn_press_intr = false;
+		}
+		/* Toggle FSM */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+
+		/* allow some time and re-check whether stop was requested */
+		msleep(20);
+		if (mbhc->hs_detect_work_stop) {
+			pr_debug("%s: stop requested: %d\n", __func__,
+					mbhc->hs_detect_work_stop);
+			wcd_enable_curr_micbias(mbhc,
+						WCD_MBHC_EN_NONE);
+			if (mbhc->micbias_enable) {
+				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+					mbhc->codec, MIC_BIAS_2, false);
+				if (mbhc->mbhc_cb->set_micbias_value)
+					mbhc->mbhc_cb->set_micbias_value(
+							mbhc->codec);
+				mbhc->micbias_enable = false;
+			}
+			goto exit;
+		}
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+		pr_debug("%s: hs_comp_res: %x\n", __func__, hs_comp_res);
+		if (mbhc->mbhc_cb->hph_pa_on_status)
+			is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
+
+		/*
+		 * Instead of hogging the system with continuous polling, wait
+		 * for some time and then re-check the stop request.
+		 */
+		msleep(180);
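+		/*
+		 * If the comparator still reports open, check whether this is
+		 * a special (high-threshold) headset by bumping up micbias
+		 * and re-reading HS_COMP_RESULT.
+		 */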
+		if (hs_comp_res && (spl_hs_count < WCD_MBHC_SPL_HS_CNT)) {
+			spl_hs = wcd_mbhc_check_for_spl_headset(mbhc,
+								&spl_hs_count);
+
+			if (spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+				hs_comp_res = 0;
+				spl_hs = true;
+				mbhc->micbias_enable = true;
+			}
+		}
+
+		if ((!hs_comp_res) && (!is_pa_on)) {
+			/* Check for cross connection */
+			ret = wcd_check_cross_conn(mbhc);
+			if (ret < 0) {
+				continue;
+			} else if (ret > 0) {
+				pt_gnd_mic_swap_cnt++;
+				no_gnd_mic_swap_cnt = 0;
+				if (pt_gnd_mic_swap_cnt <
+						GND_MIC_SWAP_THRESHOLD) {
+					continue;
+				} else if (pt_gnd_mic_swap_cnt >
+						GND_MIC_SWAP_THRESHOLD) {
+					/*
+					 * The GND/MIC swap did not take
+					 * effect; report an unsupported plug.
+					 */
+					pr_debug("%s: switch didn't work\n",
+						  __func__);
+					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+					goto report;
+				} else {
+					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+				}
+			} else {
+				no_gnd_mic_swap_cnt++;
+				pt_gnd_mic_swap_cnt = 0;
+				plug_type = MBHC_PLUG_TYPE_HEADSET;
+				if ((no_gnd_mic_swap_cnt <
+				    GND_MIC_SWAP_THRESHOLD) &&
+				    (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
+					continue;
+				} else {
+					no_gnd_mic_swap_cnt = 0;
+				}
+			}
+			if ((pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) &&
+				(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+				/*
+				 * if switch is toggled, check again,
+				 * otherwise report unsupported plug
+				 */
+				if (mbhc->mbhc_cfg->swap_gnd_mic &&
+					mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+					pr_debug("%s: US_EU gpio present, flip switch\n",
+						 __func__);
+					continue;
+				}
+			}
+		}
+
+		WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+		WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+		if (hs_comp_res && !(hphl_sch || mic_sch)) {
+			pr_debug("%s: cable is extension cable\n", __func__);
+			plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+			wrk_complete = true;
+		} else {
+			pr_debug("%s: cable might be headset: %d\n", __func__,
+					plug_type);
+			if (!(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+				plug_type = MBHC_PLUG_TYPE_HEADSET;
+				/*
+				 * Report headset only if not already reported
+				 * and if there is no button press pending
+				 * release.
+				 */
+				if (((mbhc->current_plug !=
+				      MBHC_PLUG_TYPE_HEADSET) &&
+				     (mbhc->current_plug !=
+				      MBHC_PLUG_TYPE_ANC_HEADPHONE)) &&
+				    !wcd_swch_level_remove(mbhc) &&
+				    !mbhc->btn_press_intr) {
+					pr_debug("%s: cable is %sheadset\n",
+						__func__,
+						((spl_hs_count ==
+							WCD_MBHC_SPL_HS_CNT) ?
+							"special ":""));
+					goto report;
+				}
+			}
+			wrk_complete = false;
+		}
+	}
+	if (!wrk_complete && mbhc->btn_press_intr) {
+		pr_debug("%s: Can be slow insertion of headphone\n", __func__);
+		wcd_cancel_btn_work(mbhc);
+		plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+	}
+	/*
+	 * If plug_type is headset, we might have already reported it either
+	 * in detect_plug_type or in the above while loop; no need to report
+	 * it again.
+	 */
+	if (!wrk_complete && ((plug_type == MBHC_PLUG_TYPE_HEADSET) ||
+	    (plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE))) {
+		pr_debug("%s: plug_type:0x%x already reported\n",
+			 __func__, mbhc->current_plug);
+		goto enable_supply;
+	}
+
+	if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH &&
+		(!det_extn_cable_en)) {
+		if (wcd_is_special_headset(mbhc)) {
+			pr_debug("%s: Special headset found %d\n",
+					__func__, plug_type);
+			plug_type = MBHC_PLUG_TYPE_HEADSET;
+			goto report;
+		}
+	}
+
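+	/*
+	 * Final reporting: bail out if the plug was mechanically removed,
+	 * demote a GND/MIC-swap result seen together with a button press
+	 * to headphone, then report the resolved plug type.
+	 */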
+report:
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low\n", __func__);
+		goto exit;
+	}
+	if (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP && mbhc->btn_press_intr) {
+		pr_debug("%s: insertion of headphone with swap\n", __func__);
+		wcd_cancel_btn_work(mbhc);
+		plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+	}
+	pr_debug("%s: Valid plug found, plug type %d wrk_cmpt %d btn_intr %d\n",
+			__func__, plug_type, wrk_complete,
+			mbhc->btn_press_intr);
+	WCD_MBHC_RSC_LOCK(mbhc);
+	wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
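+	/*
+	 * Keep the FSM button source or the micbias/current source configured
+	 * for the detected plug type so that button detection keeps working.
+	 */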
+enable_supply:
+	if (mbhc->mbhc_cb->mbhc_micbias_control)
+		wcd_mbhc_update_fsm_source(mbhc, plug_type);
+	else
+		wcd_enable_mbhc_supply(mbhc, plug_type);
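+	/*
+	 * Common exit: drop micbias if it is no longer needed, re-arm the
+	 * electrical removal interrupt when extension-cable detection is
+	 * enabled, restore cap mode and the HPH pull-down, and release the
+	 * wakelock taken when this work was scheduled.
+	 */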
+exit:
+	if (mbhc->mbhc_cb->mbhc_micbias_control &&
+	    !mbhc->micbias_enable)
+		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+						    MICB_DISABLE);
+	if (mbhc->mbhc_cb->micbias_enable_status) {
+		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_1);
+		micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+								MIC_BIAS_2);
+	}
+
+	if (mbhc->mbhc_cfg->detect_extn_cable &&
+	    ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
+	     (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
+	    !mbhc->hs_detect_work_stop) {
+		WCD_MBHC_RSC_LOCK(mbhc);
+		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, true);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
+	if (mbhc->mbhc_cb->set_cap_mode)
+		mbhc->mbhc_cb->set_cap_mode(codec, micbias1, micbias2);
+
+	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, true);
+
+	mbhc->mbhc_cb->lock_sleep(mbhc, false);
+	pr_debug("%s: leave\n", __func__);
+}
+
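+/*
+ * Electrical removal interrupt handler: poll HS_COMP_RESULT for
+ * WCD_FAKE_REMOVAL_MIN_PERIOD_MS to filter out fake removals and, if the
+ * headset on the extension cable was really pulled out, report the
+ * remaining cable as a LINEOUT device.
+ */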
+static irqreturn_t wcd_mbhc_hs_rem_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+	u8 hs_comp_result = 0, hphl_sch = 0, mic_sch = 0;
+	static u16 hphl_trigerred;
+	static u16 mic_trigerred;
+	unsigned long timeout;
+	bool removed = true;
+	int retry = 0;
+
+	pr_debug("%s: enter\n", __func__);
+
+	WCD_MBHC_RSC_LOCK(mbhc);
+
+	timeout = jiffies +
+		  msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
+	do {
+		retry++;
+		/*
+		 * read the result register every 10ms to look for
+		 * any change in HS_COMP_RESULT bit
+		 */
+		usleep_range(10000, 10100);
+		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
+		pr_debug("%s: Check result reg for fake removal: hs_comp_res %x\n",
+			 __func__, hs_comp_result);
+		if ((!hs_comp_result) &&
+		    retry > FAKE_REM_RETRY_ATTEMPTS) {
+			removed = false;
+			break;
+		}
+	} while (!time_after(jiffies, timeout));
+
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low ", __func__);
+		goto exit;
+	}
+	pr_debug("%s: headset %s actually removed\n", __func__,
+		removed ? "" : "not ");
+
+	WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+	WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
+
+	if (removed) {
+		if (!(hphl_sch && mic_sch && hs_comp_result)) {
+			/*
+			 * The extension cable is still plugged in;
+			 * report it as a LINEOUT device.
+			 */
+			goto report_unplug;
+		} else {
+			if (!mic_sch) {
+				mic_trigerred++;
+				pr_debug("%s: Removal MIC trigerred %d\n",
+					 __func__, mic_trigerred);
+			}
+			if (!hphl_sch) {
+				hphl_trigerred++;
+				pr_debug("%s: Removal HPHL trigerred %d\n",
+					 __func__, hphl_trigerred);
+			}
+			if (mic_trigerred && hphl_trigerred) {
+				/*
+				 * The extension cable is still plugged in;
+				 * report it as a LINEOUT device.
+				 */
+				goto report_unplug;
+			}
+		}
+	}
+exit:
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+
+report_unplug:
+	wcd_mbhc_elec_hs_report_unplug(mbhc);
+	hphl_trigerred = 0;
+	mic_trigerred = 0;
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+}
+
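+/*
+ * Electrical insertion interrupt handler: once both the HPHL and MIC
+ * Schmitt triggers have fired (either together or accumulated over
+ * successive interrupts), hand over to plug-type detection for a headset
+ * inserted on the far end of an extension cable.
+ */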
+static irqreturn_t wcd_mbhc_hs_ins_irq(int irq, void *data)
+{
+	struct wcd_mbhc *mbhc = data;
+	bool detection_type = 0, hphl_sch = 0, mic_sch = 0;
+	u16 elect_result = 0;
+	static u16 hphl_trigerred;
+	static u16 mic_trigerred;
+
+	pr_debug("%s: enter\n", __func__);
+	if (!mbhc->mbhc_cfg->detect_extn_cable) {
+		pr_debug("%s: Returning as Extension cable feature not enabled\n",
+			__func__);
+		return IRQ_HANDLED;
+	}
+	WCD_MBHC_RSC_LOCK(mbhc);
+
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_DETECTION_TYPE, detection_type);
+	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, elect_result);
+
+	pr_debug("%s: detection_type %d, elect_result %x\n", __func__,
+				detection_type, elect_result);
+	if (detection_type) {
+		/* check if both HPHL and MIC Schmitt triggers are triggered */
+		WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+		WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+		if (hphl_sch && mic_sch) {
+			/* Go for plug type determination */
+			pr_debug("%s: Go for plug type determination\n",
+				  __func__);
+			goto determine_plug;
+
+		} else {
+			if (mic_sch) {
+				mic_trigerred++;
+				pr_debug("%s: Insertion MIC trigerred %d\n",
+					 __func__, mic_trigerred);
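+				/*
+				 * Only the MIC trigger fired so far: pulse
+				 * the Schmitt-trigger current source off and
+				 * back on (presumably to re-arm it) and keep
+				 * waiting for the HPHL trigger.
+				 */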
+				WCD_MBHC_REG_UPDATE_BITS(
+						WCD_MBHC_ELECT_SCHMT_ISRC,
+						0);
+				msleep(20);
+				WCD_MBHC_REG_UPDATE_BITS(
+						WCD_MBHC_ELECT_SCHMT_ISRC,
+						1);
+			}
+			if (hphl_sch) {
+				hphl_trigerred++;
+				pr_debug("%s: Insertion HPHL trigerred %d\n",
+					 __func__, hphl_trigerred);
+			}
+			if (mic_trigerred && hphl_trigerred) {
+				/* Go for plug type determination */
+				pr_debug("%s: Go for plug type determination\n",
+					 __func__);
+				goto determine_plug;
+			}
+		}
+	}
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+
+determine_plug:
+	/*
+	 * Disable HPHL trigger and MIC Schmitt triggers.
+	 * Setup for insertion detection.
+	 */
+	pr_debug("%s: Disable insertion interrupt\n", __func__);
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+			     false);
+
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+	hphl_trigerred = 0;
+	mic_trigerred = 0;
+	mbhc->is_extn_cable = true;
+	mbhc->btn_press_intr = false;
+	wcd_mbhc_detect_plug_type(mbhc);
+	WCD_MBHC_RSC_UNLOCK(mbhc);
+	pr_debug("%s: leave\n", __func__);
+	return IRQ_HANDLED;
+}
+
+static struct wcd_mbhc_fn mbhc_fn = {
+	.wcd_mbhc_hs_ins_irq = wcd_mbhc_hs_ins_irq,
+	.wcd_mbhc_hs_rem_irq = wcd_mbhc_hs_rem_irq,
+	.wcd_mbhc_detect_plug_type = wcd_mbhc_detect_plug_type,
+	.wcd_mbhc_detect_anc_plug_type = wcd_mbhc_detect_anc_plug_type,
+	.wcd_cancel_hs_detect_plug = wcd_cancel_hs_detect_plug,
+};
+
+/* Function: wcd_mbhc_legacy_init
+ * @mbhc: pointer to the MBHC structure
+ * Description: Initialize the MBHC structure with the legacy-detection
+ * function pointers
+ */
+void wcd_mbhc_legacy_init(struct wcd_mbhc *mbhc)
+{
+	if (!mbhc) {
+		pr_err("%s: mbhc is NULL\n", __func__);
+		return;
+	}
+	mbhc->mbhc_fn = &mbhc_fn;
+	INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
+}
+EXPORT_SYMBOL(wcd_mbhc_legacy_init);
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.h b/sound/soc/codecs/wcd-mbhc-legacy.h
new file mode 100644
index 0000000..594393d
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-legacy.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD_MBHC_LEGACY_H__
+#define __WCD_MBHC_LEGACY_H__
+
+#include "wcdcal-hwdep.h"
+#include "wcd-mbhc-v2.h"
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC_LEGACY
+void wcd_mbhc_legacy_init(struct wcd_mbhc *mbhc);
+#else
+static inline void wcd_mbhc_legacy_init(struct wcd_mbhc *mbhc)
+{
+}
+#endif
+
+#endif /* __WCD_MBHC_LEGACY_H__ */
diff --git a/sound/soc/codecs/wcd-mbhc-v2-api.h b/sound/soc/codecs/wcd-mbhc-v2-api.h
new file mode 100644
index 0000000..fab2b49
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-v2-api.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD_MBHC_V2_API_H__
+#define __WCD_MBHC_V2_API_H__
+
+#include "wcd-mbhc-v2.h"
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC
+int wcd_mbhc_start(struct wcd_mbhc *mbhc,
+		       struct wcd_mbhc_config *mbhc_cfg);
+void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
+int wcd_mbhc_init(struct wcd_mbhc *mbhc, struct snd_soc_codec *codec,
+		      const struct wcd_mbhc_cb *mbhc_cb,
+		      const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+		      struct wcd_mbhc_register *wcd_mbhc_regs,
+		      bool impedance_det_en);
+int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
+			   uint32_t *zr);
+void wcd_mbhc_deinit(struct wcd_mbhc *mbhc);
+
+#else
+static inline void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
+{
+}
+static inline int wcd_mbhc_init(struct wcd_mbhc *mbhc,
+				struct snd_soc_codec *codec,
+				const struct wcd_mbhc_cb *mbhc_cb,
+				const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+				struct wcd_mbhc_register *wcd_mbhc_regs,
+				bool impedance_det_en)
+{
+	return 0;
+}
+static inline int wcd_mbhc_start(struct wcd_mbhc *mbhc,
+				 struct wcd_mbhc_config *mbhc_cfg)
+{
+	return 0;
+}
+static inline int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc,
+					 uint32_t *zl,
+					 uint32_t *zr)
+{
+	*zl = 0;
+	*zr = 0;
+	return -EINVAL;
+}
+static inline void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
+{
+}
+#endif
+
+#endif /* __WCD_MBHC_V2_API_H__ */
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index f4c68ff..510a8dc 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -25,51 +25,20 @@
 #include <linux/input.h>
 #include <linux/firmware.h>
 #include <linux/completion.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
 #include <sound/soc.h>
 #include <sound/jack.h>
-#include "wcd-mbhc-v2.h"
 #include "wcdcal-hwdep.h"
+#include "wcd-mbhc-legacy.h"
+#include "wcd-mbhc-adc.h"
+#include "wcd-mbhc-v2-api.h"
 
-#define WCD_MBHC_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
-			   SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
-			   SND_JACK_MECHANICAL | SND_JACK_MICROPHONE2 | \
-			   SND_JACK_UNSUPPORTED)
-
-#define WCD_MBHC_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
-				  SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
-				  SND_JACK_BTN_4 | SND_JACK_BTN_5)
-#define OCP_ATTEMPT 20
-#define HS_DETECT_PLUG_TIME_MS (3 * 1000)
-#define SPECIAL_HS_DETECT_TIME_MS (2 * 1000)
-#define MBHC_BUTTON_PRESS_THRESHOLD_MIN 250
-#define GND_MIC_SWAP_THRESHOLD 4
-#define WCD_FAKE_REMOVAL_MIN_PERIOD_MS 100
-#define HS_VREF_MIN_VAL 1400
-#define FW_READ_ATTEMPTS 15
-#define FW_READ_TIMEOUT 4000000
-#define FAKE_REM_RETRY_ATTEMPTS 3
-#define MAX_IMPED 60000
-
-#define WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS  50
-#define ANC_DETECT_RETRY_CNT 7
-#define WCD_MBHC_SPL_HS_CNT  1
-
-static int det_extn_cable_en;
-module_param(det_extn_cable_en, int, 0664);
-MODULE_PARM_DESC(det_extn_cable_en, "enable/disable extn cable detect");
-
-enum wcd_mbhc_cs_mb_en_flag {
-	WCD_MBHC_EN_CS = 0,
-	WCD_MBHC_EN_MB,
-	WCD_MBHC_EN_PULLUP,
-	WCD_MBHC_EN_NONE,
-};
-
-static void wcd_mbhc_jack_report(struct wcd_mbhc *mbhc,
-				struct snd_soc_jack *jack, int status, int mask)
+void wcd_mbhc_jack_report(struct wcd_mbhc *mbhc,
+			  struct snd_soc_jack *jack, int status, int mask)
 {
 	snd_soc_jack_report(jack, status, mask);
 }
+EXPORT_SYMBOL(wcd_mbhc_jack_report);
 
 static void __hphocp_off_report(struct wcd_mbhc *mbhc, u32 jack_status,
 				int irq)
@@ -143,7 +112,7 @@
 				   micbias);
 }
 
-static void wcd_enable_curr_micbias(const struct wcd_mbhc *mbhc,
+void wcd_enable_curr_micbias(const struct wcd_mbhc *mbhc,
 				const enum wcd_mbhc_cs_mb_en_flag cs_mb_en)
 {
 
@@ -193,6 +162,7 @@
 
 	pr_debug("%s: exit\n", __func__);
 }
+EXPORT_SYMBOL(wcd_enable_curr_micbias);
 
 static const char *wcd_mbhc_get_event_string(int event)
 {
@@ -413,7 +383,7 @@
 	return 0;
 }
 
-static int wcd_cancel_btn_work(struct wcd_mbhc *mbhc)
+int wcd_cancel_btn_work(struct wcd_mbhc *mbhc)
 {
 	int r;
 
@@ -426,40 +396,16 @@
 		mbhc->mbhc_cb->lock_sleep(mbhc, false);
 	return r;
 }
+EXPORT_SYMBOL(wcd_cancel_btn_work);
 
-static bool wcd_swch_level_remove(struct wcd_mbhc *mbhc)
+bool wcd_swch_level_remove(struct wcd_mbhc *mbhc)
 {
 	u16 result2 = 0;
 
 	WCD_MBHC_REG_READ(WCD_MBHC_SWCH_LEVEL_REMOVE, result2);
 	return (result2) ? true : false;
 }
-
-/* should be called under interrupt context that hold suspend */
-static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
-					    struct work_struct *work)
-{
-	pr_debug("%s: scheduling correct_swch_plug\n", __func__);
-	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
-	mbhc->hs_detect_work_stop = false;
-	mbhc->mbhc_cb->lock_sleep(mbhc, true);
-	schedule_work(work);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
-					 struct work_struct *work)
-{
-	pr_debug("%s: Canceling correct_plug_swch\n", __func__);
-	mbhc->hs_detect_work_stop = true;
-	WCD_MBHC_RSC_UNLOCK(mbhc);
-	if (cancel_work_sync(work)) {
-		pr_debug("%s: correct_plug_swch is canceled\n",
-			 __func__);
-		mbhc->mbhc_cb->lock_sleep(mbhc, false);
-	}
-	WCD_MBHC_RSC_LOCK(mbhc);
-}
+EXPORT_SYMBOL(wcd_swch_level_remove);
 
 static void wcd_mbhc_clr_and_turnon_hph_padac(struct wcd_mbhc *mbhc)
 {
@@ -538,8 +484,9 @@
 	else
 		return -EINVAL;
 }
+EXPORT_SYMBOL(wcd_mbhc_get_impedance);
 
-static void wcd_mbhc_hs_elec_irq(struct wcd_mbhc *mbhc, int irq_type,
+void wcd_mbhc_hs_elec_irq(struct wcd_mbhc *mbhc, int irq_type,
 				 bool enable)
 {
 	int irq;
@@ -566,10 +513,15 @@
 			clear_bit(irq_type, &mbhc->intr_status);
 	}
 }
+EXPORT_SYMBOL(wcd_mbhc_hs_elec_irq);
 
 static void wcd_mbhc_report_plug(struct wcd_mbhc *mbhc, int insertion,
 				enum snd_jack_types jack_type)
 {
+	struct snd_soc_codec *codec = mbhc->codec;
+	bool is_pa_on = false;
+	u8 fsm_en = 0;
+
 	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
 
 	pr_debug("%s: enter insertion %d hph_status %x\n",
@@ -595,14 +547,14 @@
 		if (mbhc->micbias_enable) {
 			if (mbhc->mbhc_cb->mbhc_micbias_control)
 				mbhc->mbhc_cb->mbhc_micbias_control(
-						mbhc->codec, MIC_BIAS_2,
+						codec, MIC_BIAS_2,
 						MICB_DISABLE);
 			if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
 				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
-						mbhc->codec,
+						codec,
 						MIC_BIAS_2, false);
 			if (mbhc->mbhc_cb->set_micbias_value) {
-				mbhc->mbhc_cb->set_micbias_value(mbhc->codec);
+				mbhc->mbhc_cb->set_micbias_value(codec);
 				WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
 			}
 			mbhc->micbias_enable = false;
@@ -632,15 +584,15 @@
 			if (mbhc->micbias_enable) {
 				if (mbhc->mbhc_cb->mbhc_micbias_control)
 					mbhc->mbhc_cb->mbhc_micbias_control(
-						mbhc->codec, MIC_BIAS_2,
+						codec, MIC_BIAS_2,
 						MICB_DISABLE);
 				if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
 					mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
-						mbhc->codec,
+						codec,
 						MIC_BIAS_2, false);
 				if (mbhc->mbhc_cb->set_micbias_value) {
 					mbhc->mbhc_cb->set_micbias_value(
-							mbhc->codec);
+							codec);
 					WCD_MBHC_REG_UPDATE_BITS(
 							WCD_MBHC_MICB_CTRL, 0);
 				}
@@ -660,9 +612,6 @@
 				wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
 				pr_debug("%s: set up elec removal detection\n",
 					  __func__);
-				WCD_MBHC_REG_UPDATE_BITS(
-						WCD_MBHC_ELECT_DETECTION_TYPE,
-						0);
 				usleep_range(200, 210);
 				wcd_mbhc_hs_elec_irq(mbhc,
 						     WCD_MBHC_ELEC_HS_REM,
@@ -691,11 +640,23 @@
 		} else if (jack_type == SND_JACK_ANC_HEADPHONE)
 			mbhc->current_plug = MBHC_PLUG_TYPE_ANC_HEADPHONE;
 
+		if (mbhc->mbhc_cb->hph_pa_on_status)
+			is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
+
 		if (mbhc->impedance_detect &&
 			mbhc->mbhc_cb->compute_impedance &&
-			(mbhc->mbhc_cfg->linein_th != 0)) {
+			(mbhc->mbhc_cfg->linein_th != 0) &&
+			(!is_pa_on)) {
+			/* Set MUX_CTL to AUTO for Z-det */
+			WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL,
+						 MUX_CTL_AUTO);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
 			mbhc->mbhc_cb->compute_impedance(mbhc,
 					&mbhc->zl, &mbhc->zr);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN,
+						 fsm_en);
 			if ((mbhc->zl > mbhc->mbhc_cfg->linein_th &&
 				mbhc->zl < MAX_IMPED) &&
 				(mbhc->zr > mbhc->mbhc_cfg->linein_th &&
@@ -729,94 +690,47 @@
 	pr_debug("%s: leave hph_status %x\n", __func__, mbhc->hph_status);
 }
 
-static bool wcd_mbhc_detect_anc_plug_type(struct wcd_mbhc *mbhc)
+void wcd_mbhc_elec_hs_report_unplug(struct wcd_mbhc *mbhc)
 {
-	bool anc_mic_found = false;
-	u16 val, hs_comp_res, btn_status = 0;
-	unsigned long retry = 0;
-	int valid_plug_cnt = 0, invalid_plug_cnt = 0;
-	int btn_status_cnt = 0;
-	bool is_check_btn_press = false;
+	/* cancel pending button press */
+	if (wcd_cancel_btn_work(mbhc))
+		pr_debug("%s: button press is canceled\n", __func__);
+	/* cancel correct work function */
+	if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+		mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
+						&mbhc->correct_plug_swch);
+	else
+		pr_info("%s: hs_detect_plug work not cancelled\n", __func__);
 
-
-	if (mbhc->mbhc_cfg->anc_micbias < MIC_BIAS_1 ||
-	    mbhc->mbhc_cfg->anc_micbias > MIC_BIAS_4)
-		return false;
-
-	if (!mbhc->mbhc_cb->mbhc_micbias_control)
-		return false;
-
-	WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, val);
-
-	if (val)
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-
-	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
-					    mbhc->mbhc_cfg->anc_micbias,
-					    MICB_ENABLE);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x2);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 1);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+	pr_debug("%s: Report extension cable\n", __func__);
+	wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
 	/*
-	 * wait for button debounce time 20ms. If 4-pole plug is inserted
-	 * into 5-pole jack, then there will be a button press interrupt
-	 * during anc plug detection. In that case though Hs_comp_res is 0,
-	 * it should not be declared as ANC plug type
+	 * If PA is enabled HPHL schmitt trigger can
+	 * be unreliable, make sure to disable it
 	 */
-	usleep_range(20000, 20100);
-
+	if (test_bit(WCD_MBHC_EVENT_PA_HPHL,
+		&mbhc->event_state))
+		wcd_mbhc_set_and_turnoff_hph_padac(mbhc);
 	/*
-	 * After enabling FSM, to handle slow insertion scenarios,
-	 * check hs_comp_result for few times to see if the IN3 voltage
-	 * is below the Vref
+	 * Disable HPHL trigger and MIC Schmitt triggers.
+	 * Setup for insertion detection.
 	 */
-	do {
-		if (wcd_swch_level_remove(mbhc)) {
-			pr_debug("%s: Switch level is low\n", __func__);
-			goto exit;
-		}
-		pr_debug("%s: Retry attempt %lu\n", __func__, retry + 1);
-		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
+			     false);
+	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
+	/* Disable HW FSM */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 3);
 
-		if (!hs_comp_res) {
-			valid_plug_cnt++;
-			is_check_btn_press = true;
-		} else
-			invalid_plug_cnt++;
-		/* Wait 1ms before taking another reading */
-		usleep_range(1000, 1100);
-
-		WCD_MBHC_REG_READ(WCD_MBHC_FSM_STATUS, btn_status);
-		if (btn_status)
-			btn_status_cnt++;
-
-		retry++;
-	} while (retry < ANC_DETECT_RETRY_CNT);
-
-	pr_debug("%s: valid: %d, invalid: %d, btn_status_cnt: %d\n",
-		 __func__, valid_plug_cnt, invalid_plug_cnt, btn_status_cnt);
-
-	/* decision logic */
-	if ((valid_plug_cnt > invalid_plug_cnt) && is_check_btn_press &&
-	    (btn_status_cnt == 0))
-		anc_mic_found = true;
-exit:
-	if (!val)
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 0);
-
-	mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
-					    mbhc->mbhc_cfg->anc_micbias,
-					    MICB_DISABLE);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x0);
-	pr_debug("%s: anc mic %sfound\n", __func__,
-		 anc_mic_found ? "" : "not ");
-	return anc_mic_found;
+	/* Set the detection type appropriately */
+	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE, 1);
+	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+			     true);
 }
+EXPORT_SYMBOL(wcd_mbhc_elec_hs_report_unplug);
 
-static void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
-					 enum wcd_mbhc_plug_type plug_type)
+void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
+				   enum wcd_mbhc_plug_type plug_type)
 {
 	bool anc_mic_found = false;
 	enum snd_jack_types jack_type;
@@ -844,9 +758,10 @@
 			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADSET);
 		wcd_mbhc_report_plug(mbhc, 1, SND_JACK_UNSUPPORTED);
 	} else if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
-		if (mbhc->mbhc_cfg->enable_anc_mic_detect)
-			anc_mic_found = wcd_mbhc_detect_anc_plug_type(mbhc);
-
+		if (mbhc->mbhc_cfg->enable_anc_mic_detect &&
+		    mbhc->mbhc_fn->wcd_mbhc_detect_anc_plug_type)
+			anc_mic_found =
+			mbhc->mbhc_fn->wcd_mbhc_detect_anc_plug_type(mbhc);
 		jack_type = SND_JACK_HEADSET;
 		if (anc_mic_found)
 			jack_type = SND_JACK_ANC_HEADPHONE;
@@ -887,622 +802,17 @@
 exit:
 	pr_debug("%s: leave\n", __func__);
 }
-
-/* To determine if cross connection occurred */
-static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
-{
-	u16 swap_res = 0;
-	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_NONE;
-	s16 reg1 = 0;
-	bool hphl_sch_res = 0, hphr_sch_res = 0;
-
-	if (wcd_swch_level_remove(mbhc)) {
-		pr_debug("%s: Switch level is low\n", __func__);
-		return -EINVAL;
-	}
-
-	/* If PA is enabled, dont check for cross-connection */
-	if (mbhc->mbhc_cb->hph_pa_on_status)
-		if (mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec))
-			return false;
-
-	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
-	/*
-	 * Check if there is any cross connection,
-	 * Micbias and schmitt trigger (HPHL-HPHR)
-	 * needs to be enabled. For some codecs like wcd9335,
-	 * pull-up will already be enabled when this function
-	 * is called for cross-connection identification. No
-	 * need to enable micbias in that case.
-	 */
-	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 2);
-
-	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, swap_res);
-	pr_debug("%s: swap_res%x\n", __func__, swap_res);
-
-	/*
-	 * Read reg hphl and hphr schmitt result with cross connection
-	 * bit. These bits will both be "0" in case of cross connection
-	 * otherwise, they stay at 1
-	 */
-	WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch_res);
-	WCD_MBHC_REG_READ(WCD_MBHC_HPHR_SCHMT_RESULT, hphr_sch_res);
-	if (!(hphl_sch_res || hphr_sch_res)) {
-		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
-		pr_debug("%s: Cross connection identified\n", __func__);
-	} else {
-		pr_debug("%s: No Cross connection found\n", __func__);
-	}
-
-	/* Disable schmitt trigger and restore micbias */
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
-	pr_debug("%s: leave, plug type: %d\n", __func__,  plug_type);
-
-	return (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP) ? true : false;
-}
-
-static bool wcd_is_special_headset(struct wcd_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	int delay = 0, rc;
-	bool ret = false;
-	u16 hs_comp_res;
-	bool is_spl_hs = false;
-
-	/*
-	 * Increase micbias to 2.7V to detect headsets with
-	 * threshold on microphone
-	 */
-	if (mbhc->mbhc_cb->mbhc_micbias_control &&
-	    !mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
-		pr_debug("%s: callback fn micb_ctrl_thr_mic not defined\n",
-			 __func__);
-		return false;
-	} else if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
-		rc = mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec,
-							MIC_BIAS_2, true);
-		if (rc) {
-			pr_err("%s: Micbias control for thr mic failed, rc: %d\n",
-				__func__, rc);
-			return false;
-		}
-	}
-
-	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-
-	pr_debug("%s: special headset, start register writes\n", __func__);
-
-	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
-	while (!is_spl_hs)  {
-		if (mbhc->hs_detect_work_stop) {
-			pr_debug("%s: stop requested: %d\n", __func__,
-					mbhc->hs_detect_work_stop);
-			break;
-		}
-		delay = delay + 50;
-		if (mbhc->mbhc_cb->mbhc_common_micb_ctrl) {
-			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
-					MBHC_COMMON_MICB_PRECHARGE,
-					true);
-			mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
-					MBHC_COMMON_MICB_SET_VAL,
-					true);
-		}
-		/* Wait for 50msec for MICBIAS to settle down */
-		msleep(50);
-		if (mbhc->mbhc_cb->set_auto_zeroing)
-			mbhc->mbhc_cb->set_auto_zeroing(codec, true);
-		/* Wait for 50msec for FSM to update result values */
-		msleep(50);
-		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
-		if (!(hs_comp_res)) {
-			pr_debug("%s: Special headset detected in %d msecs\n",
-					__func__, (delay * 2));
-			is_spl_hs = true;
-		}
-		if (delay == SPECIAL_HS_DETECT_TIME_MS) {
-			pr_debug("%s: Spl headset did not get detect in 4 sec\n",
-					__func__);
-			break;
-		}
-	}
-	if (is_spl_hs) {
-		pr_debug("%s: Headset with threshold found\n",  __func__);
-		mbhc->micbias_enable = true;
-		ret = true;
-	}
-	if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
-		mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
-				MBHC_COMMON_MICB_PRECHARGE,
-				false);
-	if (mbhc->mbhc_cb->set_micbias_value && !mbhc->micbias_enable)
-		mbhc->mbhc_cb->set_micbias_value(codec);
-	if (mbhc->mbhc_cb->set_auto_zeroing)
-		mbhc->mbhc_cb->set_auto_zeroing(codec, false);
-
-	if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
-	    !mbhc->micbias_enable)
-		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec, MIC_BIAS_2,
-						      false);
-
-	pr_debug("%s: leave, micb_enable: %d\n", __func__,
-		  mbhc->micbias_enable);
-	return ret;
-}
-
-static void wcd_mbhc_update_fsm_source(struct wcd_mbhc *mbhc,
-				       enum wcd_mbhc_plug_type plug_type)
-{
-	bool micbias2;
-
-	micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
-							MIC_BIAS_2);
-	switch (plug_type) {
-	case MBHC_PLUG_TYPE_HEADPHONE:
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
-		break;
-	case MBHC_PLUG_TYPE_HEADSET:
-	case MBHC_PLUG_TYPE_ANC_HEADPHONE:
-		if (!mbhc->is_hs_recording && !micbias2)
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
-		break;
-	default:
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
-		break;
-
-	};
-}
-
-static void wcd_enable_mbhc_supply(struct wcd_mbhc *mbhc,
-			enum wcd_mbhc_plug_type plug_type)
-{
-
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	/*
-	 * Do not disable micbias if recording is going on or
-	 * headset is inserted on the other side of the extn
-	 * cable. If headset has been detected current source
-	 * needs to be kept enabled for button detection to work.
-	 * If the accessory type is invalid or unsupported, we
-	 * dont need to enable either of them.
-	 */
-	if (det_extn_cable_en && mbhc->is_extn_cable &&
-		mbhc->mbhc_cb && mbhc->mbhc_cb->extn_use_mb &&
-		mbhc->mbhc_cb->extn_use_mb(codec)) {
-		if (plug_type == MBHC_PLUG_TYPE_HEADPHONE ||
-		    plug_type == MBHC_PLUG_TYPE_HEADSET)
-			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-	} else {
-		if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
-			if (mbhc->is_hs_recording || mbhc->micbias_enable)
-				wcd_enable_curr_micbias(mbhc,
-							WCD_MBHC_EN_MB);
-			else if ((test_bit(WCD_MBHC_EVENT_PA_HPHL,
-						&mbhc->event_state)) ||
-				 (test_bit(WCD_MBHC_EVENT_PA_HPHR,
-						&mbhc->event_state)))
-				wcd_enable_curr_micbias(mbhc,
-							WCD_MBHC_EN_PULLUP);
-			else
-				wcd_enable_curr_micbias(mbhc,
-							WCD_MBHC_EN_CS);
-		} else if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
-			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
-		} else {
-			wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
-		}
-	}
-}
-
-static bool wcd_mbhc_check_for_spl_headset(struct wcd_mbhc *mbhc,
-					   int *spl_hs_cnt)
-{
-	u16 hs_comp_res_1_8v = 0, hs_comp_res_2_7v = 0;
-	bool spl_hs = false;
-
-	if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
-		goto exit;
-
-	/* Read back hs_comp_res @ 1.8v Micbias */
-	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_1_8v);
-	if (!hs_comp_res_1_8v) {
-		spl_hs = false;
-		goto exit;
-	}
-
-	/* Bump up MB2 to 2.7v */
-	mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
-				mbhc->mbhc_cfg->mbhc_micbias, true);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
-	usleep_range(10000, 10100);
-
-	/* Read back HS_COMP_RESULT */
-	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_2_7v);
-	if (!hs_comp_res_2_7v && hs_comp_res_1_8v)
-		spl_hs = true;
-
-	if (spl_hs && spl_hs_cnt)
-		*spl_hs_cnt += 1;
-
-	/* MB2 back to 1.8v */
-	if (*spl_hs_cnt != WCD_MBHC_SPL_HS_CNT) {
-		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
-				mbhc->mbhc_cfg->mbhc_micbias, false);
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
-		usleep_range(10000, 10100);
-	}
-
-	if (spl_hs)
-		pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
-
-exit:
-	return spl_hs;
-}
-
-static void wcd_correct_swch_plug(struct work_struct *work)
-{
-	struct wcd_mbhc *mbhc;
-	struct snd_soc_codec *codec;
-	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
-	unsigned long timeout;
-	u16 hs_comp_res = 0, hphl_sch = 0, mic_sch = 0, btn_result = 0;
-	bool wrk_complete = false;
-	int pt_gnd_mic_swap_cnt = 0;
-	int no_gnd_mic_swap_cnt = 0;
-	bool is_pa_on = false, spl_hs = false;
-	bool micbias2 = false;
-	bool micbias1 = false;
-	int ret = 0;
-	int rc, spl_hs_count = 0;
-
-	pr_debug("%s: enter\n", __func__);
-
-	mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
-	codec = mbhc->codec;
-
-	/*
-	 * Enable micbias/pullup for detection in correct work.
-	 * This work will get scheduled from detect_plug_type which
-	 * will already request for pullup/micbias. If the pullup/micbias
-	 * is handled with ref-counts by individual codec drivers, there is
-	 * no need to enabale micbias/pullup here
-	 */
-
-	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-
-
-	if (mbhc->current_plug == MBHC_PLUG_TYPE_GND_MIC_SWAP) {
-		mbhc->current_plug = MBHC_PLUG_TYPE_NONE;
-		goto correct_plug_type;
-	}
-
-	/* Enable HW FSM */
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
-	/*
-	 * Check for any button press interrupts before starting 3-sec
-	 * loop.
-	 */
-	rc = wait_for_completion_timeout(&mbhc->btn_press_compl,
-			msecs_to_jiffies(WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS));
-
-	WCD_MBHC_REG_READ(WCD_MBHC_BTN_RESULT, btn_result);
-	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
-
-	if (!rc) {
-		pr_debug("%s No btn press interrupt\n", __func__);
-		if (!btn_result && !hs_comp_res)
-			plug_type = MBHC_PLUG_TYPE_HEADSET;
-		else if (!btn_result && hs_comp_res)
-			plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
-		else
-			plug_type = MBHC_PLUG_TYPE_INVALID;
-	} else {
-		if (!btn_result && !hs_comp_res)
-			plug_type = MBHC_PLUG_TYPE_HEADPHONE;
-		else
-			plug_type = MBHC_PLUG_TYPE_INVALID;
-	}
-
-	pr_debug("%s: Valid plug found, plug type is %d\n",
-			 __func__, plug_type);
-	if ((plug_type == MBHC_PLUG_TYPE_HEADSET ||
-	     plug_type == MBHC_PLUG_TYPE_HEADPHONE) &&
-	    (!wcd_swch_level_remove(mbhc))) {
-		WCD_MBHC_RSC_LOCK(mbhc);
-		wcd_mbhc_find_plug_and_report(mbhc, plug_type);
-		WCD_MBHC_RSC_UNLOCK(mbhc);
-	}
-
-correct_plug_type:
-
-	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
-	while (!time_after(jiffies, timeout)) {
-		if (mbhc->hs_detect_work_stop) {
-			pr_debug("%s: stop requested: %d\n", __func__,
-					mbhc->hs_detect_work_stop);
-			wcd_enable_curr_micbias(mbhc,
-						WCD_MBHC_EN_NONE);
-			if (mbhc->micbias_enable) {
-				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
-					mbhc->codec, MIC_BIAS_2, false);
-				if (mbhc->mbhc_cb->set_micbias_value)
-					mbhc->mbhc_cb->set_micbias_value(
-							mbhc->codec);
-				mbhc->micbias_enable = false;
-			}
-			goto exit;
-		}
-		if (mbhc->btn_press_intr) {
-			wcd_cancel_btn_work(mbhc);
-			mbhc->btn_press_intr = false;
-		}
-		/* Toggle FSM */
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
-
-		/* allow sometime and re-check stop requested again */
-		msleep(20);
-		if (mbhc->hs_detect_work_stop) {
-			pr_debug("%s: stop requested: %d\n", __func__,
-					mbhc->hs_detect_work_stop);
-			wcd_enable_curr_micbias(mbhc,
-						WCD_MBHC_EN_NONE);
-			if (mbhc->micbias_enable) {
-				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
-					mbhc->codec, MIC_BIAS_2, false);
-				if (mbhc->mbhc_cb->set_micbias_value)
-					mbhc->mbhc_cb->set_micbias_value(
-							mbhc->codec);
-				mbhc->micbias_enable = false;
-			}
-			goto exit;
-		}
-		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
-
-		pr_debug("%s: hs_comp_res: %x\n", __func__, hs_comp_res);
-		if (mbhc->mbhc_cb->hph_pa_on_status)
-			is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
-
-		/*
-		 * instead of hogging system by contineous polling, wait for
-		 * sometime and re-check stop request again.
-		 */
-		msleep(180);
-		if (hs_comp_res && (spl_hs_count < WCD_MBHC_SPL_HS_CNT)) {
-			spl_hs = wcd_mbhc_check_for_spl_headset(mbhc,
-								&spl_hs_count);
-
-			if (spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
-				hs_comp_res = 0;
-				spl_hs = true;
-				mbhc->micbias_enable = true;
-			}
-		}
-
-		if ((!hs_comp_res) && (!is_pa_on)) {
-			/* Check for cross connection*/
-			ret = wcd_check_cross_conn(mbhc);
-			if (ret < 0) {
-				continue;
-			} else if (ret > 0) {
-				pt_gnd_mic_swap_cnt++;
-				no_gnd_mic_swap_cnt = 0;
-				if (pt_gnd_mic_swap_cnt <
-						GND_MIC_SWAP_THRESHOLD) {
-					continue;
-				} else if (pt_gnd_mic_swap_cnt >
-						GND_MIC_SWAP_THRESHOLD) {
-					/*
-					 * This is due to GND/MIC switch didn't
-					 * work,  Report unsupported plug.
-					 */
-					pr_debug("%s: switch did not work\n",
-						  __func__);
-					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
-					goto report;
-				} else {
-					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
-				}
-			} else {
-				no_gnd_mic_swap_cnt++;
-				pt_gnd_mic_swap_cnt = 0;
-				plug_type = MBHC_PLUG_TYPE_HEADSET;
-				if ((no_gnd_mic_swap_cnt <
-				    GND_MIC_SWAP_THRESHOLD) &&
-				    (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
-					continue;
-				} else {
-					no_gnd_mic_swap_cnt = 0;
-				}
-			}
-			if ((pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) &&
-				(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
-				/*
-				 * if switch is toggled, check again,
-				 * otherwise report unsupported plug
-				 */
-				if (mbhc->mbhc_cfg->swap_gnd_mic &&
-					mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
-					pr_debug("%s: US_EU gpio present,flip switch\n"
-						, __func__);
-					continue;
-				}
-			}
-		}
-
-		WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
-		WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
-		if (hs_comp_res && !(hphl_sch || mic_sch)) {
-			pr_debug("%s: cable is extension cable\n", __func__);
-			plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
-			wrk_complete = true;
-		} else {
-			pr_debug("%s: cable might be headset: %d\n", __func__,
-					plug_type);
-			if (!(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
-				plug_type = MBHC_PLUG_TYPE_HEADSET;
-				/*
-				 * Report headset only if not already reported
-				 * and if there is not button press without
-				 * release
-				 */
-				if (((mbhc->current_plug !=
-				      MBHC_PLUG_TYPE_HEADSET) &&
-				     (mbhc->current_plug !=
-				      MBHC_PLUG_TYPE_ANC_HEADPHONE)) &&
-				    !wcd_swch_level_remove(mbhc) &&
-				    !mbhc->btn_press_intr) {
-					pr_debug("%s: cable is %sheadset\n",
-						__func__,
-						((spl_hs_count ==
-							WCD_MBHC_SPL_HS_CNT) ?
-							"special ":""));
-					goto report;
-				}
-			}
-			wrk_complete = false;
-		}
-	}
-	if (!wrk_complete && mbhc->btn_press_intr) {
-		pr_debug("%s: Can be slow insertion of headphone\n", __func__);
-		wcd_cancel_btn_work(mbhc);
-		plug_type = MBHC_PLUG_TYPE_HEADPHONE;
-	}
-	/*
-	 * If plug_tye is headset, we might have already reported either in
-	 * detect_plug-type or in above while loop, no need to report again
-	 */
-	if (!wrk_complete && ((plug_type == MBHC_PLUG_TYPE_HEADSET) ||
-	    (plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE))) {
-		pr_debug("%s: plug_type:0x%x already reported\n",
-			 __func__, mbhc->current_plug);
-		goto enable_supply;
-	}
-
-	if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH &&
-		(!det_extn_cable_en)) {
-		if (wcd_is_special_headset(mbhc)) {
-			pr_debug("%s: Special headset found %d\n",
-					__func__, plug_type);
-			plug_type = MBHC_PLUG_TYPE_HEADSET;
-			goto report;
-		}
-	}
-
-report:
-	if (wcd_swch_level_remove(mbhc)) {
-		pr_debug("%s: Switch level is low\n", __func__);
-		goto exit;
-	}
-	if (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP && mbhc->btn_press_intr) {
-		pr_debug("%s: insertion of headphone with swap\n", __func__);
-		wcd_cancel_btn_work(mbhc);
-		plug_type = MBHC_PLUG_TYPE_HEADPHONE;
-	}
-	pr_debug("%s: Valid plug found, plug type %d wrk_cmpt %d btn_intr %d\n",
-			__func__, plug_type, wrk_complete,
-			mbhc->btn_press_intr);
-	WCD_MBHC_RSC_LOCK(mbhc);
-	wcd_mbhc_find_plug_and_report(mbhc, plug_type);
-	WCD_MBHC_RSC_UNLOCK(mbhc);
-enable_supply:
-	if (mbhc->mbhc_cb->mbhc_micbias_control)
-		wcd_mbhc_update_fsm_source(mbhc, plug_type);
-	else
-		wcd_enable_mbhc_supply(mbhc, plug_type);
-exit:
-	if (mbhc->mbhc_cb->mbhc_micbias_control &&
-	    !mbhc->micbias_enable)
-		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
-						    MICB_DISABLE);
-	if (mbhc->mbhc_cb->micbias_enable_status) {
-		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
-								MIC_BIAS_1);
-		micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
-								MIC_BIAS_2);
-	}
-
-	if (mbhc->mbhc_cfg->detect_extn_cable &&
-	    ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
-	     (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
-	    !mbhc->hs_detect_work_stop) {
-		WCD_MBHC_RSC_LOCK(mbhc);
-		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, true);
-		WCD_MBHC_RSC_UNLOCK(mbhc);
-	}
-	if (mbhc->mbhc_cb->set_cap_mode)
-		mbhc->mbhc_cb->set_cap_mode(codec, micbias1, micbias2);
-
-	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
-		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, true);
-
-	mbhc->mbhc_cb->lock_sleep(mbhc, false);
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd_mbhc_detect_plug_type(struct wcd_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	enum wcd_mbhc_plug_type plug_type;
-	bool micbias1 = false;
-	int cross_conn;
-	int try = 0;
-
-	pr_debug("%s: enter\n", __func__);
-	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
-
-	if (mbhc->mbhc_cb->hph_pull_down_ctrl)
-		mbhc->mbhc_cb->hph_pull_down_ctrl(codec, false);
-
-	if (mbhc->mbhc_cb->micbias_enable_status)
-		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
-								MIC_BIAS_1);
-
-	if (mbhc->mbhc_cb->set_cap_mode)
-		mbhc->mbhc_cb->set_cap_mode(codec, micbias1, true);
-
-	if (mbhc->mbhc_cb->mbhc_micbias_control)
-		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
-						    MICB_ENABLE);
-	else
-		wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-
-	do {
-		cross_conn = wcd_check_cross_conn(mbhc);
-		try++;
-	} while (try < GND_MIC_SWAP_THRESHOLD);
-
-	if (cross_conn > 0) {
-		pr_debug("%s: cross con found, start polling\n",
-			 __func__);
-		plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
-		if (!mbhc->current_plug)
-			mbhc->current_plug = plug_type;
-		pr_debug("%s: Plug found, plug type is %d\n",
-			 __func__, plug_type);
-	}
-
-	/* Re-initialize button press completion object */
-	reinit_completion(&mbhc->btn_press_compl);
-	wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
-	pr_debug("%s: leave\n", __func__);
-}
+EXPORT_SYMBOL(wcd_mbhc_find_plug_and_report);
 
 static void wcd_mbhc_swch_irq_handler(struct wcd_mbhc *mbhc)
 {
 	bool detection_type = 0;
 	bool micbias1 = false;
 	struct snd_soc_codec *codec = mbhc->codec;
+	enum snd_jack_types jack_type;
 
 	dev_dbg(codec->dev, "%s: enter\n", __func__);
-
 	WCD_MBHC_RSC_LOCK(mbhc);
-
 	mbhc->in_swch_irq_handler = true;
 
 	/* cancel pending button press */
@@ -1517,7 +827,11 @@
 
 	pr_debug("%s: mbhc->current_plug: %d detection_type: %d\n", __func__,
 			mbhc->current_plug, detection_type);
-	wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+	if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+		mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
+						&mbhc->correct_plug_swch);
+	else
+		pr_info("%s: hs_detect_plug work not cancelled\n", __func__);
 
 	if (mbhc->mbhc_cb->micbias_enable_status)
 		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
@@ -1549,7 +863,9 @@
 		if (mbhc->mbhc_cb->enable_mb_source)
 			mbhc->mbhc_cb->enable_mb_source(mbhc, true);
 		mbhc->btn_press_intr = false;
-		wcd_mbhc_detect_plug_type(mbhc);
+		mbhc->is_btn_press = false;
+		if (mbhc->mbhc_fn)
+			mbhc->mbhc_fn->wcd_mbhc_detect_plug_type(mbhc);
 	} else if ((mbhc->current_plug != MBHC_PLUG_TYPE_NONE)
 			&& !detection_type) {
 		/* Disable external voltage source to micbias if present */
@@ -1566,54 +882,42 @@
 			mbhc->mbhc_cb->set_cap_mode(codec, micbias1, false);
 
 		mbhc->btn_press_intr = false;
-		if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
-			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
-					     false);
-			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
-					     false);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
-						 1);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
-			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
-		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_GND_MIC_SWAP) {
-			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_UNSUPPORTED);
-		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET) {
+		mbhc->is_btn_press = false;
+		switch (mbhc->current_plug) {
+		case MBHC_PLUG_TYPE_HEADPHONE:
+			jack_type = SND_JACK_HEADPHONE;
+			break;
+		case MBHC_PLUG_TYPE_GND_MIC_SWAP:
+			jack_type = SND_JACK_UNSUPPORTED;
+			break;
+		case MBHC_PLUG_TYPE_HEADSET:
 			/* make sure to turn off Rbias */
 			if (mbhc->mbhc_cb->micb_internal)
 				mbhc->mbhc_cb->micb_internal(codec, 1, false);
-
 			/* Pulldown micbias */
 			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_PULLDOWN_CTRL, 1);
-			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
-					     false);
-			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
-					     false);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
-						 1);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
-			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADSET);
-		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_HIGH_HPH) {
+			jack_type = SND_JACK_HEADSET;
+			break;
+		case MBHC_PLUG_TYPE_HIGH_HPH:
 			mbhc->is_extn_cable = false;
-			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
-					     false);
-			wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
-					     false);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
-						 1);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
-			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_LINEOUT);
-		} else if (mbhc->current_plug == MBHC_PLUG_TYPE_ANC_HEADPHONE) {
-			mbhc->mbhc_cb->irq_control(codec,
-					mbhc->intr_ids->mbhc_hs_rem_intr,
-					false);
-			mbhc->mbhc_cb->irq_control(codec,
-					mbhc->intr_ids->mbhc_hs_ins_intr,
-					false);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
-						 0);
-			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
-			wcd_mbhc_report_plug(mbhc, 0, SND_JACK_ANC_HEADPHONE);
+			jack_type = SND_JACK_LINEOUT;
+			break;
+		case MBHC_PLUG_TYPE_ANC_HEADPHONE:
+			jack_type = SND_JACK_ANC_HEADPHONE;
+			break;
+		default:
+			pr_info("%s: Invalid current plug: %d\n",
+				__func__, mbhc->current_plug);
+			jack_type = SND_JACK_UNSUPPORTED;
+			break;
 		}
+		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, false);
+		wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE, 1);
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+		mbhc->extn_cable_hph_rem = false;
+		wcd_mbhc_report_plug(mbhc, 0, jack_type);
+
 	} else if (!detection_type) {
 		/* Disable external voltage source to micbias if present */
 		if (mbhc->mbhc_cb->enable_mb_source)
@@ -1621,6 +925,7 @@
 		/* Disable HW FSM */
 		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
 		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+		mbhc->extn_cable_hph_rem = false;
 	}
 
 	mbhc->in_swch_irq_handler = false;
@@ -1646,7 +951,7 @@
 	return r;
 }
 
-static int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc)
+int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc)
 {
 	int mask = 0;
 	int btn;
@@ -1678,202 +983,7 @@
 
 	return mask;
 }
-
-static irqreturn_t wcd_mbhc_hs_ins_irq(int irq, void *data)
-{
-	struct wcd_mbhc *mbhc = data;
-	bool detection_type = 0, hphl_sch = 0, mic_sch = 0;
-	u16 elect_result = 0;
-	static u16 hphl_trigerred;
-	static u16 mic_trigerred;
-
-	pr_debug("%s: enter\n", __func__);
-	if (!mbhc->mbhc_cfg->detect_extn_cable) {
-		pr_debug("%s: Returning as Extension cable feature not enabled\n",
-			__func__);
-		return IRQ_HANDLED;
-	}
-	WCD_MBHC_RSC_LOCK(mbhc);
-
-	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_DETECTION_TYPE, detection_type);
-	WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, elect_result);
-
-	pr_debug("%s: detection_type %d, elect_result %x\n", __func__,
-				detection_type, elect_result);
-	if (detection_type) {
-		/* check if both Left and MIC Schmitt triggers are triggered */
-		WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
-		WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
-		if (hphl_sch && mic_sch) {
-			/* Go for plug type determination */
-			pr_debug("%s: Go for plug type determination\n",
-				  __func__);
-			goto determine_plug;
-
-		} else {
-			if (mic_sch) {
-				mic_trigerred++;
-				pr_debug("%s: Insertion MIC trigerred %d\n",
-					 __func__, mic_trigerred);
-				WCD_MBHC_REG_UPDATE_BITS(
-						WCD_MBHC_ELECT_SCHMT_ISRC,
-						0);
-				msleep(20);
-				WCD_MBHC_REG_UPDATE_BITS(
-						WCD_MBHC_ELECT_SCHMT_ISRC,
-						1);
-			}
-			if (hphl_sch) {
-				hphl_trigerred++;
-				pr_debug("%s: Insertion HPHL trigerred %d\n",
-					 __func__, hphl_trigerred);
-			}
-			if (mic_trigerred && hphl_trigerred) {
-				/* Go for plug type determination */
-				pr_debug("%s: Go for plug type determination\n",
-					 __func__);
-				goto determine_plug;
-			}
-		}
-	}
-	WCD_MBHC_RSC_UNLOCK(mbhc);
-	pr_debug("%s: leave\n", __func__);
-	return IRQ_HANDLED;
-
-determine_plug:
-	/*
-	 * Disable HPHL trigger and MIC Schmitt triggers.
-	 * Setup for insertion detection.
-	 */
-	pr_debug("%s: Disable insertion interrupt\n", __func__);
-	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
-			     false);
-
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
-	hphl_trigerred = 0;
-	mic_trigerred = 0;
-	mbhc->is_extn_cable = true;
-	mbhc->btn_press_intr = false;
-	wcd_mbhc_detect_plug_type(mbhc);
-	WCD_MBHC_RSC_UNLOCK(mbhc);
-	pr_debug("%s: leave\n", __func__);
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd_mbhc_hs_rem_irq(int irq, void *data)
-{
-	struct wcd_mbhc *mbhc = data;
-	u8 hs_comp_result = 0, hphl_sch = 0, mic_sch = 0;
-	static u16 hphl_trigerred;
-	static u16 mic_trigerred;
-	unsigned long timeout;
-	bool removed = true;
-	int retry = 0;
-
-	pr_debug("%s: enter\n", __func__);
-
-	WCD_MBHC_RSC_LOCK(mbhc);
-
-	timeout = jiffies +
-		  msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
-	do {
-		retry++;
-		/*
-		 * read the result register every 10ms to look for
-		 * any change in HS_COMP_RESULT bit
-		 */
-		usleep_range(10000, 10100);
-		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
-		pr_debug("%s: Check result reg for fake removal: hs_comp_res %x\n",
-			 __func__, hs_comp_result);
-		if ((!hs_comp_result) &&
-		    retry > FAKE_REM_RETRY_ATTEMPTS) {
-			removed = false;
-			break;
-		}
-	} while (!time_after(jiffies, timeout));
-
-	if (wcd_swch_level_remove(mbhc)) {
-		pr_debug("%s: Switch level is low ", __func__);
-		goto exit;
-	}
-	pr_debug("%s: headset %s actually removed\n", __func__,
-		removed ? "" : "not ");
-
-	WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
-	WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
-	WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
-
-	if (removed) {
-		if (!(hphl_sch && mic_sch && hs_comp_result)) {
-			/*
-			 * extension cable is still plugged in
-			 * report it as LINEOUT device
-			 */
-			goto report_unplug;
-		} else {
-			if (!mic_sch) {
-				mic_trigerred++;
-				pr_debug("%s: Removal MIC trigerred %d\n",
-					 __func__, mic_trigerred);
-			}
-			if (!hphl_sch) {
-				hphl_trigerred++;
-				pr_debug("%s: Removal HPHL trigerred %d\n",
-					 __func__, hphl_trigerred);
-			}
-			if (mic_trigerred && hphl_trigerred) {
-				/*
-				 * extension cable is still plugged in
-				 * report it as LINEOUT device
-				 */
-				goto report_unplug;
-			}
-		}
-	}
-exit:
-	WCD_MBHC_RSC_UNLOCK(mbhc);
-	pr_debug("%s: leave\n", __func__);
-	return IRQ_HANDLED;
-
-report_unplug:
-
-	/* cancel pending button press */
-	if (wcd_cancel_btn_work(mbhc))
-		pr_debug("%s: button press is canceled\n", __func__);
-	/* cancel correct work function */
-	wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
-
-	pr_debug("%s: Report extension cable\n", __func__);
-	wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
-	/*
-	 * If PA is enabled HPHL schmitt trigger can
-	 * be unreliable, make sure to disable it
-	 */
-	if (test_bit(WCD_MBHC_EVENT_PA_HPHL,
-		&mbhc->event_state))
-		wcd_mbhc_set_and_turnoff_hph_padac(mbhc);
-	/*
-	 * Disable HPHL trigger and MIC Schmitt triggers.
-	 * Setup for insertion detection.
-	 */
-	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
-			     false);
-	wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
-	/* Disable HW FSM */
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 3);
-
-	/* Set the detection type appropriately */
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE, 1);
-	wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
-			     true);
-	hphl_trigerred = 0;
-	mic_trigerred = 0;
-	WCD_MBHC_RSC_UNLOCK(mbhc);
-	pr_debug("%s: leave\n", __func__);
-	return IRQ_HANDLED;
-}
+EXPORT_SYMBOL(wcd_mbhc_get_button_mask);
 
 static void wcd_btn_lpress_fn(struct work_struct *work)
 {
@@ -1930,15 +1040,13 @@
 	pr_debug("%s: enter\n", __func__);
 	complete(&mbhc->btn_press_compl);
 	WCD_MBHC_RSC_LOCK(mbhc);
-	/* send event to sw intr handler*/
-	mbhc->is_btn_press = true;
 	wcd_cancel_btn_work(mbhc);
 	if (wcd_swch_level_remove(mbhc)) {
 		pr_debug("%s: Switch level is low ", __func__);
 		goto done;
 	}
-	mbhc->btn_press_intr = true;
 
+	mbhc->is_btn_press = true;
 	msec_val = jiffies_to_msecs(jiffies - mbhc->jiffies_atreport);
 	pr_debug("%s: msec_val = %ld\n", __func__, msec_val);
 	if (msec_val < MBHC_BUTTON_PRESS_THRESHOLD_MIN) {
@@ -1952,12 +1060,15 @@
 			 __func__);
 		goto done;
 	}
+	mask = wcd_mbhc_get_button_mask(mbhc);
+	if (mask == SND_JACK_BTN_0)
+		mbhc->btn_press_intr = true;
+
 	if (mbhc->current_plug != MBHC_PLUG_TYPE_HEADSET) {
 		pr_debug("%s: Plug isn't headset, ignore button press\n",
 				__func__);
 		goto done;
 	}
-	mask = wcd_mbhc_get_button_mask(mbhc);
 	mbhc->buttons_pressed |= mask;
 	mbhc->mbhc_cb->lock_sleep(mbhc, true);
 	if (schedule_delayed_work(&mbhc->mbhc_btn_dwork,
@@ -1983,8 +1094,8 @@
 		goto exit;
 	}
 
-	if (mbhc->btn_press_intr) {
-		mbhc->btn_press_intr = false;
+	if (mbhc->is_btn_press) {
+		mbhc->is_btn_press = false;
 	} else {
 		pr_debug("%s: This release is for fake btn press\n", __func__);
 		goto exit;
@@ -1994,8 +1105,11 @@
 	 * If current plug is headphone then there is no chance to
 	 * get btn release interrupt, so connected cable should be
 	 * headset not headphone.
+	 * For ADC MBHC, ADC_COMPLETE interrupt will be generated
+	 * in this case. So skip the check here.
 	 */
-	if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
+	if (!WCD_MBHC_DETECTION &&
+		mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
 		wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
 		goto exit;
 
@@ -2128,6 +1242,22 @@
 	if (mbhc->mbhc_cfg->moisture_en && mbhc->mbhc_cb->mbhc_moisture_config)
 		mbhc->mbhc_cb->mbhc_moisture_config(mbhc);
 
+	/*
+	 * For USB analog we need to override the switch configuration.
+	 * Also, disable hph_l pull-up current source as HS_DET_L is driven
+	 * by an external source
+	 */
+	if (mbhc->mbhc_cfg->enable_usbc_analog) {
+		mbhc->hphl_swh = 1;
+		mbhc->gnd_swh = 1;
+
+		if (mbhc->mbhc_cb->hph_pull_up_control)
+			mbhc->mbhc_cb->hph_pull_up_control(codec, I_OFF);
+		else
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HS_L_DET_PULL_UP_CTRL,
+						 0);
+	}
+
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_PLUG_TYPE, mbhc->hphl_swh);
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_GND_PLUG_TYPE, mbhc->gnd_swh);
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_SW_HPH_LP_100K_TO_GND, 1);
@@ -2136,8 +1266,14 @@
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL, 1);
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_L_DET_EN, 1);
 
-	/* Insertion debounce set to 96ms */
-	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_INSREM_DBNC, 6);
+	if (mbhc->mbhc_cfg->enable_usbc_analog) {
+		/* Insertion debounce set to 48ms */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_INSREM_DBNC, 4);
+	} else {
+		/* Insertion debounce set to 96ms */
+		WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_INSREM_DBNC, 6);
+	}
+
 	/* Button Debounce set to 16ms */
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_DBNC, 2);
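/*
 * Illustrative sketch, not part of this patch: what a single
 * WCD_MBHC_REG_UPDATE_BITS() call (defined in wcd-mbhc-v2.h) boils down to
 * once a codec driver has filled its wcd_mbhc_regs[] table and passed it to
 * wcd_mbhc_init().  The register address, mask and offset below are made-up
 * example values; real entries live in the codec files (see the
 * WCD_MBHC_REGISTER() uses in wcd934x-mbhc.c).
 */
static struct wcd_mbhc_register demo_regs[WCD_MBHC_REG_FUNC_MAX] = {
	[WCD_MBHC_BTN_DBNC] =
		WCD_MBHC_REGISTER("WCD_MBHC_BTN_DBNC", 0x0610, 0x03, 0, 0),
};

/* WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_DBNC, 2) is roughly equivalent to: */
static void demo_set_btn_dbnc(struct wcd_mbhc *mbhc)
{
	struct wcd_mbhc_register *r = &mbhc->wcd_mbhc_regs[WCD_MBHC_BTN_DBNC];

	if (r->reg)	/* fields a codec does not implement keep reg == 0 */
		snd_soc_update_bits(mbhc->codec, r->reg, r->mask,
				    2 << r->offset);
}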
 
@@ -2155,7 +1291,6 @@
 
 	wcd_program_btn_threshold(mbhc, false);
 
-	INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
 
 	init_completion(&mbhc->btn_press_compl);
 
@@ -2245,7 +1380,7 @@
 	(void) wcd_mbhc_initialise(mbhc);
 }
 
-int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc)
+static int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc)
 {
 	enum snd_jack_types type;
 	int i, ret, result = 0;
@@ -2300,15 +1435,280 @@
 	return result;
 }
 
-int wcd_mbhc_start(struct wcd_mbhc *mbhc,
-		       struct wcd_mbhc_config *mbhc_cfg)
+static int wcd_mbhc_usb_c_analog_setup_gpios(struct wcd_mbhc *mbhc,
+					     bool active)
 {
 	int rc = 0;
+	struct usbc_ana_audio_config *config =
+		&mbhc->mbhc_cfg->usbc_analog_cfg;
+	union power_supply_propval pval;
 
-	pr_debug("%s: enter\n", __func__);
+	dev_dbg(mbhc->codec->dev, "%s: setting GPIOs active = %d\n",
+		__func__, active);
+
+	memset(&pval, 0, sizeof(pval));
+
+	if (active) {
+		pval.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+		if (power_supply_set_property(mbhc->usb_psy,
+				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &pval))
+			dev_info(mbhc->codec->dev, "%s: force PR_SOURCE mode unsuccessful\n",
+				 __func__);
+		else
+			mbhc->usbc_force_pr_mode = true;
+
+		if (config->usbc_en1_gpio_p)
+			rc = msm_cdc_pinctrl_select_active_state(
+				config->usbc_en1_gpio_p);
+		if (rc == 0 && config->usbc_en2n_gpio_p)
+			rc = msm_cdc_pinctrl_select_active_state(
+				config->usbc_en2n_gpio_p);
+		if (rc == 0 && config->usbc_force_gpio_p)
+			rc = msm_cdc_pinctrl_select_active_state(
+				config->usbc_force_gpio_p);
+		mbhc->usbc_mode = POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
+	} else {
+		/* no delay is required when disabling GPIOs */
+		if (config->usbc_en2n_gpio_p)
+			msm_cdc_pinctrl_select_sleep_state(
+				config->usbc_en2n_gpio_p);
+		if (config->usbc_en1_gpio_p)
+			msm_cdc_pinctrl_select_sleep_state(
+				config->usbc_en1_gpio_p);
+		if (config->usbc_force_gpio_p)
+			msm_cdc_pinctrl_select_sleep_state(
+				config->usbc_force_gpio_p);
+
+		if (mbhc->usbc_force_pr_mode) {
+			pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+			if (power_supply_set_property(mbhc->usb_psy,
+				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &pval))
+				dev_info(mbhc->codec->dev, "%s: force PR_DUAL mode unsuccessful\n",
+					 __func__);
+
+			mbhc->usbc_force_pr_mode = false;
+		}
+
+		mbhc->usbc_mode = POWER_SUPPLY_TYPEC_NONE;
+	}
+
+	return rc;
+}
+
+/* workqueue */
+static void wcd_mbhc_usbc_analog_work_fn(struct work_struct *work)
+{
+	struct wcd_mbhc *mbhc =
+		container_of(work, struct wcd_mbhc, usbc_analog_work);
+
+	wcd_mbhc_usb_c_analog_setup_gpios(mbhc,
+			mbhc->usbc_mode != POWER_SUPPLY_TYPEC_NONE);
+}
+
+/* this callback function is used to process PMI notification */
+static int wcd_mbhc_usb_c_event_changed(struct notifier_block *nb,
+					unsigned long evt, void *ptr)
+{
+	int ret;
+	union power_supply_propval mode;
+	struct wcd_mbhc *mbhc = container_of(nb, struct wcd_mbhc, psy_nb);
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	if (ptr != mbhc->usb_psy || evt != PSY_EVENT_PROP_CHANGED)
+		return 0;
+
+	ret = power_supply_get_property(mbhc->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_MODE, &mode);
+	if (ret) {
+		dev_err(codec->dev, "%s: Unable to read USB TYPEC_MODE: %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	dev_dbg(codec->dev, "%s: USB change event received\n",
+		__func__);
+	dev_dbg(codec->dev, "%s: supply mode %d, expected %d\n", __func__,
+		mode.intval, POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER);
+
+	switch (mode.intval) {
+	case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
+	case POWER_SUPPLY_TYPEC_NONE:
+		dev_dbg(codec->dev, "%s: usbc_mode: %d; mode.intval: %d\n",
+			__func__, mbhc->usbc_mode, mode.intval);
+
+		if (mbhc->usbc_mode == mode.intval)
+			break; /* filter notifications received before */
+		mbhc->usbc_mode = mode.intval;
+
+		dev_dbg(codec->dev, "%s: queueing usbc_analog_work\n",
+			__func__);
+		schedule_work(&mbhc->usbc_analog_work);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/* PMI registration code */
+static int wcd_mbhc_usb_c_analog_init(struct wcd_mbhc *mbhc)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = mbhc->codec;
+
+	dev_dbg(mbhc->codec->dev, "%s: usb-c analog setup start\n", __func__);
+	INIT_WORK(&mbhc->usbc_analog_work, wcd_mbhc_usbc_analog_work_fn);
+
+	mbhc->usb_psy = power_supply_get_by_name("usb");
+	if (IS_ERR_OR_NULL(mbhc->usb_psy)) {
+		dev_err(codec->dev, "%s: could not get USB psy info\n",
+			__func__);
+		ret = -EPROBE_DEFER;
+		if (IS_ERR(mbhc->usb_psy))
+			ret = PTR_ERR(mbhc->usb_psy);
+		mbhc->usb_psy = NULL;
+		goto err;
+	}
+
+	ret = wcd_mbhc_usb_c_analog_setup_gpios(mbhc, false);
+	if (ret) {
+		dev_err(codec->dev, "%s: error while setting USBC ana gpios\n",
+			__func__);
+		goto err;
+	}
+
+	mbhc->psy_nb.notifier_call = wcd_mbhc_usb_c_event_changed;
+	mbhc->psy_nb.priority = 0;
+	ret = power_supply_reg_notifier(&mbhc->psy_nb);
+	if (ret) {
+		dev_err(codec->dev, "%s: power supply registration failed\n",
+			__func__);
+		goto err;
+	}
+
+	/*
+	 * as part of the init sequence check if there is a connected
+	 * USB C analog adapter
+	 */
+	dev_dbg(mbhc->codec->dev, "%s: verify if USB adapter is already inserted\n",
+		__func__);
+	ret = wcd_mbhc_usb_c_event_changed(&mbhc->psy_nb,
+					   PSY_EVENT_PROP_CHANGED,
+					   mbhc->usb_psy);
+
+err:
+	return ret;
+}
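/*
 * Illustrative sketch, not part of this patch: the notifier pattern used by
 * wcd_mbhc_usb_c_analog_init() in isolation.  A consumer registers a
 * power_supply notifier and then replays one PSY_EVENT_PROP_CHANGED by hand
 * so an audio adapter that was already attached before registration is not
 * missed.  All "demo_" names are hypothetical.
 */
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/power_supply.h>

static struct power_supply *demo_usb_psy;
static struct notifier_block demo_psy_nb;

static int demo_psy_event(struct notifier_block *nb, unsigned long evt,
			  void *ptr)
{
	union power_supply_propval mode;

	if (ptr != demo_usb_psy || evt != PSY_EVENT_PROP_CHANGED)
		return 0;

	if (power_supply_get_property(demo_usb_psy,
				      POWER_SUPPLY_PROP_TYPEC_MODE, &mode))
		return 0;

	/* react to mode.intval, e.g. POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER */
	return 0;
}

static int demo_psy_init(void)
{
	demo_usb_psy = power_supply_get_by_name("usb");
	if (IS_ERR_OR_NULL(demo_usb_psy))
		return -EPROBE_DEFER;

	demo_psy_nb.notifier_call = demo_psy_event;
	if (power_supply_reg_notifier(&demo_psy_nb))
		return -EINVAL;

	/* catch an adapter that was plugged in before the notifier existed */
	return demo_psy_event(&demo_psy_nb, PSY_EVENT_PROP_CHANGED,
			      demo_usb_psy);
}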
+
+static int wcd_mbhc_usb_c_analog_deinit(struct wcd_mbhc *mbhc)
+{
+	wcd_mbhc_usb_c_analog_setup_gpios(mbhc, false);
+
+	/* deregister from PMI */
+	power_supply_unreg_notifier(&mbhc->psy_nb);
+
+	return 0;
+}
+
+static int wcd_mbhc_init_gpio(struct wcd_mbhc *mbhc,
+			      struct wcd_mbhc_config *mbhc_cfg,
+			      const char *gpio_dt_str,
+			      int *gpio, struct device_node **gpio_dn)
+{
+	int rc = 0;
+	struct snd_soc_codec *codec = mbhc->codec;
+	struct snd_soc_card *card = codec->component.card;
+
+	dev_dbg(mbhc->codec->dev, "%s: gpio %s\n", __func__, gpio_dt_str);
+
+	*gpio_dn = of_parse_phandle(card->dev->of_node, gpio_dt_str, 0);
+
+	if (!(*gpio_dn)) {
+		*gpio = of_get_named_gpio(card->dev->of_node, gpio_dt_str, 0);
+		if (!gpio_is_valid(*gpio)) {
+			dev_err(card->dev, "%s, property %s not in node %s",
+				__func__, gpio_dt_str,
+				card->dev->of_node->full_name);
+			rc = -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
+{
+	int rc = 0;
+	struct usbc_ana_audio_config *config;
+	struct snd_soc_codec *codec;
+	struct snd_soc_card *card;
+	const char *usb_c_dt = "qcom,msm-mbhc-usbc-audio-supported";
+
+	if (!mbhc || !mbhc_cfg)
+		return -EINVAL;
+
+	config = &mbhc_cfg->usbc_analog_cfg;
+	codec = mbhc->codec;
+	card = codec->component.card;
+
 	/* update the mbhc config */
 	mbhc->mbhc_cfg = mbhc_cfg;
 
+	dev_dbg(mbhc->codec->dev, "%s: enter\n", __func__);
+
+	/* check if USB C analog is defined on device tree */
+	mbhc_cfg->enable_usbc_analog = 0;
+	if (of_find_property(card->dev->of_node, usb_c_dt, NULL)) {
+		rc = of_property_read_u32(card->dev->of_node, usb_c_dt,
+				&mbhc_cfg->enable_usbc_analog);
+	}
+	if (mbhc_cfg->enable_usbc_analog == 0 || rc != 0) {
+		dev_info(card->dev,
+				"%s: %s in dt node is missing or false\n",
+				__func__, usb_c_dt);
+		dev_info(card->dev,
+			"%s: skipping USB c analog configuration\n", __func__);
+	}
+
+	/* initialize GPIOs */
+	if (mbhc_cfg->enable_usbc_analog) {
+		dev_dbg(mbhc->codec->dev, "%s: usbc analog enabled\n",
+				__func__);
+		rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
+				"qcom,usbc-analog-en1_gpio",
+				&config->usbc_en1_gpio,
+				&config->usbc_en1_gpio_p);
+		if (rc)
+			goto err;
+
+		rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
+				"qcom,usbc-analog-en2_n_gpio",
+				&config->usbc_en2n_gpio,
+				&config->usbc_en2n_gpio_p);
+		if (rc)
+			goto err;
+
+		if (of_find_property(card->dev->of_node,
+				     "qcom,usbc-analog-force_detect_gpio",
+				     NULL)) {
+			rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
+					"qcom,usbc-analog-force_detect_gpio",
+					&config->usbc_force_gpio,
+					&config->usbc_force_gpio_p);
+			if (rc)
+				goto err;
+		}
+
+		dev_dbg(mbhc->codec->dev, "%s: calling usb_c_analog_init\n",
+			__func__);
+		/* init PMI notifier */
+		rc = wcd_mbhc_usb_c_analog_init(mbhc);
+		if (rc) {
+			rc = -EPROBE_DEFER;
+			goto err;
+		}
+	}
+
 	/* Set btn key code */
 	if ((!mbhc->is_btn_already_regd) && wcd_mbhc_set_keycode(mbhc))
 		pr_err("Set btn key code error!!!\n");
@@ -2325,14 +1725,44 @@
 			pr_err("%s: Skipping to read mbhc fw, 0x%pK %pK\n",
 				 __func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
 	}
-	pr_debug("%s: leave %d\n", __func__, rc);
+
+	return rc;
+err:
+	if (config->usbc_en1_gpio > 0) {
+		dev_dbg(card->dev, "%s free usb en1 gpio %d\n",
+			__func__, config->usbc_en1_gpio);
+		gpio_free(config->usbc_en1_gpio);
+		config->usbc_en1_gpio = 0;
+	}
+	if (config->usbc_en2n_gpio > 0) {
+		dev_dbg(card->dev, "%s free usb_en2 gpio %d\n",
+			__func__, config->usbc_en2n_gpio);
+		gpio_free(config->usbc_en2n_gpio);
+		config->usbc_en2n_gpio = 0;
+	}
+	if (config->usbc_force_gpio > 0) {
+		dev_dbg(card->dev, "%s free usb_force gpio %d\n",
+			__func__, config->usbc_force_gpio);
+		gpio_free(config->usbc_force_gpio);
+		config->usbc_force_gpio = 0;
+	}
+	if (config->usbc_en1_gpio_p)
+		of_node_put(config->usbc_en1_gpio_p);
+	if (config->usbc_en2n_gpio_p)
+		of_node_put(config->usbc_en2n_gpio_p);
+	if (config->usbc_force_gpio_p)
+		of_node_put(config->usbc_force_gpio_p);
+	dev_dbg(mbhc->codec->dev, "%s: leave %d\n", __func__, rc);
 	return rc;
 }
 EXPORT_SYMBOL(wcd_mbhc_start);
 
 void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
 {
+	struct usbc_ana_audio_config *config = &mbhc->mbhc_cfg->usbc_analog_cfg;
+
 	pr_debug("%s: enter\n", __func__);
+
 	if (mbhc->current_plug != MBHC_PLUG_TYPE_NONE) {
 		if (mbhc->mbhc_cb && mbhc->mbhc_cb->skip_imped_detect)
 			mbhc->mbhc_cb->skip_imped_detect(mbhc->codec);
@@ -2354,6 +1784,25 @@
 		mbhc->mbhc_fw = NULL;
 		mbhc->mbhc_cal = NULL;
 	}
+
+	if (mbhc->mbhc_cfg->enable_usbc_analog) {
+		wcd_mbhc_usb_c_analog_deinit(mbhc);
+		/* free GPIOs */
+		if (config->usbc_en1_gpio > 0)
+			gpio_free(config->usbc_en1_gpio);
+		if (config->usbc_en2n_gpio > 0)
+			gpio_free(config->usbc_en2n_gpio);
+		if (config->usbc_force_gpio)
+			gpio_free(config->usbc_force_gpio);
+
+		if (config->usbc_en1_gpio_p)
+			of_node_put(config->usbc_en1_gpio_p);
+		if (config->usbc_en2n_gpio_p)
+			of_node_put(config->usbc_en2n_gpio_p);
+		if (config->usbc_force_gpio_p)
+			of_node_put(config->usbc_force_gpio_p);
+	}
+
 	pr_debug("%s: leave\n", __func__);
 }
 EXPORT_SYMBOL(wcd_mbhc_stop);
@@ -2372,6 +1821,7 @@
 	int ret = 0;
 	int hph_swh = 0;
 	int gnd_swh = 0;
+	u32 hph_moist_config[3];
 	struct snd_soc_card *card = codec->component.card;
 	const char *hph_switch = "qcom,msm-mbhc-hphl-swh";
 	const char *gnd_switch = "qcom,msm-mbhc-gnd-swh";
@@ -2392,6 +1842,21 @@
 		goto err;
 	}
 
+	ret = of_property_read_u32_array(card->dev->of_node,
+					 "qcom,msm-mbhc-moist-cfg",
+					 hph_moist_config, 3);
+	if (ret) {
+		dev_dbg(card->dev, "%s: no qcom,msm-mbhc-moist-cfg in DT\n",
+			__func__);
+		mbhc->moist_vref = V_45_MV;
+		mbhc->moist_iref = I_3P0_UA;
+		mbhc->moist_rref = R_24_KOHM;
+	} else {
+		mbhc->moist_vref = hph_moist_config[0];
+		mbhc->moist_iref = hph_moist_config[1];
+		mbhc->moist_rref = hph_moist_config[2];
+	}
+
 	mbhc->in_swch_irq_handler = false;
 	mbhc->current_plug = MBHC_PLUG_TYPE_NONE;
 	mbhc->is_btn_press = false;
@@ -2405,6 +1870,7 @@
 	mbhc->btn_press_intr = false;
 	mbhc->is_hs_recording = false;
 	mbhc->is_extn_cable = false;
+	mbhc->extn_cable_hph_rem = false;
 	mbhc->hph_type = WCD_MBHC_HPH_NONE;
 	mbhc->wcd_mbhc_regs = wcd_mbhc_regs;
 
@@ -2428,6 +1894,7 @@
 		return -EINVAL;
 	}
 
+	/* No need to create new sound card jacks if they are already created */
 	if (mbhc->headset_jack.jack == NULL) {
 		ret = snd_soc_card_jack_new(codec->component.card,
 					    "Headset Jack", WCD_MBHC_JACK_MASK,
@@ -2477,6 +1944,27 @@
 	init_waitqueue_head(&mbhc->wait_btn_press);
 	mutex_init(&mbhc->codec_resource_lock);
 
+	switch (WCD_MBHC_DETECTION) {
+	case WCD_DETECTION_LEGACY:
+		wcd_mbhc_legacy_init(mbhc);
+		break;
+	case WCD_DETECTION_ADC:
+		wcd_mbhc_adc_init(mbhc);
+		break;
+	default:
+		pr_err("%s: Unknown detection logic type %d\n",
+			__func__, WCD_MBHC_DETECTION);
+		break;
+	}
+
+	if (!mbhc->mbhc_fn ||
+	    !mbhc->mbhc_fn->wcd_mbhc_hs_ins_irq ||
+	    !mbhc->mbhc_fn->wcd_mbhc_hs_rem_irq ||
+	    !mbhc->mbhc_fn->wcd_mbhc_detect_plug_type ||
+	    !mbhc->mbhc_fn->wcd_cancel_hs_detect_plug) {
+		pr_err("%s: mbhc function pointer is NULL\n", __func__);
+		goto err_mbhc_sw_irq;
+	}
 	ret = mbhc->mbhc_cb->request_irq(codec, mbhc->intr_ids->mbhc_sw_intr,
 				  wcd_mbhc_mech_plug_detect_irq,
 				  "mbhc sw intr", mbhc);
@@ -2489,8 +1977,7 @@
 	ret = mbhc->mbhc_cb->request_irq(codec,
 					 mbhc->intr_ids->mbhc_btn_press_intr,
 					 wcd_mbhc_btn_press_handler,
-					 "Button Press detect",
-					 mbhc);
+					 "Button Press detect", mbhc);
 	if (ret) {
 		pr_err("%s: Failed to request irq %d\n", __func__,
 		       mbhc->intr_ids->mbhc_btn_press_intr);
@@ -2509,7 +1996,7 @@
 
 	ret = mbhc->mbhc_cb->request_irq(codec,
 					 mbhc->intr_ids->mbhc_hs_ins_intr,
-					 wcd_mbhc_hs_ins_irq,
+					 mbhc->mbhc_fn->wcd_mbhc_hs_ins_irq,
 					 "Elect Insert", mbhc);
 	if (ret) {
 		pr_err("%s: Failed to request irq %d\n", __func__,
@@ -2522,7 +2009,7 @@
 
 	ret = mbhc->mbhc_cb->request_irq(codec,
 					 mbhc->intr_ids->mbhc_hs_rem_intr,
-					 wcd_mbhc_hs_rem_irq,
+					 mbhc->mbhc_fn->wcd_mbhc_hs_rem_irq,
 					 "Elect Remove", mbhc);
 	if (ret) {
 		pr_err("%s: Failed to request irq %d\n", __func__,
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index c9bd4fe..dd3d35c 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -14,6 +14,7 @@
 
 #include <linux/wait.h>
 #include <linux/stringify.h>
+#include <linux/power_supply.h>
 #include "wcdcal-hwdep.h"
 
 #define TOMBAK_MBHC_NC	0
@@ -26,6 +27,149 @@
 #define WCD_MONO_HS_MIN_THR	2
 #define WCD_MBHC_STRINGIFY(s)  __stringify(s)
 
+#define WCD_MBHC_REGISTER(rid, rreg, rmask, rshift, rinvert) \
+{ .id = rid, .reg = rreg, .mask = rmask, .offset = rshift, .invert = rinvert }
+
+#define WCD_MBHC_RSC_LOCK(mbhc)			\
+{							\
+	pr_debug("%s: Acquiring BCL\n", __func__);	\
+	mutex_lock(&mbhc->codec_resource_lock);		\
+	pr_debug("%s: Acquiring BCL done\n", __func__);	\
+}
+
+#define WCD_MBHC_RSC_UNLOCK(mbhc)			\
+{							\
+	pr_debug("%s: Release BCL\n", __func__);	\
+	mutex_unlock(&mbhc->codec_resource_lock);	\
+}
+
+#define WCD_MBHC_RSC_ASSERT_LOCKED(mbhc)		\
+{							\
+	WARN_ONCE(!mutex_is_locked(&mbhc->codec_resource_lock), \
+		  "%s: BCL should have acquired\n", __func__); \
+}
+
+/*
+ * Macros to update and read mbhc register bits. Check for
+ * "0" before updating or reading the register, because it
+ * is possible that one codec wants to write to that bit and
+ * other codec does not.
+ */
+#define WCD_MBHC_REG_UPDATE_BITS(function, val)         \
+do {                                                    \
+	if (mbhc->wcd_mbhc_regs[function].reg) {        \
+		snd_soc_update_bits(mbhc->codec,	\
+		mbhc->wcd_mbhc_regs[function].reg,	\
+		mbhc->wcd_mbhc_regs[function].mask,	\
+		val << (mbhc->wcd_mbhc_regs[function].offset)); \
+	}                                               \
+} while (0)
+
+#define WCD_MBHC_REG_READ(function, val)	        \
+do {                                                    \
+	if (mbhc->wcd_mbhc_regs[function].reg) {        \
+		val = (((snd_soc_read(mbhc->codec,	\
+		mbhc->wcd_mbhc_regs[function].reg)) &	\
+		(mbhc->wcd_mbhc_regs[function].mask)) >> \
+		(mbhc->wcd_mbhc_regs[function].offset)); \
+	} else {                                         \
+		val = -EINVAL;                           \
+	}                                                \
+} while (0)
+
+#define WCD_MBHC_CAL_SIZE(buttons, rload) ( \
+	sizeof(struct wcd_mbhc_general_cfg) + \
+	sizeof(struct wcd_mbhc_plug_detect_cfg) + \
+	((sizeof(s16) + sizeof(s16)) * buttons) + \
+	    sizeof(struct wcd_mbhc_plug_type_cfg) + \
+	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+		((sizeof(u16) + sizeof(u16)) * rload) \
+	)
+
+#define WCD_MBHC_CAL_GENERAL_PTR(cali) ( \
+	(struct wcd_mbhc_general_cfg *) cali)
+#define WCD_MBHC_CAL_PLUG_DET_PTR(cali) ( \
+	(struct wcd_mbhc_plug_detect_cfg *) \
+	&(WCD_MBHC_CAL_GENERAL_PTR(cali)[1]))
+#define WCD_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
+	(struct wcd_mbhc_plug_type_cfg *) \
+	&(WCD_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
+#define WCD_MBHC_CAL_BTN_DET_PTR(cali) ( \
+	    (struct wcd_mbhc_btn_detect_cfg *) \
+	&(WCD_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
+#define WCD_MBHC_CAL_IMPED_DET_PTR(cali) ( \
+	(struct wcd_mbhc_imped_detect_cfg *) \
+	(((void *)&WCD_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
+	(WCD_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
+	(sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
+	sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
+	)
+
+#define WCD_MBHC_CAL_MIN_SIZE ( \
+	sizeof(struct wcd_mbhc_general_cfg) + \
+	sizeof(struct wcd_mbhc_plug_detect_cfg) + \
+	sizeof(struct wcd_mbhc_plug_type_cfg) + \
+	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+	(sizeof(u16)*2)  \
+	)
+
+#define WCD_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
+	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+	(cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
+			sizeof(cfg_ptr->_v_btn_high[0]))))
+
+#define WCD_MBHC_CAL_IMPED_MIN_SZ ( \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
+
+#define WCD_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
+	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+	(cfg_ptr->_n_rload * \
+	(sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
+
+#define WCD_MBHC_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
+			   SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
+			   SND_JACK_MECHANICAL | SND_JACK_MICROPHONE2 | \
+			   SND_JACK_UNSUPPORTED)
+
+#define WCD_MBHC_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
+				  SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
+				  SND_JACK_BTN_4 | SND_JACK_BTN_5)
+#define OCP_ATTEMPT 20
+#define HS_DETECT_PLUG_TIME_MS (3 * 1000)
+#define SPECIAL_HS_DETECT_TIME_MS (2 * 1000)
+#define MBHC_BUTTON_PRESS_THRESHOLD_MIN 250
+#define GND_MIC_SWAP_THRESHOLD 4
+#define WCD_FAKE_REMOVAL_MIN_PERIOD_MS 100
+#define HS_VREF_MIN_VAL 1400
+#define FW_READ_ATTEMPTS 15
+#define FW_READ_TIMEOUT 4000000
+#define FAKE_REM_RETRY_ATTEMPTS 3
+#define MAX_IMPED 60000
+
+#define WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS  50
+#define ANC_DETECT_RETRY_CNT 7
+#define WCD_MBHC_SPL_HS_CNT  2
+
+enum wcd_mbhc_detect_logic {
+	WCD_DETECTION_LEGACY,
+	WCD_DETECTION_ADC,
+};
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
+#define WCD_MBHC_DETECTION	WCD_DETECTION_ADC
+#else
+#define WCD_MBHC_DETECTION	WCD_DETECTION_LEGACY
+#endif
+
+enum wcd_mbhc_cs_mb_en_flag {
+	WCD_MBHC_EN_CS = 0,
+	WCD_MBHC_EN_MB,
+	WCD_MBHC_EN_PULLUP,
+	WCD_MBHC_EN_NONE,
+};
+
 enum {
 	WCD_MBHC_ELEC_HS_INS,
 	WCD_MBHC_ELEC_HS_REM,
@@ -70,6 +214,14 @@
 	WCD_MBHC_HPHR_OCP_DET_EN,
 	WCD_MBHC_HPHL_OCP_STATUS,
 	WCD_MBHC_HPHR_OCP_STATUS,
+	WCD_MBHC_ADC_EN,
+	WCD_MBHC_ADC_COMPLETE,
+	WCD_MBHC_ADC_TIMEOUT,
+	WCD_MBHC_ADC_RESULT,
+	WCD_MBHC_MICB2_VOUT,
+	WCD_MBHC_ADC_MODE,
+	WCD_MBHC_DETECTION_DONE,
+	WCD_MBHC_ELECT_ISRC_EN,
 	WCD_MBHC_REG_FUNC_MAX,
 };
 
@@ -140,6 +292,7 @@
 	WCD_MBHC_EVENT_PA_HPHL,
 	WCD_MBHC_EVENT_PA_HPHR,
 };
+
 struct wcd_mbhc_general_cfg {
 	u8 t_ldoh;
 	u8 t_bg_fast_settle;
@@ -249,6 +402,15 @@
 	R_184_KOHM,
 };
 
+struct usbc_ana_audio_config {
+	int usbc_en1_gpio;
+	int usbc_en2n_gpio;
+	int usbc_force_gpio;
+	struct device_node *usbc_en1_gpio_p; /* used by pinctrl API */
+	struct device_node *usbc_en2n_gpio_p; /* used by pinctrl API */
+	struct device_node *usbc_force_gpio_p; /* used by pinctrl API */
+};
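/*
 * Illustrative sketch, not part of this patch: how a sound card devicetree
 * node might populate this config, using the property names parsed in
 * wcd_mbhc_start().  The pinctrl node labels are hypothetical.
 *
 *	qcom,msm-mbhc-usbc-audio-supported = <1>;
 *	qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1>;
 *	qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n>;
 *	qcom,usbc-analog-force_detect_gpio = <&wcd_usbc_analog_force>;
 */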
+
 struct wcd_mbhc_config {
 	bool read_fw_bin;
 	void *calibration;
@@ -263,6 +425,8 @@
 	int mbhc_micbias;
 	int anc_micbias;
 	bool enable_anc_mic_detect;
+	u32 enable_usbc_analog;
+	struct usbc_ana_audio_config usbc_analog_cfg;
 };
 
 struct wcd_mbhc_intr {
@@ -283,56 +447,6 @@
 	u8 invert;
 };
 
-#define WCD_MBHC_REGISTER(rid, rreg, rmask, rshift, rinvert) \
-{ .id = rid, .reg = rreg, .mask = rmask, .offset = rshift, .invert = rinvert }
-
-#define WCD_MBHC_RSC_LOCK(mbhc)			\
-{							\
-	pr_debug("%s: Acquiring BCL\n", __func__);	\
-	mutex_lock(&mbhc->codec_resource_lock);		\
-	pr_debug("%s: Acquiring BCL done\n", __func__);	\
-}
-
-#define WCD_MBHC_RSC_UNLOCK(mbhc)			\
-{							\
-	pr_debug("%s: Release BCL\n", __func__);	\
-	mutex_unlock(&mbhc->codec_resource_lock);	\
-}
-
-#define WCD_MBHC_RSC_ASSERT_LOCKED(mbhc)		\
-{							\
-	WARN_ONCE(!mutex_is_locked(&mbhc->codec_resource_lock), \
-		  "%s: BCL should have acquired\n", __func__); \
-}
-
-/*
- * Macros to update and read mbhc register bits. Check for
- * "0" before updating or reading the register, because it
- * is possible that one codec wants to write to that bit and
- * other codec does not.
- */
-#define WCD_MBHC_REG_UPDATE_BITS(function, val)         \
-do {                                                    \
-	if (mbhc->wcd_mbhc_regs[function].reg) {        \
-		snd_soc_update_bits(mbhc->codec,	\
-		mbhc->wcd_mbhc_regs[function].reg,	\
-		mbhc->wcd_mbhc_regs[function].mask,	\
-		val << (mbhc->wcd_mbhc_regs[function].offset)); \
-	}                                               \
-} while (0)
-
-#define WCD_MBHC_REG_READ(function, val)	        \
-do {                                                    \
-	if (mbhc->wcd_mbhc_regs[function].reg) {        \
-		val = (((snd_soc_read(mbhc->codec,	\
-		mbhc->wcd_mbhc_regs[function].reg)) &	\
-		(mbhc->wcd_mbhc_regs[function].mask)) >> \
-		(mbhc->wcd_mbhc_regs[function].offset)); \
-	} else {                                         \
-		val = -EINVAL;                           \
-	}                                                \
-} while (0)
-
 struct wcd_mbhc_cb {
 	int (*enable_mb_source)(struct wcd_mbhc *, bool);
 	void (*trim_btn_reg)(struct snd_soc_codec *);
@@ -376,6 +490,15 @@
 	bool (*hph_register_recovery)(struct wcd_mbhc *);
 };
 
+struct wcd_mbhc_fn {
+	irqreturn_t (*wcd_mbhc_hs_ins_irq)(int irq, void *data);
+	irqreturn_t (*wcd_mbhc_hs_rem_irq)(int irq, void *data);
+	void (*wcd_mbhc_detect_plug_type)(struct wcd_mbhc *mbhc);
+	bool (*wcd_mbhc_detect_anc_plug_type)(struct wcd_mbhc *mbhc);
+	void (*wcd_cancel_hs_detect_plug)(struct wcd_mbhc *mbhc,
+					  struct work_struct *work);
+};
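/*
 * Illustrative sketch, not part of this patch: each detection flavour
 * (legacy vs ADC) fills one of these ops tables from its init routine
 * (wcd_mbhc_legacy_init()/wcd_mbhc_adc_init()), and wcd_mbhc_init() then
 * requests the electrical insert/remove interrupts through the pointers.
 * The "demo_" handlers are hypothetical stand-ins, and the usual
 * interrupt/workqueue headers are assumed to be available from the includer.
 */
static irqreturn_t demo_hs_ins_irq(int irq, void *data) { return IRQ_HANDLED; }
static irqreturn_t demo_hs_rem_irq(int irq, void *data) { return IRQ_HANDLED; }
static void demo_detect_plug_type(struct wcd_mbhc *mbhc) { }
static void demo_cancel_detect(struct wcd_mbhc *mbhc, struct work_struct *w) { }

static struct wcd_mbhc_fn demo_mbhc_fn = {
	.wcd_mbhc_hs_ins_irq		= demo_hs_ins_irq,
	.wcd_mbhc_hs_rem_irq		= demo_hs_rem_irq,
	.wcd_mbhc_detect_plug_type	= demo_detect_plug_type,
	.wcd_cancel_hs_detect_plug	= demo_cancel_detect,
};

/* a flavour init would then only need:  mbhc->mbhc_fn = &demo_mbhc_fn; */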
+
 struct wcd_mbhc {
 	/* Delayed work to report long button press */
 	struct delayed_work mbhc_btn_dwork;
@@ -393,6 +516,9 @@
 	bool in_swch_irq_handler;
 	bool hphl_swh; /*track HPHL switch NC / NO */
 	bool gnd_swh; /*track GND switch NC / NO */
+	u32 moist_vref;
+	u32 moist_iref;
+	u32 moist_rref;
 	u8 micbias1_cap_mode; /* track ext cap setting */
 	u8 micbias2_cap_mode; /* track ext cap setting */
 	bool hs_detect_work_stop;
@@ -402,6 +528,7 @@
 	bool is_extn_cable;
 	bool skip_imped_detection;
 	bool is_btn_already_regd;
+	bool extn_cable_hph_rem;
 
 	struct snd_soc_codec *codec;
 	/* Work to perform MBHC Firmware Read */
@@ -440,101 +567,26 @@
 
 	unsigned long intr_status;
 	bool is_hph_ocp_pending;
+
+	bool usbc_force_pr_mode;
+	int usbc_mode;
+	struct notifier_block psy_nb;
+	struct power_supply *usb_psy;
+	struct work_struct usbc_analog_work;
+
+	struct wcd_mbhc_fn *mbhc_fn;
 };
-#define WCD_MBHC_CAL_SIZE(buttons, rload) ( \
-	sizeof(struct wcd_mbhc_general_cfg) + \
-	sizeof(struct wcd_mbhc_plug_detect_cfg) + \
-	((sizeof(s16) + sizeof(s16)) * buttons) + \
-	    sizeof(struct wcd_mbhc_plug_type_cfg) + \
-	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
-	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
-		((sizeof(u16) + sizeof(u16)) * rload) \
-	)
 
-
-#define WCD_MBHC_CAL_GENERAL_PTR(cali) ( \
-	(struct wcd_mbhc_general_cfg *) cali)
-#define WCD_MBHC_CAL_PLUG_DET_PTR(cali) ( \
-	(struct wcd_mbhc_plug_detect_cfg *) \
-	&(WCD_MBHC_CAL_GENERAL_PTR(cali)[1]))
-#define WCD_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
-	(struct wcd_mbhc_plug_type_cfg *) \
-	&(WCD_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
-#define WCD_MBHC_CAL_BTN_DET_PTR(cali) ( \
-	    (struct wcd_mbhc_btn_detect_cfg *) \
-	&(WCD_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
-#define WCD_MBHC_CAL_IMPED_DET_PTR(cali) ( \
-	(struct wcd_mbhc_imped_detect_cfg *) \
-	(((void *)&WCD_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
-	(WCD_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
-	(sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
-	sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
-	)
-
-#define WCD_MBHC_CAL_MIN_SIZE ( \
-	sizeof(struct wcd_mbhc_general_cfg) + \
-	sizeof(struct wcd_mbhc_plug_detect_cfg) + \
-	sizeof(struct wcd_mbhc_plug_type_cfg) + \
-	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
-	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
-	(sizeof(u16)*2)  \
-	)
-
-#define WCD_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
-	sizeof(struct wcd_mbhc_btn_detect_cfg) + \
-	(cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
-			sizeof(cfg_ptr->_v_btn_high[0]))))
-
-#define WCD_MBHC_CAL_IMPED_MIN_SZ ( \
-	sizeof(struct wcd_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
-
-#define WCD_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
-	sizeof(struct wcd_mbhc_imped_detect_cfg) + \
-	(cfg_ptr->_n_rload * \
-	(sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
-
-#ifdef CONFIG_SND_SOC_WCD_MBHC
-int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc);
-int wcd_mbhc_start(struct wcd_mbhc *mbhc,
-		       struct wcd_mbhc_config *mbhc_cfg);
-void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
-int wcd_mbhc_init(struct wcd_mbhc *mbhc, struct snd_soc_codec *codec,
-		      const struct wcd_mbhc_cb *mbhc_cb,
-		      const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
-		      struct wcd_mbhc_register *mbhc_reg,
-		      bool impedance_det_en);
-int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
-			   uint32_t *zr);
-void wcd_mbhc_deinit(struct wcd_mbhc *mbhc);
-#else
-static inline void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
-{
-}
-static inline int wcd_mbhc_init(struct wcd_mbhc *mbhc,
-				struct snd_soc_codec *codec,
-				const struct wcd_mbhc_cb *mbhc_cb,
-				const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
-				struct wcd_mbhc_register *mbhc_reg,
-				bool impedance_det_en)
-{
-	return 0;
-}
-static inline int wcd_mbhc_start(struct wcd_mbhc *mbhc,
-				 struct wcd_mbhc_config *mbhc_cfg)
-{
-	return 0;
-}
-static inline int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc,
-					 uint32_t *zl,
-					 uint32_t *zr)
-{
-	*zl = 0;
-	*zr = 0;
-	return -EINVAL;
-}
-static inline void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
-{
-}
-#endif
+void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
+				   enum wcd_mbhc_plug_type plug_type);
+void wcd_mbhc_hs_elec_irq(struct wcd_mbhc *mbhc, int irq_type, bool enable);
+void wcd_mbhc_elec_hs_report_unplug(struct wcd_mbhc *mbhc);
+bool wcd_swch_level_remove(struct wcd_mbhc *mbhc);
+void wcd_enable_curr_micbias(const struct wcd_mbhc *mbhc,
+			     const enum wcd_mbhc_cs_mb_en_flag cs_mb_en);
+void wcd_mbhc_jack_report(struct wcd_mbhc *mbhc,
+			  struct snd_soc_jack *jack, int status, int mask);
+int wcd_cancel_btn_work(struct wcd_mbhc *mbhc);
+int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc);
 
 #endif /* __WCD_MBHC_V2_H__ */
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index 1a529ba..7e217a6 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -60,7 +60,8 @@
 
 /* Command delays */
 #define WCD_SPI_CLKREQ_DELAY_USECS (500)
-#define WCD_SPI_CLK_OFF_TIMER_MS   (3000)
+#define WCD_SPI_CLK_OFF_TIMER_MS   (500)
+#define WCD_SPI_RESUME_TIMEOUT_MS 100
 
 /* Command masks */
 #define WCD_CMD_ADDR_MASK            \
@@ -90,6 +91,7 @@
 
 /* Status mask bits */
 #define WCD_SPI_CLK_STATE_ENABLED BIT(0)
+#define WCD_SPI_IS_SUSPENDED BIT(1)
 
 /* Locking related */
 #define WCD_SPI_MUTEX_LOCK(spi, lock)              \
@@ -144,6 +146,9 @@
 
 	/* Debugfs related information */
 	struct wcd_spi_debug_data debug_data;
+
+	/* Completion object to indicate system resume completion */
+	struct completion resume_comp;
 };
 
 enum xfer_request {
@@ -170,6 +175,55 @@
 	xfer->len = 0;
 }
 
+static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
+{
+	return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
+}
+
+static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
+{
+	struct spi_device *spi = wcd_spi->spi;
+
+	if (wcd_spi->clk_users > 0 ||
+	    test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
+		dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
+			__func__, wcd_spi->clk_users);
+		return false;
+	}
+
+	return true;
+}
+
+static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
+{
+	struct spi_device *spi = wcd_spi->spi;
+	int rc = 0;
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	/* If the system is already in resumed state, return right away */
+	if (!wcd_spi_is_suspended(wcd_spi))
+		goto done;
+
+	/* If suspended then wait for resume to happen */
+	reinit_completion(&wcd_spi->resume_comp);
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
+				msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	if (rc == 0) {
+		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
+			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
+		rc = -EIO;
+		goto done;
+	}
+
+	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
+	rc = 0;
+done:
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+	return rc;
+}
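/*
 * Illustrative sketch, not part of this patch: the suspend/resume handshake
 * above in isolation.  Suspend marks the device suspended; the PM resume
 * callback clears the flag and signals resume_comp; a caller that finds the
 * device suspended waits on the completion with a bounded timeout
 * (WCD_SPI_RESUME_TIMEOUT_MS) instead of touching a powered-down bus.
 * Locking (clk_mutex in the real code) is omitted here for brevity.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(demo_resume_comp);
static bool demo_suspended;

static void demo_suspend(void)
{
	demo_suspended = true;
}

static void demo_resume(void)
{
	demo_suspended = false;
	complete(&demo_resume_comp);
}

static int demo_wait_for_resume(void)
{
	if (!demo_suspended)
		return 0;

	if (!wait_for_completion_timeout(&demo_resume_comp,
					 msecs_to_jiffies(100)))
		return -EIO;	/* did not resume within 100 ms */

	return 0;
}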
+
 static int wcd_spi_read_single(struct spi_device *spi,
 			       u32 remote_addr, u32 *val)
 {
@@ -579,6 +633,18 @@
 	}
 
 	if (request == WCD_SPI_CLK_ENABLE) {
+		/*
+		 * If the SPI bus is suspended, then return error
+		 * as the transaction cannot be completed.
+		 */
+		if (wcd_spi_is_suspended(wcd_spi)) {
+			dev_err(&spi->dev,
+				"%s: SPI suspended, cannot enable clk\n",
+				__func__);
+			ret = -EIO;
+			goto done;
+		}
+
 		/* Cancel the disable clk work */
 		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
 		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
@@ -855,12 +921,22 @@
 				  void *data)
 {
 	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	int ret = 0;
 
 	dev_dbg(&spi->dev, "%s: event type %d\n",
 		__func__, event);
 
 	switch (event) {
+	case WDSP_EVENT_POST_SHUTDOWN:
+		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
+		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
+			wcd_spi_clk_disable(spi);
+		wcd_spi->clk_users = 0;
+		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+		break;
+
 	case WDSP_EVENT_PRE_DLOAD_CODE:
 	case WDSP_EVENT_PRE_DLOAD_DATA:
 		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
@@ -889,6 +965,17 @@
 		ret = wdsp_spi_read_section(spi, data);
 		break;
 
+	case WDSP_EVENT_SUSPEND:
+		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+		if (!wcd_spi_can_suspend(wcd_spi))
+			ret = -EBUSY;
+		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+		break;
+
+	case WDSP_EVENT_RESUME:
+		ret = wcd_spi_wait_for_resume(wcd_spi);
+		break;
+
 	default:
 		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
 			__func__, event);
@@ -1293,6 +1380,7 @@
 	mutex_init(&wcd_spi->clk_mutex);
 	mutex_init(&wcd_spi->xfer_mutex);
 	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
+	init_completion(&wcd_spi->resume_comp);
 
 	wcd_spi->spi = spi;
 	spi_set_drvdata(spi, wcd_spi);
@@ -1330,6 +1418,61 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int wcd_spi_suspend(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	int rc = 0;
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	if (!wcd_spi_can_suspend(wcd_spi)) {
+		rc = -EBUSY;
+		goto done;
+	}
+
+	/*
+	 * If we are here, it is okay to let the suspend go
+	 * through for this driver. But, still need to notify
+	 * the master to make sure all other components can suspend
+	 * as well.
+	 */
+	if (wcd_spi->m_dev && wcd_spi->m_ops &&
+	  wcd_spi->m_ops->suspend) {
+		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
+		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	}
+
+	if (rc == 0)
+		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
+	else
+		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
+			__func__, rc);
+done:
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+	return rc;
+}
+
+static int wcd_spi_resume(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+
+	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
+	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
+	complete(&wcd_spi->resume_comp);
+	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
+
+	return 0;
+}
+
+static const struct dev_pm_ops wcd_spi_pm_ops = {
+	.suspend = wcd_spi_suspend,
+	.resume = wcd_spi_resume,
+};
+#endif
+
 static const struct of_device_id wcd_spi_of_match[] = {
 	{ .compatible = "qcom,wcd-spi-v2", },
 	{ }
@@ -1340,6 +1483,9 @@
 	.driver = {
 		.name = "wcd-spi-v2",
 		.of_match_table = wcd_spi_of_match,
+#ifdef CONFIG_PM
+		.pm = &wcd_spi_pm_ops,
+#endif
 	},
 	.probe = wcd_spi_probe,
 	.remove = wcd_spi_remove,
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 2185f4b..dedf4dc 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -46,6 +46,7 @@
 #include "wcd9xxx-resmgr-v2.h"
 #include "wcd_cpe_core.h"
 #include "wcdcal-hwdep.h"
+#include "wcd-mbhc-v2-api.h"
 
 #define TASHA_RX_PORT_START_NUMBER  16
 
@@ -2197,7 +2198,7 @@
 {
 	struct snd_soc_codec *codec = mbhc->codec;
 
-	if (TASHA_MBHC_MOISTURE_VREF == V_OFF)
+	if (mbhc->moist_vref == V_OFF)
 		return;
 
 	/* Donot enable moisture detection if jack type is NC */
@@ -2208,8 +2209,8 @@
 	}
 
 	snd_soc_update_bits(codec, WCD9335_MBHC_CTL_2,
-			    0x0C, TASHA_MBHC_MOISTURE_VREF << 2);
-	tasha_mbhc_hph_l_pull_up_control(codec, TASHA_MBHC_MOISTURE_IREF);
+			    0x0C, mbhc->moist_vref << 2);
+	tasha_mbhc_hph_l_pull_up_control(codec, mbhc->moist_iref);
 }
 
 static const struct wcd_mbhc_cb mbhc_cb = {
@@ -7926,6 +7927,13 @@
 
 	tasha_mad_input = ucontrol->value.integer.value[0];
 
+	if (tasha_mad_input >= ARRAY_SIZE(tasha_conn_mad_text)) {
+		dev_err(codec->dev,
+			"%s: tasha_mad_input = %d out of bounds\n",
+			__func__, tasha_mad_input);
+		return -EINVAL;
+	}
+
 	if (!strcmp(tasha_conn_mad_text[tasha_mad_input], "NOTUSED1") ||
 	    !strcmp(tasha_conn_mad_text[tasha_mad_input], "NOTUSED2") ||
 	    !strcmp(tasha_conn_mad_text[tasha_mad_input], "NOTUSED3") ||
@@ -13436,8 +13444,6 @@
 
 	/* Class-H Init*/
 	wcd_clsh_init(&tasha->clsh_d);
-	/* Default HPH Mode to Class-H HiFi */
-	tasha->hph_mode = CLS_H_HIFI;
 
 	for (i = 0; i < TASHA_MAX_MICBIAS; i++)
 		tasha->micb_ref[i] = 0;
@@ -13445,8 +13451,6 @@
 	tasha_update_reg_defaults(tasha);
 
 	tasha->codec = codec;
-	for (i = 0; i < COMPANDER_MAX; i++)
-		tasha->comp_enabled[i] = 0;
 
 	dev_dbg(codec->dev, "%s: MCLK Rate = %x\n",
 		__func__, control->mclk_rate);
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.c b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
index 580591a..3e23e37 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -97,6 +97,11 @@
 	{"DSD_FILTER_1", NULL, "DSD_R IF MUX"},
 	{"DSD_FILTER_1", NULL, "RX INT2 NATIVE SUPPLY"},
 	{"RX INT2 MIX3", "DSD HPHR Switch", "DSD_FILTER_1"},
+
+	{"DSD_FILTER_0", NULL, "RX INT3 NATIVE SUPPLY"},
+	{"RX INT3 MIX3", "DSD LO1 Switch", "DSD_FILTER_0"},
+	{"DSD_FILTER_1", NULL, "RX INT4 NATIVE SUPPLY"},
+	{"RX INT4 MIX3", "DSD LO2 Switch", "DSD_FILTER_1"},
 };
 
 static bool is_valid_dsd_interpolator(int interp_num)
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index 9bc5d5f..8da0425 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -401,6 +401,8 @@
 			__func__, ret);
 		goto done;
 	}
+	/* Pull CPAR out of reset */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x04, 0x00);
 
 	/* Configure and Enable CPE FLL clock */
 	ret = wcd_cntl_cpe_fll_ctrl(cntl, true);
@@ -422,6 +424,7 @@
 	if (cntl->cdc_cb && cntl->cdc_cb->cdc_clk_en)
 		cntl->cdc_cb->cdc_clk_en(codec, false);
 
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x04, 0x04);
 	WCD_CNTL_MUTEX_UNLOCK(codec, cntl->clk_mutex);
 	return ret;
 }
@@ -458,6 +461,9 @@
 		ret = -EINVAL;
 
 	cntl->is_clk_enabled = false;
+
+	/* Put CPAR in reset */
+	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x04, 0x04);
 done:
 	WCD_CNTL_MUTEX_UNLOCK(codec, cntl->clk_mutex);
 	return ret;
@@ -469,9 +475,9 @@
 	struct snd_soc_codec *codec = cntl->codec;
 
 	if (enable)
-		snd_soc_write(codec, WCD934X_CPE_SS_CPAR_CTL, 0x03);
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x03, 0x03);
 	else
-		snd_soc_write(codec, WCD934X_CPE_SS_CPAR_CTL, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CPE_SS_CPAR_CTL, 0x03, 0x00);
 }
 
 static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl,
@@ -601,8 +607,6 @@
 	/* Disable WDOG */
 	snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
 			    0x3F, 0x01);
-	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
-			    0x04, 0x00);
 
 	/* Put WDSP in reset state */
 	snd_soc_update_bits(codec, WCD934X_CPE_SS_CPE_CTL,
@@ -627,11 +631,7 @@
 	if (cntl->debug_mode) {
 		snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
 				    0x3F, 0x01);
-		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
-				    0x04, 0x00);
 	} else {
-		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
-				    0x04, 0x04);
 		snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
 				    0x3F, 0x21);
 	}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
index 5dbdb9a..578c347 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@
 #include "wcd934x.h"
 #include "wcd934x-mbhc.h"
 #include "../wcdcal-hwdep.h"
+#include "../wcd-mbhc-v2-api.h"
 
 #define TAVIL_ZDET_SUPPORTED          true
 /* Z value defined in milliohm */
@@ -113,7 +114,7 @@
 	WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
 			  0, 0, 0, 0),
 	WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN",
-			  WCD934X_ANA_MBHC_ZDET, 0x01, 0, 0),
+			  WCD934X_MBHC_CTL_BCS, 0x02, 1, 0),
 	WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS",
 			  WCD934X_MBHC_STATUS_SPARE_1, 0x01, 0, 0),
 	WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL",
@@ -126,6 +127,21 @@
 			  WCD934X_INTR_PIN1_STATUS0, 0x04, 2, 0),
 	WCD_MBHC_REGISTER("WCD_MBHC_HPHR_OCP_STATUS",
 			  WCD934X_INTR_PIN1_STATUS0, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ADC_EN",
+			  WCD934X_MBHC_NEW_CTL_1, 0x08, 3, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ADC_COMPLETE", WCD934X_MBHC_NEW_FSM_STATUS,
+			  0x40, 6, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ADC_TIMEOUT", WCD934X_MBHC_NEW_FSM_STATUS,
+			  0x80, 7, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ADC_RESULT", WCD934X_MBHC_NEW_ADC_RESULT,
+			  0xFF, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_MICB2_VOUT", WCD934X_ANA_MICB2, 0x3F, 0, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ADC_MODE",
+			  WCD934X_MBHC_NEW_CTL_1, 0x10, 4, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_DETECTION_DONE",
+			  WCD934X_MBHC_NEW_CTL_1, 0x04, 2, 0),
+	WCD_MBHC_REGISTER("WCD_MBHC_ELECT_ISRC_EN",
+			  WCD934X_ANA_MBHC_ZDET, 0x02, 1, 0),
 };
 
 static const struct wcd_mbhc_intr intr_ids = {
@@ -772,18 +788,24 @@
 {
 	struct snd_soc_codec *codec = mbhc->codec;
 
-	if (TAVIL_MBHC_MOISTURE_RREF == R_OFF)
+	if ((mbhc->moist_rref == R_OFF) ||
+	    (mbhc->mbhc_cfg->enable_usbc_analog)) {
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_2,
+				    0x0C, R_OFF << 2);
 		return;
+	}
 
 	/* Donot enable moisture detection if jack type is NC */
 	if (!mbhc->hphl_swh) {
 		dev_dbg(codec->dev, "%s: disable moisture detection for NC\n",
 			__func__);
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_2,
+				    0x0C, R_OFF << 2);
 		return;
 	}
 
 	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_2,
-			    0x0C, TAVIL_MBHC_MOISTURE_RREF << 2);
+			    0x0C, mbhc->moist_rref << 2);
 }
 
 static bool tavil_hph_register_recovery(struct wcd_mbhc *mbhc)
@@ -906,6 +928,29 @@
 };
 
 /*
+ * tavil_mbhc_get_impedance: get impedance of headphone left and right channels
+ * @wcd934x_mbhc: handle to struct wcd934x_mbhc *
+ * @zl: handle to left-ch impedance
+ * @zr: handle to right-ch impedance
+ * return 0 for success or error code in case of failure
+ */
+int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+			     uint32_t *zl, uint32_t *zr)
+{
+	if (!wcd934x_mbhc) {
+		pr_err("%s: mbhc not initialized!\n", __func__);
+		return -EINVAL;
+	}
+	if (!zl || !zr) {
+		pr_err("%s: zl or zr null!\n", __func__);
+		return -EINVAL;
+	}
+
+	return wcd_mbhc_get_impedance(&wcd934x_mbhc->wcd_mbhc, zl, zr);
+}
+EXPORT_SYMBOL(tavil_mbhc_get_impedance);
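/*
 * Illustrative usage sketch, not part of this patch: a machine driver that
 * already holds the codec's struct wcd934x_mbhc pointer could query the
 * headphone impedances like this.  tavil_get_mbhc() is a hypothetical
 * accessor; the real lookup depends on the machine driver.
 */
static void demo_log_impedance(struct snd_soc_codec *codec)
{
	struct wcd934x_mbhc *mbhc = tavil_get_mbhc(codec);	/* hypothetical */
	uint32_t zl = 0, zr = 0;

	if (!tavil_mbhc_get_impedance(mbhc, &zl, &zr))
		dev_info(codec->dev, "HPHL %u mohm, HPHR %u mohm\n", zl, zr);
}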
+
+/*
  * tavil_mbhc_hs_detect: starts mbhc insertion/removal functionality
  * @codec: handle to snd_soc_codec *
  * @mbhc_cfg: handle to mbhc configuration structure
@@ -964,8 +1009,10 @@
 			__func__);
 		goto done;
 	}
-	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
-	snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+	if (!WCD_MBHC_DETECTION) {
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
+		snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+	}
 
 done:
 	return ret;
@@ -996,8 +1043,9 @@
 	wcd934x_mbhc->fw_data = fw_data;
 	BLOCKING_INIT_NOTIFIER_HEAD(&wcd934x_mbhc->notifier);
 
-	ret = wcd_mbhc_init(&wcd934x_mbhc->wcd_mbhc, codec, &mbhc_cb, &intr_ids,
-			    wcd_mbhc_registers, TAVIL_ZDET_SUPPORTED);
+	ret = wcd_mbhc_init(&wcd934x_mbhc->wcd_mbhc, codec, &mbhc_cb,
+				&intr_ids, wcd_mbhc_registers,
+				TAVIL_ZDET_SUPPORTED);
 	if (ret) {
 		dev_err(codec->dev, "%s: mbhc initialization failed\n",
 			__func__);
@@ -1021,8 +1069,10 @@
 	snd_soc_add_codec_controls(codec, hph_type_detect_controls,
 				   ARRAY_SIZE(hph_type_detect_controls));
 
-	snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
-	snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+	if (!WCD_MBHC_DETECTION) {
+		snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
+		snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+	}
 
 	return 0;
 err:
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
index 95d8e3d..d40546a 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
@@ -35,6 +35,7 @@
 	bool is_hph_recover;
 };
 
+#ifdef CONFIG_SND_SOC_WCD934X_MBHC
 extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
 			   struct snd_soc_codec *codec,
 			   struct fw_info *fw_data);
@@ -44,4 +45,40 @@
 extern void tavil_mbhc_deinit(struct snd_soc_codec *codec);
 extern int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
 				    struct snd_soc_codec *codec);
+extern int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+				    uint32_t *zl, uint32_t *zr);
+#else
+static inline int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
+				  struct snd_soc_codec *codec,
+				  struct fw_info *fw_data)
+{
+	return 0;
+}
+static inline void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_hs_detect(struct snd_soc_codec *codec,
+				       struct wcd_mbhc_config *mbhc_cfg)
+{
+	return 0;
+}
+static inline void tavil_mbhc_deinit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+					   struct snd_soc_codec *codec)
+{
+	return 0;
+}
+static inline int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+					   uint32_t *zl, uint32_t *zr)
+{
+	if (zl)
+		*zl = 0;
+	if (zr)
+		*zr = 0;
+	return -EINVAL;
+}
+#endif
+
 #endif /* __WCD934X_MBHC_H__ */
diff --git a/sound/soc/codecs/wcd934x/wcd934x-routing.h b/sound/soc/codecs/wcd934x/wcd934x-routing.h
index 940fdf8..afd93b2 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-routing.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-routing.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -761,7 +761,8 @@
 	{"RX INT0_2 MUX", "RX5", "CDC_IF RX5 MUX"},
 	{"RX INT0_2 MUX", "RX6", "CDC_IF RX6 MUX"},
 	{"RX INT0_2 MUX", "RX7", "CDC_IF RX7 MUX"},
-	{"RX INT0 SEC MIX", NULL, "RX INT0_2 MUX"},
+	{"RX INT0_2 INTERP", NULL, "RX INT0_2 MUX"},
+	{"RX INT0 SEC MIX", NULL, "RX INT0_2 INTERP"},
 
 	/* Mixing path INT1 */
 	{"RX INT1_2 MUX", "RX0", "CDC_IF RX0 MUX"},
@@ -871,7 +872,8 @@
 	{"RX INT3 SEC MIX", NULL, "RX INT3_1 INTERP"},
 	{"RX INT3 MIX2", NULL, "RX INT3 SEC MIX"},
 	{"RX INT3 MIX2", NULL, "RX INT3 MIX2 INP"},
-	{"RX INT3 DAC", NULL, "RX INT3 MIX2"},
+	{"RX INT3 MIX3", NULL, "RX INT3 MIX2"},
+	{"RX INT3 DAC", NULL, "RX INT3 MIX3"},
 	{"RX INT3 DAC", NULL, "RX_BIAS"},
 	{"LINEOUT1 PA", NULL, "RX INT3 DAC"},
 	{"LINEOUT1", NULL, "LINEOUT1 PA"},
@@ -881,7 +883,8 @@
 	{"RX INT4 SEC MIX", NULL, "RX INT4_1 MIX1"},
 	{"RX INT4 MIX2", NULL, "RX INT4 SEC MIX"},
 	{"RX INT4 MIX2", NULL, "RX INT4 MIX2 INP"},
-	{"RX INT4 DAC", NULL, "RX INT4 MIX2"},
+	{"RX INT4 MIX3", NULL, "RX INT4 MIX2"},
+	{"RX INT4 DAC", NULL, "RX INT4 MIX3"},
 	{"RX INT4 DAC", NULL, "RX_BIAS"},
 	{"LINEOUT2 PA", NULL, "RX INT4 DAC"},
 	{"LINEOUT2", NULL, "LINEOUT2 PA"},
@@ -913,9 +916,23 @@
 	{"ANC OUT EAR Enable", "Switch", "ADC MUX11"},
 	{"RX INT0 MIX2", NULL, "ANC OUT EAR Enable"},
 
+	{"ANC OUT HPHL Enable", "Switch", "ADC MUX10"},
+	{"ANC OUT HPHL Enable", "Switch", "ADC MUX11"},
+	{"RX INT1 MIX2", NULL, "ANC OUT HPHL Enable"},
+
+	{"ANC OUT HPHR Enable", "Switch", "ADC MUX12"},
+	{"ANC OUT HPHR Enable", "Switch", "ADC MUX13"},
+	{"RX INT2 MIX2", NULL, "ANC OUT HPHR Enable"},
+
 	{"ANC EAR PA", NULL, "RX INT0 DAC"},
 	{"ANC EAR", NULL, "ANC EAR PA"},
 
+	{"ANC HPHL PA", NULL, "RX INT1 DAC"},
+	{"ANC HPHL", NULL, "ANC HPHL PA"},
+
+	{"ANC HPHR PA", NULL, "RX INT2 DAC"},
+	{"ANC HPHR", NULL, "ANC HPHR PA"},
+
 	{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX10"},
 	{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX11"},
 	{"RX INT7 MIX2", NULL, "ANC OUT EAR SPKR Enable"},
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 58b08c9..4b6fcb0b 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -173,6 +173,11 @@
 	VI_SENSE_2,
 	AUDIO_NOMINAL,
 	HPH_PA_DELAY,
+	CLSH_Z_CONFIG,
+	ANC_MIC_AMIC1,
+	ANC_MIC_AMIC2,
+	ANC_MIC_AMIC3,
+	ANC_MIC_AMIC4,
 };
 
 enum {
@@ -506,6 +511,7 @@
 module_param(tx_unmute_delay, int, 0664);
 MODULE_PARM_DESC(tx_unmute_delay, "delay to unmute the tx path");
 
+static void tavil_codec_set_tx_hold(struct snd_soc_codec *, u16, bool);
 
 /* Hold instance to soundwire platform device */
 struct tavil_swr_ctrl_data {
@@ -994,14 +1000,30 @@
 		snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
 		snd_soc_dapm_enable_pin(dapm, "ANC EAR");
 		snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHL PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHR PA");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
+		snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
 		snd_soc_dapm_disable_pin(dapm, "EAR PA");
 		snd_soc_dapm_disable_pin(dapm, "EAR");
+		snd_soc_dapm_disable_pin(dapm, "HPHL PA");
+		snd_soc_dapm_disable_pin(dapm, "HPHR PA");
+		snd_soc_dapm_disable_pin(dapm, "HPHL");
+		snd_soc_dapm_disable_pin(dapm, "HPHR");
 	} else {
 		snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
 		snd_soc_dapm_disable_pin(dapm, "ANC EAR");
 		snd_soc_dapm_disable_pin(dapm, "ANC SPK1 PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+		snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
 		snd_soc_dapm_enable_pin(dapm, "EAR PA");
 		snd_soc_dapm_enable_pin(dapm, "EAR");
+		snd_soc_dapm_enable_pin(dapm, "HPHL");
+		snd_soc_dapm_enable_pin(dapm, "HPHR");
+		snd_soc_dapm_enable_pin(dapm, "HPHL PA");
+		snd_soc_dapm_enable_pin(dapm, "HPHR PA");
 	}
 	mutex_unlock(&tavil->codec_mutex);
 
@@ -1118,16 +1140,56 @@
 		}
 
 		/* Rate converter clk enable and set bypass mode */
-		snd_soc_update_bits(codec, WCD934X_CDC_ANC0_RC_COMMON_CTL,
-				    0x05, 0x05);
+		if (!strcmp(w->name, "RX INT0 DAC") ||
+		    !strcmp(w->name, "RX INT1 DAC") ||
+		    !strcmp(w->name, "ANC SPK1 PA")) {
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC0_RC_COMMON_CTL,
+					    0x05, 0x05);
+			if (!strcmp(w->name, "RX INT1 DAC")) {
+				snd_soc_update_bits(codec,
+					WCD934X_CDC_ANC0_FIFO_COMMON_CTL,
+					0x66, 0x66);
+			}
+		} else if (!strcmp(w->name, "RX INT2 DAC")) {
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_RC_COMMON_CTL,
+					    0x05, 0x05);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_FIFO_COMMON_CTL,
+					    0x66, 0x66);
+		}
+		if (!strcmp(w->name, "RX INT1 DAC"))
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_ANC0_CLK_RESET_CTL, 0x08, 0x08);
+		else if (!strcmp(w->name, "RX INT2 DAC"))
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_ANC1_CLK_RESET_CTL, 0x08, 0x08);
+
 		if (!hwdep_cal)
 			release_firmware(fw);
 		break;
+
+	case SND_SOC_DAPM_POST_PMU:
+		if (!strcmp(w->name, "ANC HPHL PA") ||
+		    !strcmp(w->name, "ANC HPHR PA")) {
+			/* Remove ANC Rx from reset */
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC0_CLK_RESET_CTL,
+					    0x08, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x08, 0x00);
+		}
+
+		break;
+
 	case SND_SOC_DAPM_POST_PMD:
 		snd_soc_update_bits(codec, WCD934X_CDC_ANC0_RC_COMMON_CTL,
 				    0x05, 0x00);
 		if (!strcmp(w->name, "ANC EAR PA") ||
-		    !strcmp(w->name, "ANC SPK1 PA")) {
+		    !strcmp(w->name, "ANC SPK1 PA") ||
+		    !strcmp(w->name, "ANC HPHL PA")) {
 			snd_soc_update_bits(codec, WCD934X_CDC_ANC0_MODE_1_CTL,
 					    0x30, 0x00);
 			msleep(50);
@@ -1142,6 +1204,21 @@
 			snd_soc_update_bits(codec,
 					    WCD934X_CDC_ANC0_CLK_RESET_CTL,
 					    0x38, 0x00);
+		} else if (!strcmp(w->name, "ANC HPHR PA")) {
+			snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_1_CTL,
+					    0x30, 0x00);
+			msleep(50);
+			snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_1_CTL,
+					    0x01, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x38, 0x38);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x07, 0x00);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_ANC1_CLK_RESET_CTL,
+					    0x38, 0x00);
 		}
 		break;
 	}
@@ -1878,12 +1955,8 @@
 		switch (event) {
 		case SND_SOC_DAPM_PRE_PMU:
 		case SND_SOC_DAPM_POST_PMU:
-			if (!(snd_soc_read(codec,
-					WCD934X_CDC_RX2_RX_PATH_CTL) & 0x10) &&
-				(!(snd_soc_read(codec,
-					WCD934X_CDC_RX1_RX_PATH_CTL) & 0x10)))
-				snd_soc_update_bits(codec,
-					WCD9XXX_A_ANA_RX_SUPPLIES, 0x02, 0x02);
+			snd_soc_update_bits(codec,
+				WCD9XXX_A_ANA_RX_SUPPLIES, 0x02, 0x02);
 		break;
 		case SND_SOC_DAPM_POST_PMD:
 			snd_soc_update_bits(codec,
@@ -1893,6 +1966,18 @@
 	}
 }
 
+static void tavil_codec_clear_anc_tx_hold(struct tavil_priv *tavil)
+{
+	if (test_and_clear_bit(ANC_MIC_AMIC1, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC1, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC2, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC2, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC3, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC3, false);
+	if (test_and_clear_bit(ANC_MIC_AMIC4, &tavil->status_mask))
+		tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC4, false);
+}
+
 static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
 				      struct snd_kcontrol *kcontrol,
 				      int event)
@@ -1900,6 +1985,7 @@
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
 	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	int ret = 0;
 
 	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
 
@@ -1908,6 +1994,11 @@
 		if (TAVIL_IS_1_0(tavil->wcd9xxx))
 			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
 					    0x06, (0x03 << 1));
+
+		if ((!(strcmp(w->name, "ANC HPHR PA"))) &&
+		    (test_bit(HPH_PA_DELAY, &tavil->status_mask)))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0xC0, 0xC0);
+
 		set_bit(HPH_PA_DELAY, &tavil->status_mask);
 		if (dsd_conf &&
 		    (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
@@ -1917,14 +2008,34 @@
 		}
 		break;
 	case SND_SOC_DAPM_POST_PMU:
+		if ((!(strcmp(w->name, "ANC HPHR PA")))) {
+			if ((snd_soc_read(codec, WCD934X_ANA_HPH) & 0xC0)
+					!= 0xC0)
+				/*
+				 * If PA_EN is not set (potentially in ANC case)
+				 * then do nothing for POST_PMU and let left
+				 * channel handle everything.
+				 */
+				break;
+		}
 		/*
 		 * 7ms sleep is required after PA is enabled as per
-		 * HW requirement
+		 * HW requirement. If compander is disabled, then
+		 * 20ms delay is needed.
 		 */
 		if (test_bit(HPH_PA_DELAY, &tavil->status_mask)) {
-			usleep_range(7000, 7100);
+			if (!tavil->comp_enabled[COMPANDER_2])
+				usleep_range(20000, 20100);
+			else
+				usleep_range(7000, 7100);
 			clear_bit(HPH_PA_DELAY, &tavil->status_mask);
 		}
+		if (tavil->anc_func) {
+			/* Clear Tx FE HOLD if both PAs are enabled */
+			if ((snd_soc_read(tavil->codec, WCD934X_ANA_HPH) &
+					0xC0) == 0xC0)
+				tavil_codec_clear_anc_tx_hold(tavil);
+		}
 
 		snd_soc_update_bits(codec, WCD934X_HPH_R_TEST, 0x01, 0x01);
 
@@ -1947,6 +2058,34 @@
 		    (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
 			snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
 					    0x04, 0x00);
+		if (!(strcmp(w->name, "ANC HPHR PA"))) {
+			pr_debug("%s: Do everything needed for left channel\n",
+				__func__);
+			/* Do everything needed for left channel */
+			snd_soc_update_bits(codec, WCD934X_HPH_L_TEST,
+					    0x01, 0x01);
+
+			/* Remove mute */
+			snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
+					    0x10, 0x00);
+
+			/* Remove mix path mute if it is enabled */
+			if ((snd_soc_read(codec,
+					WCD934X_CDC_RX1_RX_PATH_MIX_CTL)) &
+					0x10)
+				snd_soc_update_bits(codec,
+					WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
+					0x10, 0x00);
+
+			if (dsd_conf && (snd_soc_read(codec,
+						WCD934X_CDC_DSD0_PATH_CTL) &
+						0x01))
+				snd_soc_update_bits(codec,
+						    WCD934X_CDC_DSD0_CFG2,
+						    0x04, 0x00);
+			/* Remove ANC Rx from reset */
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+		}
 		tavil_codec_override(codec, tavil->hph_mode, event);
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
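The HPHR PA hunks above and below replace the fixed 7 ms post-enable and 5 ms post-disable settle delays with compander-dependent ones: when COMPANDER_2 (the HPHR compander) is off, both paths wait 20 ms instead. A minimal sketch of that selection, reusing the driver's comp_enabled[] flags; the helper name itself is illustrative only:

/* Illustrative helper, not part of the patch */
static void hphr_pa_settle_delay(struct tavil_priv *tavil, bool enabling)
{
	if (!tavil->comp_enabled[COMPANDER_2]) {
		/* compander bypassed: analog ramp is slower, wait 20 ms */
		usleep_range(20000, 20100);
	} else if (enabling) {
		/* HW requires 7 ms after PA enable */
		usleep_range(7000, 7100);
	} else {
		/* 5 ms after PA disable */
		usleep_range(5000, 5100);
	}
}

The HPHL PA handler later in this file applies the same rule with COMPANDER_1.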
@@ -1961,10 +2100,20 @@
 		snd_soc_update_bits(codec, WCD934X_HPH_R_TEST, 0x01, 0x00);
 		snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
 				    0x10, 0x10);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
+				    0x10, 0x10);
+		if (!(strcmp(w->name, "ANC HPHR PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
-		/* 5ms sleep is required after PA disable */
-		usleep_range(5000, 5100);
+		/*
+		 * 5ms sleep is required after PA disable. If compander is
+		 * disabled, then 20ms delay is needed after PA disable.
+		 */
+		if (!tavil->comp_enabled[COMPANDER_2])
+			usleep_range(20000, 20100);
+		else
+			usleep_range(5000, 5100);
 		tavil_codec_override(codec, tavil->hph_mode, event);
 		blocking_notifier_call_chain(&tavil->mbhc->notifier,
 					     WCD_EVENT_POST_HPHR_PA_OFF,
@@ -1972,10 +2121,16 @@
 		if (TAVIL_IS_1_0(tavil->wcd9xxx))
 			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
 					    0x06, 0x0);
+		if (!(strcmp(w->name, "ANC HPHR PA"))) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX2_RX_PATH_CFG0,
+					    0x10, 0x00);
+		}
 		break;
 	};
 
-	return 0;
+	return ret;
 }
 
 static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
@@ -1985,6 +2140,7 @@
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
 	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	int ret = 0;
 
 	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
 
@@ -1993,6 +2149,10 @@
 		if (TAVIL_IS_1_0(tavil->wcd9xxx))
 			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
 					    0x06, (0x03 << 1));
+		if ((!(strcmp(w->name, "ANC HPHL PA"))) &&
+		    (test_bit(HPH_PA_DELAY, &tavil->status_mask)))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH,
+					    0xC0, 0xC0);
 		set_bit(HPH_PA_DELAY, &tavil->status_mask);
 		if (dsd_conf &&
 		    (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
@@ -2002,14 +2162,35 @@
 		}
 		break;
 	case SND_SOC_DAPM_POST_PMU:
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			if ((snd_soc_read(codec, WCD934X_ANA_HPH) & 0xC0)
+								!= 0xC0)
+				/*
+				 * If PA_EN is not set (potentially in ANC
+				 * case) then do nothing for POST_PMU and
+				 * let right channel handle everything.
+				 */
+				break;
+		}
 		/*
 		 * 7ms sleep is required after PA is enabled as per
-		 * HW requirement
+		 * HW requirement. If compander is disabled, then
+		 * 20ms delay is needed.
 		 */
 		if (test_bit(HPH_PA_DELAY, &tavil->status_mask)) {
-			usleep_range(7000, 7100);
+			if (!tavil->comp_enabled[COMPANDER_1])
+				usleep_range(20000, 20100);
+			else
+				usleep_range(7000, 7100);
 			clear_bit(HPH_PA_DELAY, &tavil->status_mask);
 		}
+		if (tavil->anc_func) {
+			/* Clear Tx FE HOLD if both PAs are enabled */
+			if ((snd_soc_read(tavil->codec, WCD934X_ANA_HPH) &
+					0xC0) == 0xC0)
+				tavil_codec_clear_anc_tx_hold(tavil);
+		}
+
 		snd_soc_update_bits(codec, WCD934X_HPH_L_TEST, 0x01, 0x01);
 		/* Remove Mute on primary path */
 		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
@@ -2030,6 +2211,33 @@
 		    (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
 			snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
 					    0x04, 0x00);
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			pr_debug("%s: Do everything needed for right channel\n",
+				__func__);
+
+			/* Do everything needed for right channel */
+			snd_soc_update_bits(codec, WCD934X_HPH_R_TEST,
+					    0x01, 0x01);
+
+			/* Remove mute */
+			snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
+						0x10, 0x00);
+
+			/* Remove mix path mute if it is enabled */
+			if ((snd_soc_read(codec,
+					WCD934X_CDC_RX2_RX_PATH_MIX_CTL)) &
+					0x10)
+				snd_soc_update_bits(codec,
+						WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
+						0x10, 0x00);
+			if (dsd_conf && (snd_soc_read(codec,
+					WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
+				snd_soc_update_bits(codec,
+						    WCD934X_CDC_DSD1_CFG2,
+						    0x04, 0x00);
+			/* Remove ANC Rx from reset */
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+		}
 		tavil_codec_override(codec, tavil->hph_mode, event);
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
@@ -2045,10 +2253,21 @@
 		snd_soc_update_bits(codec, WCD934X_HPH_L_TEST, 0x01, 0x00);
 		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
 				    0x10, 0x10);
+		snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
+				    0x10, 0x10);
+		if (!(strcmp(w->name, "ANC HPHL PA")))
+			snd_soc_update_bits(codec, WCD934X_ANA_HPH,
+					    0x80, 0x00);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
-		/* 5ms sleep is required after PA disable */
-		usleep_range(5000, 5100);
+		/*
+		 * 5ms sleep is required after PA disable. If compander is
+		 * disabled, then 20ms delay is needed after PA disable.
+		 */
+		if (!tavil->comp_enabled[COMPANDER_1])
+			usleep_range(20000, 20100);
+		else
+			usleep_range(5000, 5100);
 		tavil_codec_override(codec, tavil->hph_mode, event);
 		blocking_notifier_call_chain(&tavil->mbhc->notifier,
 					     WCD_EVENT_POST_HPHL_PA_OFF,
@@ -2056,10 +2275,15 @@
 		if (TAVIL_IS_1_0(tavil->wcd9xxx))
 			snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
 					    0x06, 0x0);
+		if (!(strcmp(w->name, "ANC HPHL PA"))) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			snd_soc_update_bits(codec,
+				WCD934X_CDC_RX1_RX_PATH_CFG0, 0x10, 0x00);
+		}
 		break;
 	};
 
-	return 0;
+	return ret;
 }
 
 static int tavil_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
@@ -2068,6 +2292,9 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	u16 lineout_vol_reg = 0, lineout_mix_vol_reg = 0;
+	u16 dsd_mute_reg = 0, dsd_clk_reg = 0;
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
 
 	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
 
@@ -2075,9 +2302,13 @@
 		if (w->shift == 7) {
 			lineout_vol_reg = WCD934X_CDC_RX3_RX_PATH_CTL;
 			lineout_mix_vol_reg = WCD934X_CDC_RX3_RX_PATH_MIX_CTL;
+			dsd_mute_reg = WCD934X_CDC_DSD0_CFG2;
+			dsd_clk_reg = WCD934X_CDC_DSD0_PATH_CTL;
 		} else if (w->shift == 6) {
 			lineout_vol_reg = WCD934X_CDC_RX4_RX_PATH_CTL;
 			lineout_mix_vol_reg = WCD934X_CDC_RX4_RX_PATH_MIX_CTL;
+			dsd_mute_reg = WCD934X_CDC_DSD1_CFG2;
+			dsd_clk_reg = WCD934X_CDC_DSD1_PATH_CTL;
 		}
 	} else {
 		dev_err(codec->dev, "%s: Error enabling lineout PA\n",
@@ -2102,6 +2333,12 @@
 			snd_soc_update_bits(codec,
 					    lineout_mix_vol_reg,
 					    0x10, 0x00);
+		if (dsd_conf && (snd_soc_read(codec, dsd_clk_reg) & 0x01))
+			snd_soc_update_bits(codec, dsd_mute_reg, 0x04, 0x00);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		if (dsd_conf && (snd_soc_read(codec, dsd_clk_reg) & 0x01))
+			snd_soc_update_bits(codec, dsd_mute_reg, 0x04, 0x04);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		/*
@@ -2166,12 +2403,18 @@
 	int hph_mode = tavil->hph_mode;
 	u8 dem_inp;
 	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	int ret = 0;
 
 	dev_dbg(codec->dev, "%s wname: %s event: %d hph_mode: %d\n", __func__,
 		w->name, event, hph_mode);
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
+		if (tavil->anc_func) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			/* 40 msec delay is needed to avoid click and pop */
+			msleep(40);
+		}
 		/* Read DEM INP Select */
 		dem_inp = snd_soc_read(codec, WCD934X_CDC_RX2_RX_PATH_SEC0) &
 			  0x03;
@@ -2202,6 +2445,10 @@
 			     WCD_CLSH_EVENT_PRE_DAC,
 			     WCD_CLSH_STATE_HPHR,
 			     hph_mode);
+		if (tavil->anc_func)
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX2_RX_PATH_CFG0,
+					    0x10, 0x10);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		/* 1000us required as per HW requirement */
@@ -2238,12 +2485,18 @@
 	u8 dem_inp;
 	int ret = 0;
 	struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+	uint32_t impedl = 0, impedr = 0;
 
 	dev_dbg(codec->dev, "%s wname: %s event: %d hph_mode: %d\n", __func__,
 		w->name, event, hph_mode);
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
+		if (tavil->anc_func) {
+			ret = tavil_codec_enable_anc(w, kcontrol, event);
+			/* 40 msec delay is needed to avoid click and pop */
+			msleep(40);
+		}
 		/* Read DEM INP Select */
 		dem_inp = snd_soc_read(codec, WCD934X_CDC_RX1_RX_PATH_SEC0) &
 			  0x03;
@@ -2274,6 +2527,23 @@
 			     WCD_CLSH_EVENT_PRE_DAC,
 			     WCD_CLSH_STATE_HPHL,
 			     hph_mode);
+
+		if (tavil->anc_func)
+			snd_soc_update_bits(codec,
+					    WCD934X_CDC_RX1_RX_PATH_CFG0,
+					    0x10, 0x10);
+
+		ret = tavil_mbhc_get_impedance(tavil->mbhc,
+					       &impedl, &impedr);
+		if (!ret) {
+			wcd_clsh_imped_config(codec, impedl, false);
+			set_bit(CLSH_Z_CONFIG, &tavil->status_mask);
+		} else {
+			dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n",
+				__func__, ret);
+			ret = 0;
+		}
+
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		/* 1000us required as per HW requirement */
@@ -2292,6 +2562,11 @@
 			snd_soc_update_bits(codec,
 					    WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
 					    0xF0, 0x0);
+
+		if (test_bit(CLSH_Z_CONFIG, &tavil->status_mask)) {
+			wcd_clsh_imped_config(codec, impedl, true);
+			clear_bit(CLSH_Z_CONFIG, &tavil->status_mask);
+		}
 		break;
 	default:
 		break;
@@ -2577,6 +2852,8 @@
 		/* Undo reset for MAD */
 		snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
 				    0x02, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+					0x04, 0x04);
 	} else {
 		snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2,
 				    0x03, 0x00);
@@ -2586,6 +2863,8 @@
 		/* Turn off MAD clk */
 		snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
 				    0x01, 0x00);
+		snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+					0x04, 0x00);
 	}
 done:
 	return rc;
@@ -3138,6 +3417,15 @@
 }
 EXPORT_SYMBOL(tavil_codec_enable_interp_clk);
 
+static int tavil_anc_out_switch_cb(struct snd_soc_dapm_widget *w,
+				   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	tavil_codec_enable_interp_clk(codec, event, w->shift);
+
+	return 0;
+}
+
 static int tavil_codec_set_idle_detect_thr(struct snd_soc_codec *codec,
 					   int interp, int path_type)
 {
@@ -3592,8 +3880,8 @@
 {
 	int adc_mux_n = w->shift;
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
 	int amic_n;
-	u16 amic_reg;
 
 	dev_dbg(codec->dev, "%s: event: %d\n", __func__, event);
 
@@ -3601,8 +3889,13 @@
 	case SND_SOC_DAPM_POST_PMU:
 		amic_n = tavil_codec_find_amic_input(codec, adc_mux_n);
 		if (amic_n) {
-			amic_reg = WCD934X_ANA_AMIC1 + amic_n - 1;
-			tavil_codec_set_tx_hold(codec, amic_reg, false);
+			/*
+			 * Prevent ANC Rx pop by leaving Tx FE in HOLD
+			 * state until PA is up. Track AMIC being used
+			 * so we can release the HOLD later.
+			 */
+			set_bit(ANC_MIC_AMIC1 + amic_n - 1,
+				&tavil->status_mask);
 		}
 		break;
 	default:
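This ADC-mux hunk no longer drops the Tx FE HOLD as soon as the mux powers up; when the input is an AMIC it only records which one is in use, and tavil_codec_clear_anc_tx_hold() (added earlier in this file) releases the hold from the HPH PA POST_PMU handlers once both PA_EN bits in WCD934X_ANA_HPH read back as set. A short recap of the two sides, condensed from the hunks in this patch:

/* ADC mux POST_PMU: remember the AMIC, keep its Tx FE in HOLD */
set_bit(ANC_MIC_AMIC1 + amic_n - 1, &tavil->status_mask);

/* HPH PA POST_PMU (both channels): release only when both PAs are on */
if (tavil->anc_func &&
    (snd_soc_read(tavil->codec, WCD934X_ANA_HPH) & 0xC0) == 0xC0)
	tavil_codec_clear_anc_tx_hold(tavil);

This avoids the ANC Rx pop described in the comment above, at the cost of holding the Tx front end slightly longer.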
@@ -5104,19 +5397,18 @@
 				  struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	u16 amic_reg;
+	u16 amic_reg = 0;
 
 	if (!strcmp(kcontrol->id.name, "AMIC_1_2 PWR MODE"))
 		amic_reg = WCD934X_ANA_AMIC1;
 	if (!strcmp(kcontrol->id.name, "AMIC_3_4 PWR MODE"))
 		amic_reg = WCD934X_ANA_AMIC3;
-	else
-		goto ret;
 
-	ucontrol->value.integer.value[0] =
-		(snd_soc_read(codec, amic_reg) & WCD934X_AMIC_PWR_LVL_MASK) >>
-			     WCD934X_AMIC_PWR_LVL_SHIFT;
-ret:
+	if (amic_reg)
+		ucontrol->value.integer.value[0] =
+			(snd_soc_read(codec, amic_reg) &
+			 WCD934X_AMIC_PWR_LVL_MASK) >>
+			  WCD934X_AMIC_PWR_LVL_SHIFT;
 	return 0;
 }
 
@@ -5125,7 +5417,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	u32 mode_val;
-	u16 amic_reg;
+	u16 amic_reg = 0;
 
 	mode_val = ucontrol->value.enumerated.item[0];
 
@@ -5135,12 +5427,10 @@
 		amic_reg = WCD934X_ANA_AMIC1;
 	if (!strcmp(kcontrol->id.name, "AMIC_3_4 PWR MODE"))
 		amic_reg = WCD934X_ANA_AMIC3;
-	else
-		goto ret;
 
-	snd_soc_update_bits(codec, amic_reg, WCD934X_AMIC_PWR_LVL_MASK,
-			    mode_val << WCD934X_AMIC_PWR_LVL_SHIFT);
-ret:
+	if (amic_reg)
+		snd_soc_update_bits(codec, amic_reg, WCD934X_AMIC_PWR_LVL_MASK,
+				    mode_val << WCD934X_AMIC_PWR_LVL_SHIFT);
 	return 0;
 }
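The two PWR MODE hunks above (get and put) replace the removed "else goto ret;" pattern, in which the else bound to the second strcmp() and therefore skipped the AMIC_1_2 case entirely, with an amic_reg initialised to 0 that acts as a "no match" sentinel. A minimal sketch of the fixed shape, shown for the put side:

u16 amic_reg = 0;	/* 0 means the control name did not match */

if (!strcmp(kcontrol->id.name, "AMIC_1_2 PWR MODE"))
	amic_reg = WCD934X_ANA_AMIC1;
if (!strcmp(kcontrol->id.name, "AMIC_3_4 PWR MODE"))
	amic_reg = WCD934X_ANA_AMIC3;

if (amic_reg)	/* touch the register only for a recognised control */
	snd_soc_update_bits(codec, amic_reg, WCD934X_AMIC_PWR_LVL_MASK,
			    mode_val << WCD934X_AMIC_PWR_LVL_SHIFT);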
 
@@ -5185,6 +5475,14 @@
 
 	tavil_mad_input = ucontrol->value.integer.value[0];
 
+	if (tavil_mad_input >= ARRAY_SIZE(tavil_conn_mad_text)) {
+		dev_err(codec->dev,
+			"%s: tavil_mad_input = %d out of bounds\n",
+			__func__, tavil_mad_input);
+		return -EINVAL;
+	}
+
 	if (strnstr(tavil_conn_mad_text[tavil_mad_input], "NOTUSED",
 				sizeof("NOTUSED"))) {
 		dev_dbg(codec->dev,
@@ -6420,6 +6718,12 @@
 static const struct snd_kcontrol_new anc_spkr_pa_switch =
 	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
 
+static const struct snd_kcontrol_new anc_hphl_pa_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_hphr_pa_switch =
+	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
 static const struct snd_kcontrol_new mad_cpe1_switch =
 	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
 
@@ -6527,6 +6831,16 @@
 			tavil_dsd_mixer_get, tavil_dsd_mixer_put),
 };
 
+static const struct snd_kcontrol_new lo1_mixer[] = {
+	SOC_SINGLE_EXT("DSD LO1 Switch", SND_SOC_NOPM, INTERP_LO1, 1, 0,
+			tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
+static const struct snd_kcontrol_new lo2_mixer[] = {
+	SOC_SINGLE_EXT("DSD LO2 Switch", SND_SOC_NOPM, INTERP_LO2, 1, 0,
+			tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
 static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
 	SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
 		AIF1_PB, 0, tavil_codec_enable_slimrx,
@@ -6659,7 +6973,11 @@
 	SND_SOC_DAPM_MIXER("RX INT2 MIX3", SND_SOC_NOPM, 0, 0, hphr_mixer,
 			   ARRAY_SIZE(hphr_mixer)),
 	SND_SOC_DAPM_MIXER("RX INT3 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT3 MIX3", SND_SOC_NOPM, 0, 0, lo1_mixer,
+			   ARRAY_SIZE(lo1_mixer)),
 	SND_SOC_DAPM_MIXER("RX INT4 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX INT4 MIX3", SND_SOC_NOPM, 0, 0, lo2_mixer,
+			   ARRAY_SIZE(lo2_mixer)),
 	SND_SOC_DAPM_MIXER("RX INT7 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_MIXER_E("RX INT7 CHAIN", SND_SOC_NOPM, 0, 0,
 		NULL, 0, tavil_codec_spk_boost_event,
@@ -7048,17 +7366,25 @@
 	SND_SOC_DAPM_PGA_E("LINEOUT1 PA", WCD934X_ANA_LO_1_2, 7, 0, NULL, 0,
 		tavil_codec_enable_lineout_pa,
 		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_POST_PMD),
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
 	SND_SOC_DAPM_PGA_E("LINEOUT2 PA", WCD934X_ANA_LO_1_2, 6, 0, NULL, 0,
 		tavil_codec_enable_lineout_pa,
 		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_POST_PMD),
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
 	SND_SOC_DAPM_PGA_E("ANC EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0,
 		tavil_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU |
 		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
 	SND_SOC_DAPM_PGA_E("ANC SPK1 PA", SND_SOC_NOPM, 0, 0, NULL, 0,
 		tavil_codec_enable_spkr_anc,
 		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		tavil_codec_enable_hphl_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("ANC HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		tavil_codec_enable_hphr_pa,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
 
 	SND_SOC_DAPM_OUTPUT("EAR"),
 	SND_SOC_DAPM_OUTPUT("HPHL"),
@@ -7068,6 +7394,8 @@
 	SND_SOC_DAPM_OUTPUT("SPK1 OUT"),
 	SND_SOC_DAPM_OUTPUT("SPK2 OUT"),
 	SND_SOC_DAPM_OUTPUT("ANC EAR"),
+	SND_SOC_DAPM_OUTPUT("ANC HPHL"),
+	SND_SOC_DAPM_OUTPUT("ANC HPHR"),
 
 	SND_SOC_DAPM_SWITCH("ANC OUT EAR Enable", SND_SOC_NOPM, 0, 0,
 		&anc_ear_switch),
@@ -7076,6 +7404,13 @@
 	SND_SOC_DAPM_SWITCH("ANC SPKR PA Enable", SND_SOC_NOPM, 0, 0,
 		&anc_spkr_pa_switch),
 
+	SND_SOC_DAPM_SWITCH_E("ANC OUT HPHL Enable", SND_SOC_NOPM, INTERP_HPHL,
+		0, &anc_hphl_pa_switch, tavil_anc_out_switch_cb,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SWITCH_E("ANC OUT HPHR Enable", SND_SOC_NOPM, INTERP_HPHR,
+		0, &anc_hphr_pa_switch, tavil_anc_out_switch_cb,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
 	SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
 		tavil_codec_enable_rx_bias,
 		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -7760,13 +8095,8 @@
 
 static void tavil_codec_power_gate_digital_core(struct tavil_priv *tavil)
 {
-	struct snd_soc_codec *codec = tavil->codec;
-
-	if (!codec)
-		return;
-
 	mutex_lock(&tavil->power_lock);
-	dev_dbg(codec->dev, "%s: Entering power gating function, %d\n",
+	dev_dbg(tavil->dev, "%s: Entering power gating function, %d\n",
 		__func__, tavil->power_active_ref);
 
 	if (tavil->power_active_ref > 0)
@@ -7775,16 +8105,16 @@
 	wcd9xxx_set_power_state(tavil->wcd9xxx,
 			WCD_REGION_POWER_COLLAPSE_BEGIN,
 			WCD9XXX_DIG_CORE_REGION_1);
-	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			0x04, 0x04);
-	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			0x01, 0x00);
-	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			0x02, 0x00);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x04, 0x04);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x01, 0x00);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x02, 0x00);
 	wcd9xxx_set_power_state(tavil->wcd9xxx, WCD_REGION_POWER_DOWN,
 				WCD9XXX_DIG_CORE_REGION_1);
 exit:
-	dev_dbg(codec->dev, "%s: Exiting power gating function, %d\n",
+	dev_dbg(tavil->dev, "%s: Exiting power gating function, %d\n",
 		__func__, tavil->power_active_ref);
 	mutex_unlock(&tavil->power_lock);
 }
@@ -7793,34 +8123,32 @@
 {
 	struct tavil_priv *tavil;
 	struct delayed_work *dwork;
-	struct snd_soc_codec *codec;
 
 	dwork = to_delayed_work(work);
 	tavil = container_of(dwork, struct tavil_priv, power_gate_work);
-	codec = tavil->codec;
-
-	if (!codec)
-		return;
 
 	tavil_codec_power_gate_digital_core(tavil);
 }
 
 /* called under power_lock acquisition */
-static int tavil_dig_core_remove_power_collapse(struct snd_soc_codec *codec)
+static int tavil_dig_core_remove_power_collapse(struct tavil_priv *tavil)
 {
-	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
-
-	snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
-	snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
-	snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
-	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x00);
-	snd_soc_update_bits(codec, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x02);
+	regmap_write(tavil->wcd9xxx->regmap,
+		     WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x05);
+	regmap_write(tavil->wcd9xxx->regmap,
+		     WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x07);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x00);
+	regmap_update_bits(tavil->wcd9xxx->regmap,
+			   WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x02);
+	regmap_write(tavil->wcd9xxx->regmap,
+		     WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x03);
 
 	wcd9xxx_set_power_state(tavil->wcd9xxx,
 			WCD_REGION_POWER_COLLAPSE_REMOVE,
 			WCD9XXX_DIG_CORE_REGION_1);
-	regcache_mark_dirty(codec->component.regmap);
-	regcache_sync_region(codec->component.regmap,
+	regcache_mark_dirty(tavil->wcd9xxx->regmap);
+	regcache_sync_region(tavil->wcd9xxx->regmap,
 			     WCD934X_DIG_CORE_REG_MIN,
 			     WCD934X_DIG_CORE_REG_MAX);
 
@@ -7830,7 +8158,6 @@
 static int tavil_dig_core_power_collapse(struct tavil_priv *tavil,
 					 int req_state)
 {
-	struct snd_soc_codec *codec;
 	int cur_state;
 
 	/* Exit if feature is disabled */
@@ -7851,10 +8178,6 @@
 		goto unlock_mutex;
 	}
 
-	codec = tavil->codec;
-	if (!codec)
-		goto unlock_mutex;
-
 	if (req_state == POWER_COLLAPSE) {
 		if (tavil->power_active_ref == 0) {
 			schedule_delayed_work(&tavil->power_gate_work,
@@ -7872,7 +8195,7 @@
 						tavil->wcd9xxx,
 						WCD9XXX_DIG_CORE_REGION_1);
 			if (cur_state == WCD_REGION_POWER_DOWN) {
-				tavil_dig_core_remove_power_collapse(codec);
+				tavil_dig_core_remove_power_collapse(tavil);
 			} else {
 				mutex_unlock(&tavil->power_lock);
 				cancel_delayed_work_sync(
@@ -8584,7 +8907,7 @@
 	if (pdata->dmic_clk_drv ==
 	    WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED) {
 		pdata->dmic_clk_drv = WCD934X_DMIC_CLK_DRIVE_DEFAULT;
-		dev_info(codec->dev,
+		dev_dbg(codec->dev,
 			 "%s: dmic_clk_strength invalid, default = %d\n",
 			 __func__, pdata->dmic_clk_drv);
 	}
@@ -8772,13 +9095,8 @@
 				WCD9XXX_DIG_CORE_REGION_1);
 
 	mutex_lock(&tavil->codec_mutex);
-	/*
-	 * Codec hardware by default comes up in SVS mode.
-	 * Initialize the svs_ref_cnt to 1 to reflect the hardware
-	 * state in the driver.
-	 */
-	tavil->svs_ref_cnt = 1;
 
+	tavil_vote_svs(tavil, true);
 	tavil_slimbus_slave_port_cfg.slave_dev_intfdev_la =
 				control->slim_slave->laddr;
 	tavil_slimbus_slave_port_cfg.slave_dev_pgd_la =
@@ -8786,17 +9104,9 @@
 	tavil_init_slim_slave_cfg(codec);
 	snd_soc_card_change_online_state(codec->component.card, 1);
 
-	/* Class-H Init */
-	wcd_clsh_init(&tavil->clsh_d);
-	/* Default HPH Mode to Class-H LOHiFi */
-	tavil->hph_mode = CLS_H_LOHIFI;
-
 	for (i = 0; i < TAVIL_MAX_MICBIAS; i++)
 		tavil->micb_ref[i] = 0;
 
-	for (i = 0; i < COMPANDER_MAX; i++)
-		tavil->comp_enabled[i] = 0;
-
 	dev_dbg(codec->dev, "%s: MCLK Rate = %x\n",
 		__func__, control->mclk_rate);
 
@@ -8990,6 +9300,10 @@
 	mutex_lock(&tavil->codec_mutex);
 	snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
 	snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+	snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
 	snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
 	mutex_unlock(&tavil->codec_mutex);
 
@@ -9000,6 +9314,7 @@
 	snd_soc_dapm_ignore_suspend(dapm, "AIF3 Playback");
 	snd_soc_dapm_ignore_suspend(dapm, "AIF3 Capture");
 	snd_soc_dapm_ignore_suspend(dapm, "AIF4 Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF4 MAD TX");
 	snd_soc_dapm_ignore_suspend(dapm, "VIfeed");
 
 	snd_soc_dapm_sync(dapm);
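The power-gating hunks earlier in this file move the digital-core collapse and restore sequence from snd_soc_* helpers onto the wcd9xxx regmap, so it no longer needs a registered snd_soc_codec (the !codec early returns are dropped) and logging switches to tavil->dev. The restore path, condensed from the hunk, ends with a cache resync of the digital-core register range:

/* Condensed from tavil_dig_core_remove_power_collapse() in this patch */
regmap_write(tavil->wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x05);
regmap_write(tavil->wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x07);
regmap_update_bits(tavil->wcd9xxx->regmap, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x00);
regmap_update_bits(tavil->wcd9xxx->regmap, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x02);
regmap_write(tavil->wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x03);

/* register contents were lost across the collapse, so resync the cached range */
regcache_mark_dirty(tavil->wcd9xxx->regmap);
regcache_sync_region(tavil->wcd9xxx->regmap,
		     WCD934X_DIG_CORE_REG_MIN, WCD934X_DIG_CORE_REG_MAX);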
diff --git a/sound/soc/codecs/wcd9xxx-common-v2.c b/sound/soc/codecs/wcd9xxx-common-v2.c
index ad62d18..9ac38c2 100644
--- a/sound/soc/codecs/wcd9xxx-common-v2.c
+++ b/sound/soc/codecs/wcd9xxx-common-v2.c
@@ -130,6 +130,81 @@
 	},
 };
 
+static const struct wcd_reg_mask_val imped_table_tavil[][MAX_IMPED_PARAMS] = {
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf2},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf4},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf7},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xf9},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfa},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfb},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfc},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x00},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x00},
+	},
+	{
+		{WCD9XXX_CDC_RX1_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX1_RX_PATH_SEC1, 0x01, 0x01},
+		{WCD9XXX_CDC_RX2_RX_VOL_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_VOL_MIX_CTL, 0xff, 0xfd},
+		{WCD9XXX_CDC_RX2_RX_PATH_SEC1, 0x01, 0x01},
+	},
+};
+
 static const struct wcd_imped_val imped_index[] = {
 	{4, 0},
 	{5, 1},
@@ -185,12 +260,26 @@
 {
 	int i;
 	int index = 0;
+	int table_size;
+
+	static const struct wcd_reg_mask_val
+				(*imped_table_ptr)[MAX_IMPED_PARAMS];
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (IS_CODEC_TYPE(wcd9xxx, WCD934X)) {
+		table_size = ARRAY_SIZE(imped_table_tavil);
+		imped_table_ptr = imped_table_tavil;
+	} else {
+		table_size = ARRAY_SIZE(imped_table);
+		imped_table_ptr = imped_table;
+	}
 
 	/* reset = 1, which means request is to reset the register values */
 	if (reset) {
 		for (i = 0; i < MAX_IMPED_PARAMS; i++)
-			snd_soc_update_bits(codec, imped_table[index][i].reg,
-				imped_table[index][i].mask, 0);
+			snd_soc_update_bits(codec,
+				imped_table_ptr[index][i].reg,
+				imped_table_ptr[index][i].mask, 0);
 		return;
 	}
 	index = get_impedance_index(imped);
@@ -198,15 +287,16 @@
 		pr_debug("%s, impedance not in range = %d\n", __func__, imped);
 		return;
 	}
-	if (index >= ARRAY_SIZE(imped_table)) {
+	if (index >= table_size) {
 		pr_debug("%s, impedance index not in range = %d\n", __func__,
 			index);
 		return;
 	}
 	for (i = 0; i < MAX_IMPED_PARAMS; i++)
-		snd_soc_update_bits(codec, imped_table[index][i].reg,
-				imped_table[index][i].mask,
-				imped_table[index][i].val);
+		snd_soc_update_bits(codec,
+				imped_table_ptr[index][i].reg,
+				imped_table_ptr[index][i].mask,
+				imped_table_ptr[index][i].val);
 }
 EXPORT_SYMBOL(wcd_clsh_imped_config);
 
@@ -579,6 +669,11 @@
 static void wcd_clsh_set_flyback_vneg_ctl(struct snd_soc_codec *codec,
 					  bool enable)
 {
+	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+
+	if (!TASHA_IS_2_0(wcd9xxx))
+		return;
+
 	if (enable) {
 		snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEG_CTRL_1, 0xE0,
 				    0x00);
@@ -758,35 +853,35 @@
 	dev_dbg(codec->dev, "%s: mode: %s, %s\n", __func__, mode_to_str(mode),
 		is_enable ? "enable" : "disable");
 
-	if (is_enable && (req_state == WCD_CLSH_STATE_LO)) {
-		wcd_clsh_set_buck_regulator_mode(codec, CLS_AB);
-	} else {
-		if (req_state == WCD_CLSH_STATE_EAR)
-			goto end;
-
-		/* LO powerdown.
-		 * If EAR Class-H is already enabled, just
-		 * turn on regulator other enable Class-H
-		 * configuration
+	if (is_enable) {
+		/* LO powerup is taken care of in the PA sequence.
+		 * No need to change to class AB here.
 		 */
-		if (wcd_clsh_enable_status(codec)) {
-			wcd_clsh_set_buck_regulator_mode(codec,
-					CLS_H_NORMAL);
-			goto end;
+		if (req_state == WCD_CLSH_STATE_EAR) {
+			/* EAR powerup. */
+			if (!wcd_clsh_enable_status(codec)) {
+				wcd_enable_clsh_block(codec, clsh_d, true);
+				wcd_clsh_set_buck_mode(codec, mode);
+				wcd_clsh_set_flyback_mode(codec, mode);
+			}
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+					0x40, 0x40);
 		}
-		wcd_enable_clsh_block(codec, clsh_d, true);
-		snd_soc_update_bits(codec,
-				WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
-				0x40, 0x40);
-		wcd_clsh_set_buck_regulator_mode(codec,
-				CLS_H_NORMAL);
-		wcd_clsh_set_buck_mode(codec, mode);
-		wcd_clsh_set_flyback_mode(codec, mode);
-		wcd_clsh_flyback_ctrl(codec, clsh_d, mode, true);
-		wcd_clsh_buck_ctrl(codec, clsh_d, mode, true);
+	} else {
+		if (req_state == WCD_CLSH_STATE_EAR) {
+			/* EAR powerdown. */
+			wcd_enable_clsh_block(codec, clsh_d, false);
+			wcd_clsh_set_buck_mode(codec, CLS_H_NORMAL);
+			wcd_clsh_set_flyback_mode(codec, CLS_H_NORMAL);
+			snd_soc_update_bits(codec,
+					WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+					0x40, 0x00);
+		}
+		/* LO powerdown is taken care of in the PA sequence.
+		 * No need to change to class H here.
+		 */
 	}
-end:
-	return;
 }
 
 static void wcd_clsh_state_hph_lo(struct snd_soc_codec *codec,
@@ -1135,6 +1230,7 @@
 	case WCD_CLSH_STATE_HPHL_LO:
 	case WCD_CLSH_STATE_HPHR_LO:
 	case WCD_CLSH_STATE_HPH_ST_LO:
+	case WCD_CLSH_STATE_EAR_LO:
 		return true;
 	default:
 		return false;
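With the hunks above, wcd_clsh_imped_config() now selects between the legacy imped_table and the Tavil-specific imped_table_tavil based on the codec type before indexing by impedance bucket. A minimal sketch of the selection and programming loop, condensed from this patch:

const struct wcd_reg_mask_val (*tbl)[MAX_IMPED_PARAMS];
int i, index, tbl_size;

if (IS_CODEC_TYPE(wcd9xxx, WCD934X)) {
	tbl = imped_table_tavil;
	tbl_size = ARRAY_SIZE(imped_table_tavil);
} else {
	tbl = imped_table;
	tbl_size = ARRAY_SIZE(imped_table);
}

index = get_impedance_index(imped);
if (index >= tbl_size)
	return;		/* impedance bucket outside the chosen table */

for (i = 0; i < MAX_IMPED_PARAMS; i++)
	snd_soc_update_bits(codec, tbl[index][i].reg,
			    tbl[index][i].mask, tbl[index][i].val);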
diff --git a/sound/soc/codecs/wcd9xxx-common-v2.h b/sound/soc/codecs/wcd9xxx-common-v2.h
index ee7e587..53c9a84 100644
--- a/sound/soc/codecs/wcd9xxx-common-v2.h
+++ b/sound/soc/codecs/wcd9xxx-common-v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,7 +34,12 @@
 #define	WCD_CLSH_STATE_HPHL (0x01 << 1)
 #define	WCD_CLSH_STATE_HPHR (0x01 << 2)
 #define	WCD_CLSH_STATE_LO (0x01 << 3)
-#define WCD_CLSH_STATE_MAX 4
+
+/*
+ * Though the number of CLSH states is 4, the max state should be 5
+ * because the state array index starts from 1.
+ */
+#define WCD_CLSH_STATE_MAX 5
 #define NUM_CLSH_STATES_V2 (0x01 << WCD_CLSH_STATE_MAX)
 
 
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index fde13d2..8780888 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -247,9 +247,15 @@
 			 * to CLK_SYS_MCLK_PRG
 			 */
 			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x80, 0x80);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x30, 0x10);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD934X_CLK_SYS_MCLK_PRG, 0x02, 0x00);
 			wcd_resmgr_codec_reg_update_bits(resmgr,
-					WCD934X_CLK_SYS_MCLK_PRG, 0x91, 0x91);
+					WCD934X_CLK_SYS_MCLK_PRG, 0x01, 0x01);
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x02, 0x00);
 			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
 					0x01, 0x01);
@@ -257,9 +263,6 @@
 					WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL,
 					0x01, 0x01);
 			wcd_resmgr_codec_reg_update_bits(resmgr,
-					WCD934X_CODEC_RPM_CLK_MCLK_CFG,
-					0x04, 0x04);
-			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL,
 					0x01, 0x01);
 			wcd_resmgr_codec_reg_update_bits(resmgr,
@@ -305,6 +308,9 @@
 					0x08, 0x08);
 			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD934X_CLK_SYS_MCLK_PRG, 0x02, 0x02);
+			/* Disable clock buffer */
+			wcd_resmgr_codec_reg_update_bits(resmgr,
+					WCD934X_CLK_SYS_MCLK_PRG, 0x80, 0x00);
 			resmgr->clk_type = WCD_CLK_RCO;
 		} else {
 			wcd_resmgr_codec_reg_update_bits(resmgr,
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index c98fdc9..cf014d7 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -887,14 +887,7 @@
 			 * instead SSR handler will control CPE.
 			 */
 			wcd_cpe_enable_cpe_clks(core, false);
-			/*
-			 * During BUS_DOWN event, possibly the
-			 * irq driver is under cleanup, do not request
-			 * cleanup of irqs here, rather cleanup irqs
-			 * once BUS_UP event is received.
-			 */
-			if (core->ssr_type != WCD_CPE_BUS_DOWN_EVENT)
-				wcd_cpe_cleanup_irqs(core);
+			wcd_cpe_cleanup_irqs(core);
 			goto done;
 		}
 
@@ -1145,7 +1138,6 @@
 		break;
 
 	case WCD_CPE_BUS_UP_EVENT:
-		wcd_cpe_cleanup_irqs(core);
 		wcd_cpe_set_and_complete(core, WCD_CPE_BUS_READY);
 		/*
 		 * In case of bus up event ssr_type will be changed
@@ -3024,7 +3016,7 @@
 
 static int wcd_cpe_set_one_param(void *core_handle,
 	struct cpe_lsm_session *session, struct lsm_params_info *p_info,
-	void *data, enum LSM_PARAM_TYPE param_type)
+	void *data, uint32_t param_type)
 {
 	struct wcd_cpe_core *core = core_handle;
 	int rc = 0;
@@ -3039,25 +3031,9 @@
 		rc = wcd_cpe_send_param_epd_thres(core, session,
 						data, &ids);
 		break;
-	case LSM_OPERATION_MODE: {
-		struct cpe_lsm_ids connectport_ids;
-
-		rc = wcd_cpe_send_param_opmode(core, session,
-					data, &ids);
-		if (rc)
-			break;
-
-		connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
-		connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
-
-		rc = wcd_cpe_send_param_connectport(core, session, NULL,
-				       &connectport_ids, CPE_AFE_PORT_1_TX);
-		if (rc)
-			dev_err(core->dev,
-				"%s: send_param_connectport failed, err %d\n",
-				__func__, rc);
+	case LSM_OPERATION_MODE:
+		rc = wcd_cpe_send_param_opmode(core, session, data, &ids);
 		break;
-	}
 	case LSM_GAIN:
 		rc = wcd_cpe_send_param_gain(core, session, data, &ids);
 		break;
@@ -3076,13 +3052,13 @@
 		break;
 	default:
 		pr_err("%s: wrong param_type 0x%x\n",
-			__func__, p_info->param_type);
+			__func__, param_type);
 	}
 
 	if (rc)
 		dev_err(core->dev,
 			"%s: send_param(%d) failed, err %d\n",
-			 __func__, p_info->param_type, rc);
+			 __func__, param_type, rc);
 	return rc;
 }
 
diff --git a/sound/soc/codecs/wsa881x-temp-sensor.c b/sound/soc/codecs/wsa881x-temp-sensor.c
index 0079d0f..5ab0ecf 100644
--- a/sound/soc/codecs/wsa881x-temp-sensor.c
+++ b/sound/soc/codecs/wsa881x-temp-sensor.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
 #define LOW_TEMP_THRESHOLD 5
 #define HIGH_TEMP_THRESHOLD 45
 #define TEMP_INVALID	0xFFFF
-
+#define WSA881X_TEMP_RETRY 3
 /*
  * wsa881x_get_temp - get wsa temperature
  * @thermal: thermal zone device
@@ -44,6 +44,7 @@
 	int temp_val;
 	int t1 = T1_TEMP;
 	int t2 = T2_TEMP;
+	u8 retry = WSA881X_TEMP_RETRY;
 
 	if (!thermal)
 		return -EINVAL;
@@ -60,6 +61,7 @@
 		pr_err("%s: pdata is NULL\n", __func__);
 		return -EINVAL;
 	}
+temp_retry:
 	if (pdata->wsa_temp_reg_read) {
 		ret = pdata->wsa_temp_reg_read(codec, &reg);
 		if (ret) {
@@ -101,6 +103,10 @@
 		printk_ratelimited("%s: T0: %d is out of range[%d, %d]\n",
 				   __func__, temp_val, LOW_TEMP_THRESHOLD,
 				   HIGH_TEMP_THRESHOLD);
+		if (retry--) {
+			msleep(20);
+			goto temp_retry;
+		}
 	}
 	if (temp)
 		*temp = temp_val;
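wsa881x_get_temp() now re-reads the temperature up to WSA881X_TEMP_RETRY (3) extra times, 20 ms apart, whenever the computed value falls outside the 5-45 degC sanity window; if every attempt is out of range, the last value is still reported. A small sketch of that bounded-retry shape, with a hypothetical wsa_read_temp_celsius() standing in for the register read and conversion:

int temp_val;
u8 retry = WSA881X_TEMP_RETRY;

for (;;) {
	temp_val = wsa_read_temp_celsius();	/* hypothetical read + convert */
	if (temp_val >= LOW_TEMP_THRESHOLD && temp_val <= HIGH_TEMP_THRESHOLD)
		break;				/* sane reading, use it */
	if (!retry--)
		break;				/* retries exhausted, keep last value */
	msleep(20);
}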
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index e689570..062bae2 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -757,15 +757,13 @@
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 		wsa881x_resource_acquire(codec, ENABLE);
-		if (wsa881x->boost_enable)
-			wsa881x_boost_ctrl(codec, ENABLE);
+		wsa881x_boost_ctrl(codec, ENABLE);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		swr_slvdev_datapath_control(wsa881x->swr_slave,
 					    wsa881x->swr_slave->dev_num,
 					    false);
-		if (wsa881x->boost_enable)
-			wsa881x_boost_ctrl(codec, DISABLE);
+		wsa881x_boost_ctrl(codec, DISABLE);
 		wsa881x_resource_acquire(codec, DISABLE);
 		break;
 	}
@@ -988,6 +986,7 @@
 {
 	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
 	struct swr_device *dev;
+	u8 retry = WSA881X_NUM_RETRY;
 	u8 devnum = 0;
 
 	if (!wsa881x) {
@@ -996,7 +995,12 @@
 	}
 	dev = wsa881x->swr_slave;
 	if (dev && (wsa881x->state == WSA881X_DEV_DOWN)) {
-		if (swr_get_logical_dev_num(dev, dev->addr, &devnum)) {
+		while (swr_get_logical_dev_num(dev, dev->addr, &devnum) &&
+		       --retry) {
+			/* Retry after 1 msec delay */
+			usleep_range(1000, 1100);
+		}
+		if (retry == 0) {
 			dev_err(codec->dev,
 				"%s get devnum %d for dev addr %lx failed\n",
 				__func__, devnum, dev->addr);
@@ -1108,8 +1112,9 @@
 	usleep_range(5000, 5010);
 	ret = swr_get_logical_dev_num(swr_dev, swr_dev->addr, &devnum);
 	if (ret) {
-		dev_dbg(&swr_dev->dev, "%s failed to get devnum, err:%d\n",
-			__func__, ret);
+		dev_dbg(&swr_dev->dev,
+			"%s get devnum %d for dev addr %lx failed\n",
+			__func__, devnum, swr_dev->addr);
 		goto err;
 	}
 	swr_dev->dev_num = devnum;
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 4a7af76..18585749 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -30,16 +30,6 @@
 	 is inducing kernel panic upon encountering critical
 	 errors from DSP audio modules
 
-config DOLBY_DAP
-	bool "Enable Dolby DAP"
-	depends on SND_SOC_MSM_QDSP6V2_INTF
-	help
-	 To add support for dolby DAP post processing.
-	 This support is to configure the post processing parameters
-	 to DSP. The configuration includes sending the end point
-	 device, end point dependent post processing parameters and
-	 the various posrt processing parameters
-
 config DOLBY_DS2
 	bool "Enable Dolby DS2"
 	depends on SND_SOC_MSM_QDSP6V2_INTF
@@ -50,6 +40,15 @@
 	 device, end point dependent post processing parameters and
 	 the various posrt processing parameters
 
+config DOLBY_LICENSE
+	bool "Enable Dolby LICENSE"
+	depends on SND_SOC_MSM_QDSP6V2_INTF
+	help
+	 To add support for Dolby DAP post processing while
+	 retaining only the DAP set-license functionality.
+	 This is required by the Dolby GEF implementation, which
+	 needs nothing but Dolby license validation in the driver.
+
 config DTS_EAGLE
 	bool "Enable DTS Eagle Support"
 	depends on SND_SOC_MSM_QDSP6V2_INTF
@@ -99,7 +98,7 @@
 	 listen on codec.
 
 config SND_SOC_INT_CODEC
-	tristate "SoC Machine driver for MSMFALCON_INT"
+	tristate "SoC Machine driver for SDM660_INT"
 	depends on ARCH_QCOM
 	select SND_SOC_QDSP6V2
 	select SND_SOC_MSM_STUB
@@ -110,17 +109,19 @@
 	select MSM_QDSP6_PDR
 	select MSM_QDSP6_NOTIFIER
 	select MSM_QDSP6V2_CODECS
-	select SND_SOC_MSM_SWR
-	select SND_SOC_MSM8X16_WCD
+	select MSM_CDC_PINCTRL
+	select SND_SOC_MSM_SDW
+	select SND_SOC_SDM660_CDC
+	select SND_SOC_MSM_HDMI_CODEC_RX
 	select QTI_PP
 	select DTS_SRS_TM
-	select DOLBY_DAP
-	select DOLBY_DS2
+	select DOLBY_LICENSE
 	select SND_HWDEP
 	select MSM_ULTRASOUND
 	select DTS_EAGLE
-	select SND_SOC_MSMFALCON_COMMON
+	select SND_SOC_SDM660_COMMON
 	select SND_SOC_COMPRESS
+	select PINCTRL_LPI
 	help
 	To add support for SoC audio on MSM_INT.
 	This will enable sound soc drivers which
@@ -129,7 +130,7 @@
 	DAI-links
 
 config SND_SOC_EXT_CODEC
-	tristate "SoC Machine driver for MSMFALCON_EXT"
+	tristate "SoC Machine driver for SDM660_EXT"
 	depends on ARCH_QCOM
 	select SND_SOC_QDSP6V2
 	select SND_SOC_MSM_STUB
@@ -143,18 +144,19 @@
 	select SND_SOC_WCD9335
 	select SND_SOC_WCD934X
 	select SND_SOC_WSA881X
+	select SND_SOC_MSM_HDMI_CODEC_RX
 	select MFD_CORE
 	select QTI_PP
 	select DTS_SRS_TM
-	select DOLBY_DAP
-	select DOLBY_DS2
+	select DOLBY_LICENSE
 	select SND_SOC_CPE
 	select SND_SOC_WCD_CPE
 	select SND_HWDEP
 	select MSM_ULTRASOUND
 	select DTS_EAGLE
-	select SND_SOC_MSMFALCON_COMMON
+	select SND_SOC_SDM660_COMMON
 	select SND_SOC_COMPRESS
+	select PINCTRL_LPI
 	help
 	To add support for SoC audio on MSM_EXT.
 	This will enable sound soc drivers which
@@ -211,7 +213,7 @@
 	select QTI_PP
 	select SND_SOC_CPE
 	select MSM_ULTRASOUND
-	select DOLBY_DS2
+	select DOLBY_LICENSE
 	select SND_HWDEP
         select DTS_EAGLE
 	help
@@ -221,13 +223,13 @@
 	 the machine driver and the corresponding
 	 DAI-links
 
-config SND_SOC_FALCON
-	tristate "SoC Machine driver for MSMFALCON boards"
-	depends on ARCH_MSMFALCON
+config SND_SOC_660
+	tristate "SoC Machine driver for SDM660 boards"
+	depends on ARCH_SDM660
 	select SND_SOC_INT_CODEC
 	select SND_SOC_EXT_CODEC
 	help
-	 To add support for SoC audio on MSMFALCON.
+	 To add support for SoC audio on SDM660.
 	 This will enable sound soc drivers which
 	 interfaces with DSP, also it will enable
 	 the machine driver and the corresponding
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index e0544fc..5105cd9 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -20,18 +20,18 @@
 snd-soc-msm8998-objs := msm8998.o
 obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o
 
-# for MSMFALCON sound card driver
-snd-soc-msmfalcon-common-objs := msm-audio-pinctrl.o msmfalcon-common.o
-obj-$(CONFIG_SND_SOC_MSMFALCON_COMMON) += snd-soc-msmfalcon-common.o
+# for SDM660 sound card driver
+snd-soc-sdm660-common-objs := sdm660-common.o
+obj-$(CONFIG_SND_SOC_SDM660_COMMON) += snd-soc-sdm660-common.o
 
-# for MSMFALCON sound card driver
-snd-soc-int-codec-objs := msmfalcon-internal.o
-obj-$(CONFIG_SND_SOC_INT_CODEC) += snd-soc-msmfalcon-common.o
+# for SDM660 sound card driver
+snd-soc-int-codec-objs := sdm660-internal.o
+obj-$(CONFIG_SND_SOC_INT_CODEC) += snd-soc-sdm660-common.o
 obj-$(CONFIG_SND_SOC_INT_CODEC) += snd-soc-int-codec.o
 
-# for MSMFALCON sound card driver
-snd-soc-ext-codec-objs := msmfalcon-external.o msmfalcon-ext-dai-links.o
-obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-msmfalcon-common.o
+# for SDM660 sound card driver
+snd-soc-ext-codec-objs := sdm660-external.o sdm660-ext-dai-links.o
+obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-sdm660-common.o
 obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-ext-codec.o
 
 # for SDM845 sound card driver
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index 44927c8..7b65dda 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -1046,7 +1046,6 @@
 	struct cpe_lsm_lab *lab_d = &lsm_d->lab;
 	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
 	struct msm_slim_dma_data *dma_data = NULL;
-	struct snd_lsm_event_status *user;
 	struct snd_lsm_detection_params det_params;
 	int rc = 0;
 
@@ -1176,13 +1175,6 @@
 		dev_dbg(rtd->dev,
 			"%s: %s\n",
 			__func__, "SNDRV_LSM_REG_SND_MODEL_V2");
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: Invalid argument to ioctl %s\n",
-				__func__,
-				"SNDRV_LSM_REG_SND_MODEL_V2");
-			return -EINVAL;
-		}
 
 		memcpy(&snd_model, arg,
 			sizeof(struct snd_lsm_sound_model_v2));
@@ -1320,19 +1312,21 @@
 		break;
 
 	case SNDRV_LSM_EVENT_STATUS:
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status *user;
+		struct snd_lsm_event_status_v3 *user_v3;
+
 		dev_dbg(rtd->dev,
 			"%s: %s\n",
-			__func__, "SNDRV_LSM_EVENT_STATUS");
+			__func__, "SNDRV_LSM_EVENT_STATUS(_V3)");
 		if (!arg) {
 			dev_err(rtd->dev,
 				"%s: Invalid argument to ioctl %s\n",
 				__func__,
-				"SNDRV_LSM_EVENT_STATUS");
+				"SNDRV_LSM_EVENT_STATUS(_V3)");
 			return -EINVAL;
 		}
 
-		user = arg;
-
 		/*
 		 * Release the api lock before wait to allow
 		 * other IOCTLs to be invoked while waiting
@@ -1352,31 +1346,62 @@
 			if (atomic_read(&lsm_d->event_avail) == 1) {
 				rc = 0;
 				atomic_set(&lsm_d->event_avail, 0);
-				if (lsm_d->ev_det_pld_size >
-					user->payload_size) {
-					dev_err(rtd->dev,
-						"%s: avail pld_bytes = %u, needed = %u\n",
-						__func__,
-						user->payload_size,
-						lsm_d->ev_det_pld_size);
-					return -EINVAL;
+
+				if (cmd == SNDRV_LSM_EVENT_STATUS) {
+					user = arg;
+					if (lsm_d->ev_det_pld_size >
+						user->payload_size) {
+						dev_err(rtd->dev,
+							"%s: avail pld_bytes = %u, needed = %u\n",
+							__func__,
+							user->payload_size,
+							lsm_d->ev_det_pld_size);
+						return -EINVAL;
+					}
+
+					user->status = lsm_d->ev_det_status;
+					user->payload_size =
+							lsm_d->ev_det_pld_size;
+					memcpy(user->payload,
+					       lsm_d->ev_det_payload,
+					       lsm_d->ev_det_pld_size);
+				} else {
+					user_v3 = arg;
+					if (lsm_d->ev_det_pld_size >
+						user_v3->payload_size) {
+						dev_err(rtd->dev,
+							"%s: avail pld_bytes = %u, needed = %u\n",
+							__func__,
+							user_v3->payload_size,
+							lsm_d->ev_det_pld_size);
+						return -EINVAL;
+					}
+					/* event status timestamp not supported
+					 * in CPE mode. Set msw and lsw to 0.
+					 */
+					user_v3->timestamp_lsw = 0;
+					user_v3->timestamp_msw = 0;
+					user_v3->status = lsm_d->ev_det_status;
+					user_v3->payload_size =
+							lsm_d->ev_det_pld_size;
+					memcpy(user_v3->payload,
+					       lsm_d->ev_det_payload,
+					       lsm_d->ev_det_pld_size);
 				}
-
-				user->status = lsm_d->ev_det_status;
-				user->payload_size = lsm_d->ev_det_pld_size;
-
-				memcpy(user->payload,
-				       lsm_d->ev_det_payload,
-				       lsm_d->ev_det_pld_size);
-
 			} else if (atomic_read(&lsm_d->event_stop) == 1) {
 				dev_dbg(rtd->dev,
 					"%s: wait_aborted\n", __func__);
-				user->payload_size = 0;
+				if (cmd == SNDRV_LSM_EVENT_STATUS) {
+					user = arg;
+					user->payload_size = 0;
+				} else {
+					user_v3 = arg;
+					user_v3->payload_size = 0;
+				}
 				rc = 0;
 			}
 		}
-
+	}
 		break;
 
 	case SNDRV_LSM_ABORT_EVENT:
@@ -1432,12 +1457,6 @@
 		break;
 
 	case SNDRV_LSM_SET_PARAMS:
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: %s Invalid argument\n",
-				__func__, "SNDRV_LSM_SET_PARAMS");
-			return -EINVAL;
-		}
 		memcpy(&det_params, arg,
 			sizeof(det_params));
 		if (det_params.num_confidence_levels <= 0) {
@@ -1514,6 +1533,20 @@
 		}
 		break;
 
+	case SNDRV_LSM_SET_PORT: {
+		u32 port_id = cpe->input_port_id;
+
+		dev_dbg(rtd->dev, "%s: %s\n", __func__, "SNDRV_LSM_SET_PORT");
+		rc = lsm_ops->lsm_set_port(cpe->core_handle, session, &port_id);
+		if (rc) {
+			dev_err(rtd->dev,
+				"%s: lsm_set_port failed, err = %d\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+	break;
+
 	default:
 		dev_dbg(rtd->dev,
 			"%s: Default snd_lib_ioctl cmd 0x%x\n",
@@ -1525,7 +1558,7 @@
 }
 
 static int msm_cpe_lsm_lab_start(struct snd_pcm_substream *substream,
-		struct snd_lsm_event_status *event_status)
+		u16 event_det_status)
 {
 	struct snd_soc_pcm_runtime *rtd;
 	struct cpe_lsm_data *lsm_d = NULL;
@@ -1578,7 +1611,7 @@
 	reinit_completion(&lab_d->thread_complete);
 
 	if (session->lab_enable &&
-	    event_status->status ==
+	    event_det_status ==
 	    LSM_VOICE_WAKEUP_STATUS_DETECTED) {
 		out_port = &session->afe_out_port_cfg;
 		out_port->port_id = session->afe_out_port_id;
@@ -1873,6 +1906,13 @@
 
 	lsm_ops->lsm_get_snd_model_offset(cpe->core_handle,
 			session, &offset);
+	/* Check if 'p_info->param_size + offset' crosses U32_MAX. */
+	if (p_info->param_size > U32_MAX - offset) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		return -EINVAL;
+	}
 	session->snd_model_size = p_info->param_size + offset;
 
 	session->snd_model_data = vzalloc(session->snd_model_size);
@@ -2108,7 +2148,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "LSM_REG_SND_MODEL_V2");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&snd_model, (void *)arg,
@@ -2173,7 +2214,60 @@
 			goto done;
 		}
 
-		msm_cpe_lsm_lab_start(substream, event_status);
+		msm_cpe_lsm_lab_start(substream, event_status->status);
+		msm_cpe_process_event_status_done(lsm_d);
+		kfree(event_status);
+	}
+		break;
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status_v3 u_event_status;
+		struct snd_lsm_event_status_v3 *event_status = NULL;
+		int u_pld_size = 0;
+
+		if (copy_from_user(&u_event_status, (void *)arg,
+				   sizeof(struct snd_lsm_event_status_v3))) {
+			dev_err(rtd->dev,
+				"%s: event status copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_event_status_v3));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (u_event_status.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			dev_err(rtd->dev,
+				"%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, u_event_status.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		u_pld_size = sizeof(struct snd_lsm_event_status_v3) +
+				u_event_status.payload_size;
+
+		event_status = kzalloc(u_pld_size, GFP_KERNEL);
+		if (!event_status) {
+			err = -ENOMEM;
+			goto done;
+		} else {
+			event_status->payload_size =
+				u_event_status.payload_size;
+			err = msm_cpe_lsm_ioctl_shared(substream,
+						       cmd, event_status);
+		}
+
+		if (!err && copy_to_user(arg, event_status, u_pld_size)) {
+			dev_err(rtd->dev,
+				"%s: copy to user failed\n",
+				__func__);
+			kfree(event_status);
+			err = -EFAULT;
+			goto done;
+		}
+
+		msm_cpe_lsm_lab_start(substream, event_status->status);
 		msm_cpe_process_event_status_done(lsm_d);
 		kfree(event_status);
 	}
@@ -2185,7 +2279,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "SNDRV_LSM_SET_PARAMS");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&det_params, (void *) arg,
@@ -2212,14 +2307,16 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if not using topology\n",
 				__func__, "SET_MODULE_PARAMS");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (!arg) {
 			dev_err(rtd->dev,
 				"%s: %s: No Param data to set\n",
 				__func__, "SET_MODULE_PARAMS");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&p_data, arg,
@@ -2227,7 +2324,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: copy_from_user failed, size = %zd\n",
 				__func__, "p_data", sizeof(p_data));
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -2235,7 +2333,8 @@
 				"%s: %s: Invalid num_params %d\n",
 				__func__, "SET_MODULE_PARAMS",
 				p_data.num_params);
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		p_size = p_data.num_params *
@@ -2246,12 +2345,15 @@
 				"%s: %s: Invalid size %zd\n",
 				__func__, "SET_MODULE_PARAMS", p_size);
 
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		params = kzalloc(p_size, GFP_KERNEL);
-		if (!params)
-			return -ENOMEM;
+		if (!params) {
+			err = -ENOMEM;
+			goto done;
+		}
 
 		if (copy_from_user(params, p_data.params,
 				   p_data.data_size)) {
@@ -2259,7 +2361,8 @@
 				"%s: %s: copy_from_user failed, size = %d\n",
 				__func__, "params", p_data.data_size);
 			kfree(params);
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		err = msm_cpe_lsm_process_params(substream, &p_data, params);
@@ -2282,12 +2385,6 @@
 }
 
 #ifdef CONFIG_COMPAT
-struct snd_lsm_event_status32 {
-	u16 status;
-	u16 payload_size;
-	u8 payload[0];
-};
-
 struct snd_lsm_sound_model_v2_32 {
 	compat_uptr_t data;
 	compat_uptr_t confidence_level;
@@ -2309,7 +2406,7 @@
 	u32 param_id;
 	u32 param_size;
 	compat_uptr_t param_data;
-	enum LSM_PARAM_TYPE param_type;
+	uint32_t param_type;
 };
 
 struct snd_lsm_module_params_32 {
@@ -2319,8 +2416,6 @@
 };
 
 enum {
-	SNDRV_LSM_EVENT_STATUS32 =
-		_IOW('U', 0x02, struct snd_lsm_event_status32),
 	SNDRV_LSM_REG_SND_MODEL_V2_32 =
 		_IOW('U', 0x07, struct snd_lsm_sound_model_v2_32),
 	SNDRV_LSM_SET_PARAMS32 =
@@ -2378,7 +2473,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "LSM_REG_SND_MODEL_V2_32");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		dev_dbg(rtd->dev,
@@ -2414,7 +2510,7 @@
 				err);
 	}
 		break;
-	case SNDRV_LSM_EVENT_STATUS32: {
+	case SNDRV_LSM_EVENT_STATUS: {
 		struct snd_lsm_event_status *event_status = NULL;
 		struct snd_lsm_event_status u_event_status32;
 		struct snd_lsm_event_status *udata_32 = NULL;
@@ -2456,7 +2552,6 @@
 		} else {
 			event_status->payload_size =
 				u_event_status32.payload_size;
-			cmd = SNDRV_LSM_EVENT_STATUS;
 			err = msm_cpe_lsm_ioctl_shared(substream,
 						       cmd, event_status);
 			if (err)
@@ -2495,7 +2590,97 @@
 			goto done;
 		}
 
-		msm_cpe_lsm_lab_start(substream, event_status);
+		msm_cpe_lsm_lab_start(substream, event_status->status);
+		msm_cpe_process_event_status_done(lsm_d);
+		kfree(event_status);
+		kfree(udata_32);
+	}
+		break;
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status_v3 *event_status = NULL;
+		struct snd_lsm_event_status_v3 u_event_status32;
+		struct snd_lsm_event_status_v3 *udata_32 = NULL;
+		int u_pld_size = 0;
+
+		dev_dbg(rtd->dev,
+			"%s: ioctl %s\n", __func__,
+			"SNDRV_LSM_EVENT_STATUS_V3_32");
+
+		if (copy_from_user(&u_event_status32, (void *)arg,
+				   sizeof(struct snd_lsm_event_status_v3))) {
+			dev_err(rtd->dev,
+				"%s: event status copy from user failed, size %zd\n",
+				__func__,
+				sizeof(struct snd_lsm_event_status_v3));
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (u_event_status32.payload_size >
+		   LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			dev_err(rtd->dev,
+				"%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, u_event_status32.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		u_pld_size = sizeof(struct snd_lsm_event_status_v3) +
+				u_event_status32.payload_size;
+		event_status = kzalloc(u_pld_size, GFP_KERNEL);
+		if (!event_status) {
+			dev_err(rtd->dev,
+				"%s: No memory for event status\n",
+				__func__);
+			err = -ENOMEM;
+			goto done;
+		} else {
+			event_status->payload_size =
+				u_event_status32.payload_size;
+			err = msm_cpe_lsm_ioctl_shared(substream,
+						       cmd, event_status);
+			if (err)
+				dev_err(rtd->dev,
+					"%s: %s failed, error = %d\n",
+					__func__,
+					"SNDRV_LSM_EVENT_STATUS_V3_32",
+					err);
+		}
+
+		if (!err) {
+			udata_32 = kzalloc(u_pld_size, GFP_KERNEL);
+			if (!udata_32) {
+				dev_err(rtd->dev,
+					"%s: nomem for udata\n",
+					__func__);
+				err = -EFAULT;
+			} else {
+				udata_32->timestamp_lsw =
+					event_status->timestamp_lsw;
+				udata_32->timestamp_msw =
+					event_status->timestamp_msw;
+				udata_32->status = event_status->status;
+				udata_32->payload_size =
+					event_status->payload_size;
+				memcpy(udata_32->payload,
+				       event_status->payload,
+				       event_status->payload_size);
+			}
+		}
+
+		if (!err && copy_to_user(arg, udata_32,
+					  u_pld_size)) {
+			dev_err(rtd->dev,
+				"%s: copy to user failed\n",
+				__func__);
+			kfree(event_status);
+			kfree(udata_32);
+			err = -EFAULT;
+			goto done;
+		}
+
+		msm_cpe_lsm_lab_start(substream, event_status->status);
 		msm_cpe_process_event_status_done(lsm_d);
 		kfree(event_status);
 		kfree(udata_32);
@@ -2509,7 +2694,9 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "SNDRV_LSM_SET_PARAMS32");
-			return -EINVAL;
+
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&det_params32, arg,
@@ -2553,14 +2740,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if not using topology\n",
 				__func__, "SET_MODULE_PARAMS_32");
-			return -EINVAL;
-		}
-
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: %s: No Param data to set\n",
-				__func__, "SET_MODULE_PARAMS_32");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&p_data_32, arg,
@@ -2569,7 +2750,8 @@
 				"%s: %s: copy_from_user failed, size = %zd\n",
 				__func__, "SET_MODULE_PARAMS_32",
 				sizeof(p_data_32));
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		p_data.params = compat_ptr(p_data_32.params);
@@ -2581,7 +2763,8 @@
 				"%s: %s: Invalid num_params %d\n",
 				__func__, "SET_MODULE_PARAMS_32",
 				p_data.num_params);
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (p_data.data_size !=
@@ -2590,21 +2773,25 @@
 				"%s: %s: Invalid size %d\n",
 				__func__, "SET_MODULE_PARAMS_32",
 				p_data.data_size);
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		p_size = sizeof(struct lsm_params_info_32) *
 			 p_data.num_params;
 
 		params32 = kzalloc(p_size, GFP_KERNEL);
-		if (!params32)
-			return -ENOMEM;
+		if (!params32) {
+			err = -ENOMEM;
+			goto done;
+		}
 
 		p_size = sizeof(struct lsm_params_info) * p_data.num_params;
 		params = kzalloc(p_size, GFP_KERNEL);
 		if (!params) {
 			kfree(params32);
-			return -ENOMEM;
+			err = -ENOMEM;
+			goto done;
 		}
 
 		if (copy_from_user(params32, p_data.params,
@@ -2614,7 +2801,8 @@
 				__func__, "params32", p_data.data_size);
 			kfree(params32);
 			kfree(params);
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		p_info_32 = (struct lsm_params_info_32 *) params32;
@@ -2640,6 +2828,19 @@
 		kfree(params32);
 		break;
 	}
+	case SNDRV_LSM_REG_SND_MODEL_V2:
+	case SNDRV_LSM_SET_PARAMS:
+	case SNDRV_LSM_SET_MODULE_PARAMS:
+		/*
+		 * In ideal cases, the compat_ioctl should never be called
+		 * with the above unlocked ioctl commands. Print error
+		 * and return error if it does.
+		 */
+		dev_err(rtd->dev,
+			"%s: Invalid cmd for compat_ioctl\n",
+			__func__);
+		err = -EINVAL;
+		break;
 	default:
 		err = msm_cpe_lsm_ioctl_shared(substream, cmd, arg);
 		break;
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 081f8b4..755b62a 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -506,6 +506,33 @@
 	},
 	{
 		.playback = {
+			.stream_name = "SLIMBUS7_HOSTLESS Playback",
+			.aif_name = "SLIM7_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "SLIMBUS7_HOSTLESS Capture",
+			.aif_name = "SLIM7_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_384000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "SLIMBUS7_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
 			.stream_name = "SLIMBUS8_HOSTLESS Playback",
 			.aif_name = "SLIM8_DL_HL",
 			.rates = SNDRV_PCM_RATE_8000_384000,
@@ -583,6 +610,49 @@
 	},
 	{
 		.playback = {
+			.stream_name = "USBAUDIO_HOSTLESS Playback",
+			.aif_name = "USBAUDIO_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+				SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+				SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_352800 |
+				SNDRV_PCM_RATE_384000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE |
+				SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "USBAUDIO_HOSTLESS Capture",
+			.aif_name = "USBAUDIO_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+				SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+				SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_352800 |
+				SNDRV_PCM_RATE_384000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE |
+				SNDRV_PCM_FMTBIT_S32_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "USBAUDIO_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
+	{
+		.playback = {
 			.stream_name = "AFE Playback",
 			.aif_name = "PCM_RX",
 			.rates = (SNDRV_PCM_RATE_8000 |
@@ -894,7 +964,7 @@
 			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
 				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 2,
+			.channels_max = 4,
 			.rate_min =     8000,
 			.rate_max =    192000,
 		},
@@ -902,6 +972,22 @@
 		.name = "INT4_MI2S_RX_HOSTLESS",
 		.probe = fe_dai_probe,
 	},
+	{
+		.capture = {
+			.stream_name = "INT3 MI2S_TX Hostless Capture",
+			.aif_name = "INT3_MI2S_UL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "INT3_MI2S_TX_HOSTLESS",
+		.probe = fe_dai_probe,
+	},
 	/* TDM Hostless */
 	{
 		.capture = {
@@ -2117,12 +2203,14 @@
 		.capture = {
 			.stream_name = "Listen 1 Audio Service Capture",
 			.aif_name = "LSM1_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM1",
@@ -2132,12 +2220,14 @@
 		.capture = {
 			.stream_name = "Listen 2 Audio Service Capture",
 			.aif_name = "LSM2_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM2",
@@ -2147,12 +2237,14 @@
 		.capture = {
 			.stream_name = "Listen 3 Audio Service Capture",
 			.aif_name = "LSM3_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM3",
@@ -2162,12 +2254,14 @@
 		.capture = {
 			.stream_name = "Listen 4 Audio Service Capture",
 			.aif_name = "LSM4_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM4",
@@ -2177,12 +2271,14 @@
 		.capture = {
 			.stream_name = "Listen 5 Audio Service Capture",
 			.aif_name = "LSM5_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM5",
@@ -2192,12 +2288,14 @@
 		.capture = {
 			.stream_name = "Listen 6 Audio Service Capture",
 			.aif_name = "LSM6_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM6",
@@ -2207,12 +2305,14 @@
 		.capture = {
 			.stream_name = "Listen 7 Audio Service Capture",
 			.aif_name = "LSM7_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM7",
@@ -2222,12 +2322,14 @@
 		.capture = {
 			.stream_name = "Listen 8 Audio Service Capture",
 			.aif_name = "LSM8_UL_HL",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = (SNDRV_PCM_RATE_16000 |
+				  SNDRV_PCM_RATE_48000),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE),
 			.channels_min = 1,
-			.channels_max = 1,
+			.channels_max = 4,
 			.rate_min = 16000,
-			.rate_max = 16000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM8",
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 73755ed..51c27b7 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -159,6 +159,21 @@
 	u32 index;
 };
 
+enum pinctrl_pin_state {
+	STATE_DISABLE = 0, /* All pins are in sleep state */
+	STATE_MI2S_ACTIVE,  /* I2S = active, TDM = sleep */
+	STATE_TDM_ACTIVE,  /* I2S = sleep, TDM = active */
+};
+
+struct msm_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *mi2s_disable;
+	struct pinctrl_state *tdm_disable;
+	struct pinctrl_state *mi2s_active;
+	struct pinctrl_state *tdm_active;
+	enum pinctrl_pin_state curr_state;
+};
+
 struct msm_asoc_mach_data {
 	u32 mclk_freq;
 	int us_euro_gpio; /* used by gpio driver API */
@@ -166,6 +181,7 @@
 	struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
 	struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
 	struct snd_info_entry *codec_root;
+	struct msm_pinctrl_info pinctrl_info;
 };
 
 struct msm_asoc_wcd93xx_codec {
@@ -174,6 +190,9 @@
 	void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec);
 };
 
+static const char *const pin_states[] = {"sleep", "i2s-active",
+					 "tdm-active"};
+
 enum {
 	TDM_0 = 0,
 	TDM_1,
@@ -402,7 +421,8 @@
 					"KHZ_88P2", "KHZ_96", "KHZ_176P4",
 					"KHZ_192", "KHZ_352P8", "KHZ_384"};
 static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
-						  "KHZ_192"};
+					"KHZ_192", "KHZ_32", "KHZ_44P1",
+					"KHZ_88P2", "KHZ_176P4"};
 static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
 				    "Five", "Six", "Seven", "Eight"};
 static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -514,6 +534,9 @@
 	.key_code[7] = 0,
 	.linein_th = 5000,
 	.moisture_en = true,
+	.mbhc_micbias = MIC_BIAS_2,
+	.anc_micbias = MIC_BIAS_2,
+	.enable_anc_mic_detect = false,
 };
 
 static struct snd_soc_dapm_route wcd_audio_paths_tasha[] = {
@@ -1479,6 +1502,22 @@
 		return idx;
 
 	switch (ext_disp_rx_cfg[idx].sample_rate) {
+	case SAMPLING_RATE_176P4KHZ:
+		sample_rate_val = 6;
+		break;
+
+	case SAMPLING_RATE_88P2KHZ:
+		sample_rate_val = 5;
+		break;
+
+	case SAMPLING_RATE_44P1KHZ:
+		sample_rate_val = 4;
+		break;
+
+	case SAMPLING_RATE_32KHZ:
+		sample_rate_val = 3;
+		break;
+
 	case SAMPLING_RATE_192KHZ:
 		sample_rate_val = 2;
 		break;
@@ -1509,6 +1548,18 @@
 		return idx;
 
 	switch (ucontrol->value.integer.value[0]) {
+	case 6:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_176P4KHZ;
+		break;
+	case 5:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_88P2KHZ;
+		break;
+	case 4:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_44P1KHZ;
+		break;
+	case 3:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_32KHZ;
+		break;
 	case 2:
 		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
 		break;
@@ -3321,6 +3372,18 @@
 					    134, 135, 136, 137, 138, 139,
 					    140, 141, 142, 143};
 
+	/* Tavil Codec SLIMBUS configuration
+	 * RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8
+	 * TX1, TX2, TX3, TX4, TX5, TX6, TX7, TX8, TX9, TX10, TX11, TX12, TX13
+	 * TX14, TX15, TX16
+	 */
+	unsigned int rx_ch_tavil[WCD934X_RX_MAX] = {144, 145, 146, 147, 148,
+						    149, 150, 151};
+	unsigned int tx_ch_tavil[WCD934X_TX_MAX] = {128, 129, 130, 131, 132,
+						    133, 134, 135, 136, 137,
+						    138, 139, 140, 141, 142,
+						    143};
+
 	pr_info("%s: dev_name%s\n", __func__, dev_name(cpu_dai->dev));
 
 	rtd->pmdown_time = 0;
@@ -3369,20 +3432,27 @@
 	snd_soc_dapm_ignore_suspend(dapm, "HPHR");
 	snd_soc_dapm_ignore_suspend(dapm, "AIF4 VI");
 	snd_soc_dapm_ignore_suspend(dapm, "VIINPUT");
+	snd_soc_dapm_ignore_suspend(dapm, "ANC HPHL");
+	snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
 
 	if (!strcmp(dev_name(codec_dai->dev), "tasha_codec")) {
 		snd_soc_dapm_ignore_suspend(dapm, "LINEOUT3");
 		snd_soc_dapm_ignore_suspend(dapm, "LINEOUT4");
-		snd_soc_dapm_ignore_suspend(dapm, "ANC HPHL");
-		snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
 		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
 		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
 	}
 
 	snd_soc_dapm_sync(dapm);
 
-	snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
-				    tx_ch, ARRAY_SIZE(rx_ch), rx_ch);
+	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
+		snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch_tavil),
+					tx_ch_tavil, ARRAY_SIZE(rx_ch_tavil),
+					rx_ch_tavil);
+	} else {
+		snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
+					tx_ch, ARRAY_SIZE(rx_ch),
+					rx_ch);
+	}
 
 	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
 		msm_codec_fn.get_afe_config_fn = tavil_get_afe_config;
@@ -3978,6 +4048,275 @@
 	return ret;
 }
 
+static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
+				enum pinctrl_pin_state new_state)
+{
+	int ret = 0;
+	int curr_state = 0;
+
+	if (pinctrl_info == NULL) {
+		pr_err("%s: pinctrl_info is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+	curr_state = pinctrl_info->curr_state;
+	pinctrl_info->curr_state = new_state;
+	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
+		 pin_states[curr_state], pin_states[pinctrl_info->curr_state]);
+
+	if (curr_state == pinctrl_info->curr_state) {
+		pr_debug("%s: Already in same state\n", __func__);
+		goto err;
+	}
+
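+	/*
+	 * Only transitions into or out of STATE_DISABLE are allowed;
+	 * switching directly between MI2S and TDM active states is rejected.
+	 */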
+	if (curr_state != STATE_DISABLE &&
+		pinctrl_info->curr_state != STATE_DISABLE) {
+		pr_debug("%s: state already active cannot switch\n", __func__);
+		ret = -EIO;
+		goto err;
+	}
+
+	switch (pinctrl_info->curr_state) {
+	case STATE_MI2S_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_active);
+		if (ret) {
+			pr_err("%s: MI2S state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_TDM_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_active);
+		if (ret) {
+			pr_err("%s: TDM state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_DISABLE:
+		if (curr_state == STATE_MI2S_ACTIVE) {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+		} else {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_disable);
+		}
+		if (ret) {
+			pr_err("%s:  state disable failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	default:
+		pr_err("%s: TLMM pin state is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+err:
+	return ret;
+}
+
+static void msm_release_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	if (pinctrl_info->pinctrl) {
+		devm_pinctrl_put(pinctrl_info->pinctrl);
+		pinctrl_info->pinctrl = NULL;
+	}
+}
+
+static int msm_get_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = NULL;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	pinctrl_info = &pdata->pinctrl_info;
+
+	if (pinctrl_info == NULL) {
+		pr_err("%s: pinctrl_info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(pinctrl)) {
+		pr_err("%s: Unable to get pinctrl handle\n", __func__);
+		return -EINVAL;
+	}
+	pinctrl_info->pinctrl = pinctrl;
+
+	/* get all the states handles from Device Tree */
+	pinctrl_info->mi2s_disable = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-sleep");
+	if (IS_ERR(pinctrl_info->mi2s_disable)) {
+		pr_err("%s: could not get mi2s_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->mi2s_active = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-active");
+	if (IS_ERR(pinctrl_info->mi2s_active)) {
+		pr_err("%s: could not get mi2s_active pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_disable = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-sleep");
+	if (IS_ERR(pinctrl_info->tdm_disable)) {
+		pr_err("%s: could not get tdm_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_active = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-active");
+	if (IS_ERR(pinctrl_info->tdm_active)) {
+		pr_err("%s: could not get tdm_active pinstate\n",
+			__func__);
+		goto err;
+	}
+	/* Reset the TLMM pins to a default state */
+	ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+	if (ret != 0) {
+		pr_err("%s: Disable TLMM pins failed with %d\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+	pinctrl_info->curr_state = STATE_DISABLE;
+
+	return 0;
+
+err:
+	devm_pinctrl_put(pinctrl);
+	pinctrl_info->pinctrl = NULL;
+	return -EINVAL;
+}
+
+static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				      struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+
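+	/*
+	 * Pin the backend channels, bit format and sample rate to the
+	 * user-configured TDM settings for the matching port.
+	 */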
+	if (cpu_dai->id == AFE_PORT_ID_QUATERNARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+		rate->min = rate->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+	} else if (cpu_dai->id == AFE_PORT_ID_SECONDARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+	} else {
+		pr_err("%s: dai id 0x%x not supported\n",
+			__func__, cpu_dai->id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+		__func__, cpu_dai->id, channels->max, rate->max,
+		params_format(params));
+
+	return 0;
+}
+
+static int msm8998_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+				     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+	int channels, slot_width, slots;
+	unsigned int slot_mask;
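+	/* 32-bit slots, so consecutive slots start at 4-byte offsets */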
+	unsigned int slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+
+	pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+	slots = tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+	/* Slot mask: set the lower 'slots' bits (e.g. 0x3 for a 2-slot config) */
+	slot_mask = 0x0000FFFF >> (16 - slots);
+	slot_width = 32;
+	channels = slots;
+
+	pr_debug("%s: slot_width %d slots %d\n", __func__, slot_width, slots);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_debug("%s: slot_width %d\n", __func__, slot_width);
+		ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+			slots, slot_width);
+		if (ret < 0) {
+			pr_err("%s: failed to set tdm slot, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai,
+			0, NULL, channels, slot_offset);
+		if (ret < 0) {
+			pr_err("%s: failed to set channel map, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+	} else {
+		pr_err("%s: invalid use case, err:%d\n",
+			__func__, ret);
+	}
+
+end:
+	return ret;
+}
+
+static int msm8998_tdm_snd_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_TDM_ACTIVE);
+	if (ret)
+		pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static void msm8998_tdm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+	if (ret)
+		pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+
+}
+
+static struct snd_soc_ops msm8998_tdm_be_ops = {
+	.hw_params = msm8998_tdm_snd_hw_params,
+	.startup = msm8998_tdm_snd_startup,
+	.shutdown = msm8998_tdm_snd_shutdown
+};
+
 static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
 {
 	int ret = 0;
@@ -3985,6 +4324,9 @@
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int index = cpu_dai->id;
 	unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -3998,6 +4340,15 @@
 			__func__, cpu_dai->id);
 		goto done;
 	}
+	if (index == QUAT_MI2S) {
+		ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret) {
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret);
+			goto done;
+		}
+	}
+
 	/*
 	 * Muxtex protection in case the same MI2S
 	 * interface using for both TX and RX  so
@@ -4050,6 +4401,9 @@
 	int ret;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	int index = rtd->cpu_dai->id;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -4068,6 +4422,13 @@
 		}
 	}
 	mutex_unlock(&mi2s_intf_conf[index].lock);
+
+	if (index == QUAT_MI2S) {
+		ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret)
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret);
+	}
 }
 
 static struct snd_soc_ops msm_mi2s_be_ops = {
@@ -4952,6 +5313,42 @@
 	},
 };
 
+static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
+	{
+		.name = MSM_DAILINK_NAME(ASM Loopback),
+		.stream_name = "MultiMedia6",
+		.cpu_dai_name = "MultiMedia6",
+		.platform_name = "msm-pcm-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+	},
+	{
+		.name = "USB Audio Hostless",
+		.stream_name = "USB Audio Hostless",
+		.cpu_dai_name = "USBAUDIO_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+};
+
 static struct snd_soc_dai_link msm_common_be_dai_links[] = {
 	/* Backend AFE DAI Links */
 	{
@@ -5159,8 +5556,8 @@
 		.no_pcm = 1,
 		.dpcm_playback = 1,
 		.id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
-		.be_hw_params_fixup = msm_be_hw_params_fixup,
-		.ops = &msm_tdm_be_ops,
+		.be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+		.ops = &msm8998_tdm_be_ops,
 		.ignore_suspend = 1,
 	},
 	{
@@ -5334,6 +5731,22 @@
 		.ignore_pmdown_time = 1,
 		.ignore_suspend = 1,
 	},
+	/* Slimbus VI Recording */
+	{
+		.name = LPASS_BE_SLIMBUS_TX_VI,
+		.stream_name = "Slimbus4 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16393",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tasha_codec",
+		.codec_dai_name = "tasha_vifeedback",
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_pmdown_time = 1,
+	},
 };
 
 static struct snd_soc_dai_link msm_tavil_be_dai_links[] = {
@@ -5506,6 +5919,22 @@
 		.ignore_pmdown_time = 1,
 		.ignore_suspend = 1,
 	},
+	/* Slimbus VI Recording */
+	{
+		.name = LPASS_BE_SLIMBUS_TX_VI,
+		.stream_name = "Slimbus4 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16393",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_vifeedback",
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_pmdown_time = 1,
+	},
 };
 
 static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
@@ -5842,6 +6271,7 @@
 static struct snd_soc_dai_link msm_tasha_dai_links[
 			 ARRAY_SIZE(msm_common_dai_links) +
 			 ARRAY_SIZE(msm_tasha_fe_dai_links) +
+			 ARRAY_SIZE(msm_common_misc_fe_dai_links) +
 			 ARRAY_SIZE(msm_common_be_dai_links) +
 			 ARRAY_SIZE(msm_tasha_be_dai_links) +
 			 ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -5852,6 +6282,7 @@
 static struct snd_soc_dai_link msm_tavil_dai_links[
 			 ARRAY_SIZE(msm_common_dai_links) +
 			 ARRAY_SIZE(msm_tavil_fe_dai_links) +
+			 ARRAY_SIZE(msm_common_misc_fe_dai_links) +
 			 ARRAY_SIZE(msm_common_be_dai_links) +
 			 ARRAY_SIZE(msm_tavil_be_dai_links) +
 			 ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -6185,7 +6616,7 @@
 {
 	struct snd_soc_card *card = NULL;
 	struct snd_soc_dai_link *dailink;
-	int len_1, len_2, len_3;
+	int len_1, len_2, len_3, len_4;
 	int total_links;
 	const struct of_device_id *match;
 
@@ -6200,8 +6631,9 @@
 		card = &snd_soc_card_tasha_msm;
 		len_1 = ARRAY_SIZE(msm_common_dai_links);
 		len_2 = len_1 + ARRAY_SIZE(msm_tasha_fe_dai_links);
-		len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
-		total_links = len_3 + ARRAY_SIZE(msm_tasha_be_dai_links);
+		len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+		len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+		total_links = len_4 + ARRAY_SIZE(msm_tasha_be_dai_links);
 		memcpy(msm_tasha_dai_links,
 		       msm_common_dai_links,
 		       sizeof(msm_common_dai_links));
@@ -6209,9 +6641,12 @@
 		       msm_tasha_fe_dai_links,
 		       sizeof(msm_tasha_fe_dai_links));
 		memcpy(msm_tasha_dai_links + len_2,
+		       msm_common_misc_fe_dai_links,
+		       sizeof(msm_common_misc_fe_dai_links));
+		memcpy(msm_tasha_dai_links + len_3,
 		       msm_common_be_dai_links,
 		       sizeof(msm_common_be_dai_links));
-		memcpy(msm_tasha_dai_links + len_3,
+		memcpy(msm_tasha_dai_links + len_4,
 		       msm_tasha_be_dai_links,
 		       sizeof(msm_tasha_be_dai_links));
 
@@ -6252,8 +6687,9 @@
 		card = &snd_soc_card_tavil_msm;
 		len_1 = ARRAY_SIZE(msm_common_dai_links);
 		len_2 = len_1 + ARRAY_SIZE(msm_tavil_fe_dai_links);
-		len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
-		total_links = len_3 + ARRAY_SIZE(msm_tavil_be_dai_links);
+		len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+		len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+		total_links = len_4 + ARRAY_SIZE(msm_tavil_be_dai_links);
 		memcpy(msm_tavil_dai_links,
 		       msm_common_dai_links,
 		       sizeof(msm_common_dai_links));
@@ -6261,9 +6697,12 @@
 		       msm_tavil_fe_dai_links,
 		       sizeof(msm_tavil_fe_dai_links));
 		memcpy(msm_tavil_dai_links + len_2,
+		       msm_common_misc_fe_dai_links,
+		       sizeof(msm_common_misc_fe_dai_links));
+		memcpy(msm_tavil_dai_links + len_3,
 		       msm_common_be_dai_links,
 		       sizeof(msm_common_be_dai_links));
-		memcpy(msm_tavil_dai_links + len_3,
+		memcpy(msm_tavil_dai_links + len_4,
 		       msm_tavil_be_dai_links,
 		       sizeof(msm_tavil_be_dai_links));
 
@@ -6751,14 +7190,19 @@
 			pdev->dev.of_node->full_name);
 		dev_dbg(&pdev->dev, "Jack type properties set to default");
 	} else {
-		if (!strcmp(mbhc_audio_jack_type, "4-pole-jack"))
+		if (!strcmp(mbhc_audio_jack_type, "4-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = false;
 			dev_dbg(&pdev->dev, "This hardware has 4 pole jack");
-		else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack"))
+		} else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = true;
 			dev_dbg(&pdev->dev, "This hardware has 5 pole jack");
-		else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack"))
+		} else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = true;
 			dev_dbg(&pdev->dev, "This hardware has 6 pole jack");
-		else
+		} else {
+			wcd_mbhc_cfg.enable_anc_mic_detect = false;
 			dev_dbg(&pdev->dev, "Unknown value, set to default");
+		}
 	}
 	/*
 	 * Parse US-Euro gpio info from DT. Report no error if us-euro
@@ -6784,6 +7228,17 @@
 		dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
 			ret);
 
+	/* Parse pinctrl info from devicetree */
+	ret = msm_get_pinctrl(pdev);
+	if (!ret) {
+		pr_debug("%s: pinctrl parsing successful\n", __func__);
+	} else {
+		dev_dbg(&pdev->dev,
+			"%s: Parsing pinctrl failed with %d. Cannot use Ports\n",
+			__func__, ret);
+		ret = 0;
+	}
+
 	i2s_auxpcm_init(pdev);
 
 	is_initial_boot = true;
@@ -6801,6 +7256,7 @@
 		gpio_free(pdata->us_euro_gpio);
 		pdata->us_euro_gpio = 0;
 	}
+	msm_release_pinctrl(pdev);
 	devm_kfree(&pdev->dev, pdata);
 	return ret;
 }
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 461c09d..d4db55f 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -10,8 +10,8 @@
 				 msm-dai-stub-v2.o
 obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
 obj-$(CONFIG_DTS_EAGLE) += msm-dts-eagle.o
-obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
 obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
+obj-$(CONFIG_DOLBY_LICENSE) += msm-ds2-dap-config.o
 obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
 obj-$(CONFIG_QTI_PP) += msm-qti-pp-config.o
 obj-y += audio_calibration.o audio_cal_utils.o q6adm.o q6afe.o q6asm.o \
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index f88087b..5d4a0ba 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -119,6 +119,9 @@
 	case AFE_SIDETONE_CAL_TYPE:
 		size = sizeof(struct audio_cal_info_sidetone);
 		break;
+	case AFE_SIDETONE_IIR_CAL_TYPE:
+		size = sizeof(struct audio_cal_info_sidetone_iir);
+		break;
 	case LSM_CUST_TOPOLOGY_CAL_TYPE:
 		size = 0;
 		break;
@@ -265,6 +268,9 @@
 	case AFE_SIDETONE_CAL_TYPE:
 		size = sizeof(struct audio_cal_type_sidetone);
 		break;
+	case AFE_SIDETONE_IIR_CAL_TYPE:
+		size = sizeof(struct audio_cal_type_sidetone_iir);
+		break;
 	case LSM_CUST_TOPOLOGY_CAL_TYPE:
 		size = sizeof(struct audio_cal_type_basic);
 		break;
@@ -598,7 +604,6 @@
 		goto done;
 
 	INIT_LIST_HEAD(&cal_block->list);
-	list_add_tail(&cal_block->list, &cal_type->cal_blocks);
 
 	cal_block->map_data.ion_map_handle = basic_cal->cal_data.mem_handle;
 	if (basic_cal->cal_data.mem_handle > 0) {
@@ -630,6 +635,7 @@
 		goto err;
 	}
 	cal_block->buffer_number = basic_cal->cal_hdr.buffer_number;
+	list_add_tail(&cal_block->list, &cal_type->cal_blocks);
 	pr_debug("%s: created block for cal type %d, buf num %d, map handle %d, map size %zd paddr 0x%pK!\n",
 		__func__, cal_type->info.reg.cal_type,
 		cal_block->buffer_number,
@@ -639,6 +645,8 @@
 done:
 	return cal_block;
 err:
+	kfree(cal_block->cal_info);
+	kfree(cal_block->client_info);
 	kfree(cal_block);
 	cal_block = NULL;
 	return cal_block;
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 48f58f1..e8e4e04 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -56,8 +56,8 @@
 #define FLAC_BLK_SIZE_LIMIT		65535
 
 /* Timestamp mode payload offsets */
-#define TS_LSW_OFFSET			6
-#define TS_MSW_OFFSET			7
+#define CAPTURE_META_DATA_TS_OFFSET_LSW	6
+#define CAPTURE_META_DATA_TS_OFFSET_MSW	7
 
 /* decoder parameter length */
 #define DDP_DEC_MAX_NUM_PARAM		18
@@ -100,7 +100,7 @@
 
 static unsigned int supported_sample_rates[] = {
 	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000,
-	88200, 96000, 176400, 192000, 352800, 384000, 2822400, 5644800
+	88200, 96000, 128000, 176400, 192000, 352800, 384000, 2822400, 5644800
 };
 
 struct msm_compr_pdata {
@@ -160,6 +160,10 @@
 	uint32_t stream_available;
 	uint32_t next_stream;
 
+	uint32_t run_mode;
+	uint32_t start_delay_lsw;
+	uint32_t start_delay_msw;
+
 	uint64_t marker_timestamp;
 
 	struct msm_compr_gapless_state gapless_state;
@@ -215,6 +219,99 @@
 				     struct msm_compr_dec_params *dec_params,
 				     int stream_id);
 
+static int msm_compr_set_render_mode(struct msm_compr_audio *prtd,
+				     uint32_t render_mode) {
+	int ret = -EINVAL;
+	struct audio_client *ac = prtd->audio_client;
+
+	pr_debug("%s, got render mode %u\n", __func__, render_mode);
+
+	if (render_mode == SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER) {
+		render_mode = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT;
+	} else if (render_mode == SNDRV_COMPRESS_RENDER_MODE_STC_MASTER) {
+		render_mode = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC;
+		prtd->run_mode = ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY;
+	} else {
+		pr_err("%s, Invalid render mode %u\n", __func__,
+			render_mode);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = q6asm_send_mtmx_strtr_render_mode(ac, render_mode);
+	if (ret) {
+		pr_err("%s, Render mode can't be set error %d\n", __func__,
+			ret);
+	}
+exit:
+	return ret;
+}
+
+static int msm_compr_set_clk_rec_mode(struct audio_client *ac,
+				     uint32_t clk_rec_mode) {
+	int ret = -EINVAL;
+
+	pr_debug("%s, got clk rec mode %u\n", __func__, clk_rec_mode);
+
+	if (clk_rec_mode == SNDRV_COMPRESS_CLK_REC_MODE_NONE) {
+		clk_rec_mode = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE;
+	} else if (clk_rec_mode == SNDRV_COMPRESS_CLK_REC_MODE_AUTO) {
+		clk_rec_mode = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO;
+	} else {
+		pr_err("%s, Invalid clk rec_mode mode %u\n", __func__,
+			clk_rec_mode);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = q6asm_send_mtmx_strtr_clk_rec_mode(ac, clk_rec_mode);
+	if (ret) {
+		pr_err("%s, clk rec mode can't be set, error %d\n", __func__,
+			ret);
+	}
+
+exit:
+	return ret;
+}
+
+static int msm_compr_set_render_window(struct audio_client *ac,
+		uint32_t ws_lsw, uint32_t ws_msw,
+		uint32_t we_lsw, uint32_t we_msw)
+{
+	int ret = -EINVAL;
+	struct asm_session_mtmx_strtr_param_window_v2_t asm_mtmx_strtr_window;
+	uint32_t param_id;
+
+	pr_debug("%s, ws_lsw 0x%x ws_msw 0x%x we_lsw 0x%x we_ms 0x%x\n",
+		 __func__, ws_lsw, ws_msw, we_lsw, we_msw);
+
+	memset(&asm_mtmx_strtr_window, 0,
+	       sizeof(struct asm_session_mtmx_strtr_param_window_v2_t));
+	asm_mtmx_strtr_window.window_lsw = ws_lsw;
+	asm_mtmx_strtr_window.window_msw = ws_msw;
+	param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_START_V2;
+	ret = q6asm_send_mtmx_strtr_window(ac, &asm_mtmx_strtr_window,
+					   param_id);
+	if (ret) {
+		pr_err("%s, start window can't be set error %d\n", __func__,
+			ret);
+		goto exit;
+	}
+
+	asm_mtmx_strtr_window.window_lsw = we_lsw;
+	asm_mtmx_strtr_window.window_msw = we_msw;
+	param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_END_V2;
+	ret = q6asm_send_mtmx_strtr_window(ac, &asm_mtmx_strtr_window,
+					   param_id);
+	if (ret) {
+		pr_err("%s, end window can't be set error %d\n", __func__,
+			ret);
+	}
+
+exit:
+	return ret;
+}
+
 static int msm_compr_set_volume(struct snd_compr_stream *cstream,
 				uint32_t volume_l, uint32_t volume_r)
 {
@@ -314,6 +411,7 @@
 	int buffer_length;
 	uint64_t bytes_available;
 	struct audio_aio_write_param param;
+	struct snd_codec_metadata *buff_addr;
 
 	if (!atomic_read(&prtd->start)) {
 		pr_err("%s: stream is not in started state\n", __func__);
@@ -347,23 +445,34 @@
 	}
 
 	if (buffer_length) {
-		param.paddr	= prtd->buffer_paddr + prtd->byte_offset;
+		param.paddr = prtd->buffer_paddr + prtd->byte_offset;
 		WARN(prtd->byte_offset % 32 != 0, "offset %x not multiple of 32\n",
 		prtd->byte_offset);
 	} else {
-		param.paddr	= prtd->buffer_paddr;
+		param.paddr = prtd->buffer_paddr;
 	}
-
 	param.len	= buffer_length;
-	param.msw_ts	= 0;
-	param.lsw_ts	= 0;
-	param.flags	= NO_TIMESTAMP;
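+	/*
+	 * In timestamp mode each fragment starts with a snd_codec_metadata
+	 * header carrying the real buffer length and a 64-bit timestamp;
+	 * skip past it before queuing the data to the DSP.
+	 */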
+	if (prtd->ts_header_offset) {
+		buff_addr = (struct snd_codec_metadata *)
+					(prtd->buffer + prtd->byte_offset);
+		param.len = buff_addr->length;
+		param.msw_ts = (uint32_t)
+			((buff_addr->timestamp & 0xFFFFFFFF00000000LL) >> 32);
+		param.lsw_ts = (uint32_t) (buff_addr->timestamp & 0xFFFFFFFFLL);
+		param.paddr += prtd->ts_header_offset;
+		param.flags = SET_TIMESTAMP;
+		param.metadata_len = prtd->ts_header_offset;
+	} else {
+		param.msw_ts = 0;
+		param.lsw_ts = 0;
+		param.flags = NO_TIMESTAMP;
+		param.metadata_len = 0;
+	}
 	param.uid	= buffer_length;
-	param.metadata_len = 0;
 	param.last_buffer = prtd->last_buffer;
 
 	pr_debug("%s: sending %d bytes to DSP byte_offset = %d\n",
-		__func__, buffer_length, prtd->byte_offset);
+		__func__, param.len, prtd->byte_offset);
 	if (q6asm_async_write(prtd->audio_client, &param) < 0) {
 		pr_err("%s:q6asm_async_write failed\n", __func__);
 	} else {
@@ -482,9 +591,21 @@
 		 * written to ADSP in the last write, update offset and
 		 * total copied data accordingly.
 		 */
-
-		prtd->byte_offset += token;
-		prtd->copied_total += token;
+		if (prtd->ts_header_offset) {
+			/* Always assume that the data will be sent to DSP on
+			 * frame boundary.
+			 * i.e, one frame of userspace write will result in
+			 * one kernel write to DSP. This is needed as
+			 * timestamp will be sent per frame.
+			 */
+			prtd->byte_offset +=
+					prtd->codec_param.buffer.fragment_size;
+			prtd->copied_total +=
+					prtd->codec_param.buffer.fragment_size;
+		} else {
+			prtd->byte_offset += token;
+			prtd->copied_total += token;
+		}
 		if (prtd->byte_offset >= prtd->buffer_size)
 			prtd->byte_offset -= prtd->buffer_size;
 
@@ -539,10 +660,10 @@
 			*buff_addr = prtd->ts_header_offset;
 			buff_addr++;
 			/* Write the TS LSW */
-			*buff_addr = payload[TS_LSW_OFFSET];
+			*buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_LSW];
 			buff_addr++;
 			/* Write the TS MSW */
-			*buff_addr = payload[TS_MSW_OFFSET];
+			*buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_MSW];
 		}
 		/* Always assume read_size is same as fragment_size */
 		read_size = prtd->codec_param.buffer.fragment_size;
@@ -760,7 +881,7 @@
 			COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
 	prtd->compr_cap.max_fragments =
 			COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
-	prtd->compr_cap.num_codecs = 14;
+	prtd->compr_cap.num_codecs = 15;
 	prtd->compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
 	prtd->compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
 	prtd->compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
@@ -775,6 +896,7 @@
 	prtd->compr_cap.codecs[11] = SND_AUDIOCODEC_APE;
 	prtd->compr_cap.codecs[12] = SND_AUDIOCODEC_DTS;
 	prtd->compr_cap.codecs[13] = SND_AUDIOCODEC_DSD;
+	prtd->compr_cap.codecs[14] = SND_AUDIOCODEC_APTX;
 }
 
 static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
@@ -794,6 +916,7 @@
 	struct asm_alac_cfg alac_cfg;
 	struct asm_ape_cfg ape_cfg;
 	struct asm_dsd_cfg dsd_cfg;
+	struct aptx_dec_bt_addr_cfg aptx_cfg;
 	union snd_codec_options *codec_options;
 
 	int ret = 0;
@@ -869,6 +992,9 @@
 		if (prtd->codec_param.codec.format ==
 					SND_AUDIOSTREAMFORMAT_MP4ADTS)
 			aac_cfg.format = 0x0;
+		else if (prtd->codec_param.codec.format ==
+					SND_AUDIOSTREAMFORMAT_MP4LATM)
+			aac_cfg.format = 0x04;
 		else
 			aac_cfg.format = 0x03;
 		aac_cfg.ch_cfg = prtd->num_channels;
@@ -1025,6 +1151,24 @@
 			pr_err("%s: CMD DSD Format block failed ret %d\n",
 				__func__, ret);
 		break;
+	case FORMAT_APTX:
+		pr_debug("SND_AUDIOCODEC_APTX\n");
+		memset(&aptx_cfg, 0x0, sizeof(struct aptx_dec_bt_addr_cfg));
+		ret = q6asm_stream_media_format_block_aptx_dec(
+							prtd->audio_client,
+							prtd->sample_rate,
+							stream_id);
+		if (ret >= 0) {
+			aptx_cfg.nap = codec_options->aptx_dec.nap;
+			aptx_cfg.uap = codec_options->aptx_dec.uap;
+			aptx_cfg.lap = codec_options->aptx_dec.lap;
+			q6asm_set_aptx_dec_bt_addr(prtd->audio_client,
+							&aptx_cfg);
+		} else {
+			pr_err("%s: CMD Format block failed ret %d\n",
+					 __func__, ret);
+		}
+		break;
 	default:
 		pr_debug("%s, unsupported format, skip", __func__);
 		break;
@@ -1206,6 +1350,12 @@
 	prtd->buffer_paddr = ac->port[dir].buf[0].phys;
 	prtd->buffer_size  = runtime->fragments * runtime->fragment_size;
 
+	/* Bit-0 of flags represent timestamp mode */
+	if (prtd->codec_param.codec.flags & COMPRESSED_TIMESTAMP_FLAG)
+		prtd->ts_header_offset = sizeof(struct snd_codec_metadata);
+	else
+		prtd->ts_header_offset = 0;
+
 	ret = msm_compr_send_media_format_block(cstream, ac->stream_id, false);
 	if (ret < 0)
 		pr_err("%s, failed to send media format block\n", __func__);
@@ -1247,8 +1397,13 @@
 	pr_debug("%s: stream_id %d bits_per_sample %d\n",
 			__func__, ac->stream_id, bits_per_sample);
 
-	ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
-		bits_per_sample);
+	if (prtd->codec_param.codec.flags & COMPRESSED_TIMESTAMP_FLAG) {
+		ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
+			bits_per_sample, true);
+	} else {
+		ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
+			bits_per_sample, false);
+	}
 	if (ret < 0) {
 		pr_err("%s: q6asm_open_read failed:%d\n", __func__, ret);
 		return ret;
@@ -1563,6 +1718,7 @@
 	kfree(pdata->dec_params[soc_prtd->dai_link->id]);
 	pdata->dec_params[soc_prtd->dai_link->id] = NULL;
 	kfree(prtd);
+	runtime->private_data = NULL;
 
 	return 0;
 }
@@ -1622,6 +1778,7 @@
 	q6asm_audio_client_free(ac);
 
 	kfree(prtd);
+	runtime->private_data = NULL;
 
 	return 0;
 }
@@ -1791,6 +1948,12 @@
 		break;
 	}
 
+	case SND_AUDIOCODEC_APTX: {
+		pr_debug("%s: SND_AUDIOCODEC_APTX\n", __func__);
+		prtd->codec = FORMAT_APTX;
+		break;
+	}
+
 	default:
 		pr_err("codec not supported, id =%d\n", params->codec.id);
 		return -EINVAL;
@@ -1935,7 +2098,8 @@
 			msm_compr_read_buffer(prtd);
 		}
 		/* issue RUN command for the stream */
-		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		q6asm_run_nowait(prtd->audio_client, prtd->run_mode,
+				 prtd->start_delay_msw, prtd->start_delay_lsw);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		spin_lock_irqsave(&prtd->lock, flags);
@@ -2019,7 +2183,8 @@
 				   prtd->gapless_state.gapless_transition);
 		if (!prtd->gapless_state.gapless_transition) {
 			atomic_set(&prtd->start, 1);
-			q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+			q6asm_run_nowait(prtd->audio_client, prtd->run_mode,
+					 0, 0);
 		}
 		break;
 	case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
@@ -2664,6 +2829,7 @@
 	case SND_AUDIOCODEC_DTS:
 		break;
 	case SND_AUDIOCODEC_DSD:
+	case SND_AUDIOCODEC_APTX:
 		break;
 	default:
 		pr_err("%s: Unsupported audio codec %d\n",
@@ -2690,11 +2856,14 @@
 		return -EINVAL;
 	}
 
-	if (prtd->compr_passthr != LEGACY_PCM) {
+	if (((metadata->key == SNDRV_COMPRESS_ENCODER_PADDING) ||
+	     (metadata->key == SNDRV_COMPRESS_ENCODER_DELAY)) &&
+	     (prtd->compr_passthr != LEGACY_PCM)) {
 		pr_debug("%s: No trailing silence for compress_type[%d]\n",
 			__func__, prtd->compr_passthr);
 		return 0;
 	}
+
 	ac = prtd->audio_client;
 	if (metadata->key == SNDRV_COMPRESS_ENCODER_PADDING) {
 		pr_debug("%s, got encoder padding %u",
@@ -2704,11 +2873,63 @@
 		pr_debug("%s, got encoder delay %u",
 			 __func__, metadata->value[0]);
 		prtd->gapless_state.initial_samples_drop = metadata->value[0];
+	} else if (metadata->key == SNDRV_COMPRESS_RENDER_MODE) {
+		return msm_compr_set_render_mode(prtd, metadata->value[0]);
+	} else if (metadata->key == SNDRV_COMPRESS_CLK_REC_MODE) {
+		return msm_compr_set_clk_rec_mode(ac, metadata->value[0]);
+	} else if (metadata->key == SNDRV_COMPRESS_RENDER_WINDOW) {
+		return msm_compr_set_render_window(
+				ac,
+				metadata->value[0],
+				metadata->value[1],
+				metadata->value[2],
+				metadata->value[3]);
+	} else if (metadata->key == SNDRV_COMPRESS_START_DELAY) {
+		prtd->start_delay_lsw = metadata->value[0];
+		prtd->start_delay_msw = metadata->value[1];
 	}
 
 	return 0;
 }
 
+static int msm_compr_get_metadata(struct snd_compr_stream *cstream,
+				struct snd_compr_metadata *metadata)
+{
+	struct msm_compr_audio *prtd;
+	struct audio_client *ac;
+	int ret = -EINVAL;
+
+	pr_debug("%s\n", __func__);
+
+	if (!metadata || !cstream || !cstream->runtime)
+		return ret;
+
+	if (metadata->key != SNDRV_COMPRESS_PATH_DELAY) {
+		pr_err("%s, unsupported key %d\n", __func__, metadata->key);
+		return ret;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd || !prtd->audio_client) {
+		pr_err("%s: prtd or audio client is NULL\n", __func__);
+		return ret;
+	}
+
+	ac = prtd->audio_client;
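+	/* Query the ADSP for the session path delay; on success the value
+	 * is cached in ac->path_delay (in microseconds).
+	 */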
+	ret = q6asm_get_path_delay(prtd->audio_client);
+	if (ret) {
+		pr_err("%s: get_path_delay failed, ret=%d\n", __func__, ret);
+		return ret;
+	}
+
+	pr_debug("%s, path delay(in us) %u\n", __func__, ac->path_delay);
+
+	metadata->value[0] = ac->path_delay;
+
+	return ret;
+}
+
 static int msm_compr_set_next_track_param(struct snd_compr_stream *cstream,
 				union snd_codec_options *codec_options)
 {
@@ -3078,6 +3299,7 @@
 	switch (prtd->codec) {
 	case FORMAT_MP3:
 	case FORMAT_MPEG4_AAC:
+	case FORMAT_APTX:
 		pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
 			 prtd->codec);
 		break;
@@ -3144,6 +3366,7 @@
 	case FORMAT_APE:
 	case FORMAT_DTS:
 	case FORMAT_DSD:
+	case FORMAT_APTX:
 		pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
 			 prtd->codec);
 		break;
@@ -3194,48 +3417,45 @@
 				      struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate = 48000;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		return -EINVAL;
-	}
-
 	app_type = ucontrol->value.integer.value[0];
 	acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
 		sample_rate = ucontrol->value.integer.value[2];
-	pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
-		__func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_RX);
-	msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
-			acdb_dev_id, sample_rate, SESSION_TYPE_RX);
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
 
-	return 0;
+	return ret;
 }
 
 static int msm_compr_playback_app_type_cfg_get(struct snd_kcontrol *kcontrol,
 				      struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
 	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_RX,
-		&app_type, &acdb_dev_id, &sample_rate);
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -3245,8 +3465,8 @@
 	ucontrol->value.integer.value[0] = app_type;
 	ucontrol->value.integer.value[1] = acdb_dev_id;
 	ucontrol->value.integer.value[2] = sample_rate;
-	pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fe_id, SESSION_TYPE_RX,
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
 		app_type, acdb_dev_id, sample_rate);
 done:
 	return ret;
@@ -3256,48 +3476,45 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate = 48000;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		return -EINVAL;
-	}
-
 	app_type = ucontrol->value.integer.value[0];
 	acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
 		sample_rate = ucontrol->value.integer.value[2];
-	pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
-		__func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
-	msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
-		acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
 
-	return 0;
+	return ret;
 }
 
 static int msm_compr_capture_app_type_cfg_get(struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
 	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
-		&app_type, &acdb_dev_id, &sample_rate);
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -3307,8 +3524,8 @@
 	ucontrol->value.integer.value[0] = app_type;
 	ucontrol->value.integer.value[1] = acdb_dev_id;
 	ucontrol->value.integer.value[2] = sample_rate;
-	pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fe_id, SESSION_TYPE_TX,
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
 		app_type, acdb_dev_id, sample_rate);
 done:
 	return ret;
@@ -3860,6 +4077,7 @@
 	.pointer		= msm_compr_pointer,
 	.set_params		= msm_compr_set_params,
 	.set_metadata		= msm_compr_set_metadata,
+	.get_metadata		= msm_compr_get_metadata,
 	.set_next_track_param	= msm_compr_set_next_track_param,
 	.ack			= msm_compr_ack,
 	.copy			= msm_compr_copy,
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
index dffac45..9b072ea 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
@@ -124,6 +124,45 @@
 	SOC_ENUM_SINGLE_EXT(2, hdmi_format),
 };
 
+static int msm_dai_q6_ext_disp_drift_info(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct afe_param_id_dev_timing_stats);
+
+	return 0;
+}
+
+static int msm_dai_q6_ext_disp_drift_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = -EINVAL;
+	struct afe_param_id_dev_timing_stats timing_stats;
+	struct snd_soc_dai *dai = kcontrol->private_data;
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_err("%s:  afe port not started. status_mask = %ld\n",
+			__func__, *dai_data->status_mask);
+		goto done;
+	}
+
+	memset(&timing_stats, 0, sizeof(struct afe_param_id_dev_timing_stats));
+	ret = afe_get_av_dev_drift(&timing_stats, dai->id);
+	if (ret) {
+		pr_err("%s: Error getting AFE Drift for port %d, err=%d\n",
+			__func__, dai->id, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(ucontrol->value.bytes.data, (void *)&timing_stats,
+	       sizeof(struct afe_param_id_dev_timing_stats));
+done:
+	return ret;
+}
+
 static const struct snd_kcontrol_new hdmi_config_controls[] = {
 	SOC_ENUM_EXT("HDMI RX Format", hdmi_config_enum[0],
 				 msm_dai_q6_ext_disp_format_get,
@@ -132,6 +171,13 @@
 				 HDMI_RX_CA_MAX, 0, 1,
 				 msm_dai_q6_ext_disp_ca_get,
 				 msm_dai_q6_ext_disp_ca_put),
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "HDMI RX Drift",
+		.info	= msm_dai_q6_ext_disp_drift_info,
+		.get	= msm_dai_q6_ext_disp_drift_get,
+	},
 };
 
 static const struct snd_kcontrol_new display_port_config_controls[] = {
@@ -142,6 +188,13 @@
 				 HDMI_RX_CA_MAX, 0, 1,
 				 msm_dai_q6_ext_disp_ca_get,
 				 msm_dai_q6_ext_disp_ca_put),
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "DISPLAY Port RX Drift",
+		.info	= msm_dai_q6_ext_disp_drift_info,
+		.get	= msm_dai_q6_ext_disp_drift_get,
+	},
 };
 
 /* Current implementation assumes hw_param is called once
@@ -297,6 +350,10 @@
 		kcontrol = &hdmi_config_controls[1];
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				 snd_ctl_new1(kcontrol, dai_data));
+
+		kcontrol = &hdmi_config_controls[2];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(kcontrol, dai));
 	} else if (dai->driver->id == DISPLAY_PORT_RX) {
 		kcontrol = &display_port_config_controls[0];
 		rc = snd_ctl_add(dai->component->card->snd_card,
@@ -305,6 +362,10 @@
 		kcontrol = &display_port_config_controls[1];
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				 snd_ctl_new1(kcontrol, dai_data));
+
+		kcontrol = &display_port_config_controls[2];
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(kcontrol, dai));
 	} else {
 		dev_err(dai->dev, "%s: Invalid id:%d\n",
 			__func__, dai->driver->id);
@@ -370,8 +431,10 @@
 	.playback = {
 		.stream_name = "HDMI Playback",
 		.aif_name = "HDMI",
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
-		 SNDRV_PCM_RATE_192000,
+		.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+			 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+			 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+			 SNDRV_PCM_RATE_192000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
 		.channels_min = 2,
 		.channels_max = 8,
@@ -389,7 +452,9 @@
 		.playback = {
 			.stream_name = "Display Port Playback",
 			.aif_name = "DISPLAY_PORT",
-			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+			.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+				 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
 				 SNDRV_PCM_RATE_192000,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
 				   SNDRV_PCM_FMTBIT_S24_LE,
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 52c2296..0c46763 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -51,6 +51,11 @@
 	ENC_FMT_APTX_HD = ASM_MEDIA_FMT_APTX_HD,
 };
 
+enum {
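+	/* mono VI feed source: SPKR_1 -> SD0, SPKR_2 -> SD1 */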
+	SPKR_1,
+	SPKR_2,
+};
+
 static const struct afe_clk_set lpass_clk_set_default = {
 	AFE_API_VERSION_CLOCK_SET,
 	Q6AFE_LPASS_CLK_ID_PRI_PCM_IBIT,
@@ -175,6 +180,7 @@
 	u16 afe_in_bitformat;
 	struct afe_enc_config enc_config;
 	union afe_port_config port_config;
+	u16 vi_feed_mono;
 };
 
 struct msm_dai_q6_spdif_dai_data {
@@ -212,6 +218,7 @@
 	u32 rate;
 	u32 channels;
 	u32 bitwidth;
+	u32 num_group_ports;
 	struct afe_clk_set clk_set; /* hold LPASS clock config. */
 	union afe_port_group_config group_cfg; /* hold tdm group config */
 	struct afe_tdm_port_config port_cfg; /* hold tdm config */
@@ -230,8 +237,14 @@
 	"Compr-60958"
 };
 
+static const char *const mi2s_vi_feed_mono[] = {
+	"Left",
+	"Right",
+};
+
 static const struct soc_enum mi2s_config_enum[] = {
 	SOC_ENUM_SINGLE_EXT(4, mi2s_format),
+	SOC_ENUM_SINGLE_EXT(2, mi2s_vi_feed_mono),
 };
 
 static const char *const sb_format[] = {
@@ -247,6 +260,7 @@
 static const char *const tdm_data_format[] = {
 	"LPCM",
 	"Compr",
+	"Gen Compr",
 };
 
 static const char *const tdm_header_type[] = {
@@ -256,8 +270,8 @@
 };
 
 static const struct soc_enum tdm_config_enum[] = {
-	SOC_ENUM_SINGLE_EXT(2, tdm_data_format),
-	SOC_ENUM_SINGLE_EXT(3, tdm_header_type),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_data_format), tdm_data_format),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_header_type), tdm_header_type),
 };
 
 static DEFINE_MUTEX(tdm_mutex);
@@ -285,6 +299,8 @@
 	0xFF,
 };
 
+static u32 num_tdm_group_ports;
+
 static struct afe_clk_set tdm_clk_set = {
 	AFE_API_VERSION_CLOCK_SET,
 	Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT,
@@ -1882,6 +1898,11 @@
 			pr_err("%s: rx slot not found\n", __func__);
 			return -EINVAL;
 		}
+		if (rx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+			pr_err("%s: invalid rx num %d\n", __func__, rx_num);
+			return -EINVAL;
+		}
+
 		for (i = 0; i < rx_num; i++) {
 			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
 			    rx_slot[i];
@@ -1914,6 +1935,11 @@
 			pr_err("%s: tx slot not found\n", __func__);
 			return -EINVAL;
 		}
+		if (tx_num > AFE_PORT_MAX_AUDIO_CHAN_CNT) {
+			pr_err("%s: invalid tx num %d\n", __func__, tx_num);
+			return -EINVAL;
+		}
+
 		for (i = 0; i < tx_num; i++) {
 			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
 			    tx_slot[i];
@@ -2046,6 +2072,42 @@
 	return 0;
 }
 
+static int msm_dai_q6_usb_audio_endian_cfg_put(struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	u32 val = ucontrol->value.integer.value[0];
+
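+	/* store the requested endianness in the USB audio port config */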
+	if (dai_data) {
+		dai_data->port_config.usb_audio.endian = val;
+		pr_debug("%s: endian = 0x%x\n", __func__,
+				 dai_data->port_config.usb_audio.endian);
+	} else {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_usb_audio_endian_cfg_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	if (dai_data) {
+		ucontrol->value.integer.value[0] =
+			 dai_data->port_config.usb_audio.endian;
+		pr_debug("%s: endian = 0x%x\n", __func__,
+				 dai_data->port_config.usb_audio.endian);
+	} else {
+		pr_err("%s: dai_data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int  msm_dai_q6_afe_enc_cfg_info(struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_info *uinfo)
 {
@@ -2293,9 +2355,15 @@
 	SOC_SINGLE_EXT("USB_AUDIO_RX dev_token", 0, 0, UINT_MAX, 0,
 			msm_dai_q6_usb_audio_cfg_get,
 			msm_dai_q6_usb_audio_cfg_put),
+	SOC_SINGLE_EXT("USB_AUDIO_RX endian", 0, 0, 1, 0,
+			msm_dai_q6_usb_audio_endian_cfg_get,
+			msm_dai_q6_usb_audio_endian_cfg_put),
 	SOC_SINGLE_EXT("USB_AUDIO_TX dev_token", 0, 0, UINT_MAX, 0,
 			msm_dai_q6_usb_audio_cfg_get,
 			msm_dai_q6_usb_audio_cfg_put),
+	SOC_SINGLE_EXT("USB_AUDIO_TX endian", 0, 0, 1, 0,
+			msm_dai_q6_usb_audio_endian_cfg_get,
+			msm_dai_q6_usb_audio_endian_cfg_put),
 };
 
 static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
@@ -2360,10 +2428,16 @@
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				 snd_ctl_new1(&usb_audio_cfg_controls[0],
 				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&usb_audio_cfg_controls[1],
+				 dai_data));
 		break;
 	case AFE_PORT_ID_USB_TX:
 		rc = snd_ctl_add(dai->component->card->snd_card,
-				 snd_ctl_new1(&usb_audio_cfg_controls[1],
+				 snd_ctl_new1(&usb_audio_cfg_controls[2],
+				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				 snd_ctl_new1(&usb_audio_cfg_controls[3],
 				 dai_data));
 		break;
 	}
@@ -3308,6 +3382,26 @@
 	return 0;
 }
 
+static int msm_dai_q6_mi2s_vi_feed_mono_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
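+	/* cached value is applied in hw_params when selecting SD0 vs SD1 */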
+	dai_data->vi_feed_mono = value;
+	pr_debug("%s: value = %d\n", __func__, value);
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_vi_feed_mono_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+	ucontrol->value.integer.value[0] = dai_data->vi_feed_mono;
+	return 0;
+}
+
 static const struct snd_kcontrol_new mi2s_config_controls[] = {
 	SOC_ENUM_EXT("PRI MI2S RX Format", mi2s_config_enum[0],
 		     msm_dai_q6_mi2s_format_get,
@@ -3342,6 +3436,15 @@
 	SOC_ENUM_EXT("SENARY MI2S TX Format", mi2s_config_enum[0],
 		     msm_dai_q6_mi2s_format_get,
 		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("INT5 MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+};
+
+static const struct snd_kcontrol_new mi2s_vi_feed_controls[] = {
+	SOC_ENUM_EXT("INT5 MI2S VI MONO", mi2s_config_enum[1],
+		     msm_dai_q6_mi2s_vi_feed_mono_get,
+		     msm_dai_q6_mi2s_vi_feed_mono_put),
 };
 
 static int msm_dai_q6_dai_mi2s_probe(struct snd_soc_dai *dai)
@@ -3353,6 +3456,7 @@
 	struct snd_kcontrol *kcontrol = NULL;
 	int rc = 0;
 	const struct snd_kcontrol_new *ctrl = NULL;
+	const struct snd_kcontrol_new *vi_feed_ctrl = NULL;
 
 	dai->id = mi2s_pdata->intf_id;
 
@@ -3394,6 +3498,8 @@
 			ctrl = &mi2s_config_controls[9];
 		if (dai->id == MSM_SENARY_MI2S)
 			ctrl = &mi2s_config_controls[10];
+		if (dai->id == MSM_INT5_MI2S)
+			ctrl = &mi2s_config_controls[11];
 	}
 
 	if (ctrl) {
@@ -3408,6 +3514,21 @@
 				__func__, dai->name);
 		}
 	}
+
+	if (dai->id == MSM_INT5_MI2S)
+		vi_feed_ctrl = &mi2s_vi_feed_controls[0];
+
+	if (vi_feed_ctrl) {
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(vi_feed_ctrl,
+				&mi2s_dai_data->tx_dai.mi2s_dai_data));
+
+		if (rc < 0) {
+			dev_err(dai->dev, "%s: err add TX vi feed channel ctl DAI = %s\n",
+				__func__, dai->name);
+		}
+	}
+
 	rc = msm_dai_q6_dai_add_route(dai);
 rtn:
 	return rc;
@@ -3654,8 +3775,12 @@
 		case AFE_PORT_I2S_QUAD01:
 		case AFE_PORT_I2S_6CHS:
 		case AFE_PORT_I2S_8CHS:
-			dai_data->port_config.i2s.channel_mode =
-						AFE_PORT_I2S_SD0;
+			if (dai_data->vi_feed_mono == SPKR_1)
+				dai_data->port_config.i2s.channel_mode =
+							AFE_PORT_I2S_SD0;
+			else
+				dai_data->port_config.i2s.channel_mode =
+							AFE_PORT_I2S_SD1;
 			break;
 		case AFE_PORT_I2S_QUAD23:
 			dai_data->port_config.i2s.channel_mode =
@@ -4812,7 +4937,6 @@
 static int msm_dai_tdm_q6_probe(struct platform_device *pdev)
 {
 	int rc = 0;
-	u32 num_ports = 0;
 	const uint32_t *port_id_array = NULL;
 	uint32_t array_length = 0;
 	int i = 0;
@@ -4835,18 +4959,19 @@
 
 	rc = of_property_read_u32(pdev->dev.of_node,
 		"qcom,msm-cpudai-tdm-group-num-ports",
-		&num_ports);
+		&num_tdm_group_ports);
 	if (rc) {
 		dev_err(&pdev->dev, "%s: Group Num Ports from DT file %s\n",
 			__func__, "qcom,msm-cpudai-tdm-group-num-ports");
 		goto rtn;
 	}
 	dev_dbg(&pdev->dev, "%s: Group Num Ports from DT file 0x%x\n",
-		__func__, num_ports);
+		__func__, num_tdm_group_ports);
 
-	if (num_ports > AFE_GROUP_DEVICE_NUM_PORTS) {
+	if (num_tdm_group_ports > AFE_GROUP_DEVICE_NUM_PORTS) {
 		dev_err(&pdev->dev, "%s Group Num Ports %d greater than Max %d\n",
-			__func__, num_ports, AFE_GROUP_DEVICE_NUM_PORTS);
+			__func__, num_tdm_group_ports,
+			AFE_GROUP_DEVICE_NUM_PORTS);
 		rc = -EINVAL;
 		goto rtn;
 	}
@@ -4860,18 +4985,19 @@
 		rc = -EINVAL;
 		goto rtn;
 	}
-	if (array_length != sizeof(uint32_t) * num_ports) {
+	if (array_length != sizeof(uint32_t) * num_tdm_group_ports) {
 		dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
-			__func__, array_length, sizeof(uint32_t) * num_ports);
+			__func__, array_length,
+			sizeof(uint32_t) * num_tdm_group_ports);
 		rc = -EINVAL;
 		goto rtn;
 	}
 
-	for (i = 0; i < num_ports; i++)
+	for (i = 0; i < num_tdm_group_ports; i++)
 		tdm_group_cfg.port_id[i] =
 			(u16)be32_to_cpu(port_id_array[i]);
 	/* Unused index should be filled with 0 or AFE_PORT_INVALID */
-	for (i = num_ports; i < AFE_GROUP_DEVICE_NUM_PORTS; i++)
+	for (i = num_tdm_group_ports; i < AFE_GROUP_DEVICE_NUM_PORTS; i++)
 		tdm_group_cfg.port_id[i] =
 			AFE_PORT_INVALID;
 
@@ -4938,7 +5064,20 @@
 	struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
 	int value = ucontrol->value.integer.value[0];
 
-	dai_data->port_cfg.tdm.data_format = value;
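+	/* map the control index (tdm_data_format[]) to an AFE format id */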
+	switch (value) {
+	case 0:
+		dai_data->port_cfg.tdm.data_format = AFE_LINEAR_PCM_DATA;
+		break;
+	case 1:
+		dai_data->port_cfg.tdm.data_format = AFE_NON_LINEAR_DATA;
+		break;
+	case 2:
+		dai_data->port_cfg.tdm.data_format = AFE_GENERIC_COMPRESSED;
+		break;
+	default:
+		pr_err("%s: invalid data_format %d\n", __func__, value);
+		break;
+	}
 	pr_debug("%s: data_format = %d\n",
 		__func__, dai_data->port_cfg.tdm.data_format);
 	return 0;
@@ -5875,6 +6014,9 @@
 
-	/* HW only supports 16 and 8 slots configuration */
+	/* HW only supports 2, 8 and 16 slots configuration */
 	switch (slots) {
+	case 2:
+		cap_mask = 0x03;
+		break;
 	case 8:
 		cap_mask = 0xFF;
 		break;
@@ -6295,17 +6437,25 @@
 					__func__, dai->id);
 				goto rtn;
 			}
-			rc = afe_port_group_enable(group_id,
-				&dai_data->group_cfg, true);
-			if (rc < 0) {
-				dev_err(dai->dev, "%s: fail to enable AFE group 0x%x\n",
+
+			/*
+			 * If only one port is configured, skip group enable;
+			 * no AFE group is needed for a single port.
+			 */
+			if (dai_data->num_group_ports > 1) {
+				rc = afe_port_group_enable(group_id,
+					&dai_data->group_cfg, true);
+				if (rc < 0) {
+					dev_err(dai->dev,
+					"%s: fail to enable AFE group 0x%x\n",
 					__func__, group_id);
-				goto rtn;
+					goto rtn;
+				}
 			}
 		}
 
 		rc = afe_tdm_port_start(dai->id, &dai_data->port_cfg,
-			dai_data->rate);
+			dai_data->rate, dai_data->num_group_ports);
 		if (rc < 0) {
 			if (atomic_read(group_ref) == 0) {
 				afe_port_group_enable(group_id,
@@ -6399,13 +6549,15 @@
 			.stream_name = "Primary TDM0 Playback",
 			.aif_name = "PRI_TDM_RX_0",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX,
@@ -6417,13 +6569,15 @@
 			.stream_name = "Primary TDM1 Playback",
 			.aif_name = "PRI_TDM_RX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX_1,
@@ -6435,13 +6589,15 @@
 			.stream_name = "Primary TDM2 Playback",
 			.aif_name = "PRI_TDM_RX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX_2,
@@ -6453,13 +6609,15 @@
 			.stream_name = "Primary TDM3 Playback",
 			.aif_name = "PRI_TDM_RX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX_3,
@@ -6471,13 +6629,15 @@
 			.stream_name = "Primary TDM4 Playback",
 			.aif_name = "PRI_TDM_RX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX_4,
@@ -6489,13 +6649,15 @@
 			.stream_name = "Primary TDM5 Playback",
 			.aif_name = "PRI_TDM_RX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX_5,
@@ -6507,13 +6669,15 @@
 			.stream_name = "Primary TDM6 Playback",
 			.aif_name = "PRI_TDM_RX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX_6,
@@ -6525,13 +6689,15 @@
 			.stream_name = "Primary TDM7 Playback",
 			.aif_name = "PRI_TDM_RX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_RX_7,
@@ -6543,13 +6709,15 @@
 			.stream_name = "Primary TDM0 Capture",
 			.aif_name = "PRI_TDM_TX_0",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX,
@@ -6561,13 +6729,15 @@
 			.stream_name = "Primary TDM1 Capture",
 			.aif_name = "PRI_TDM_TX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX_1,
@@ -6579,13 +6749,15 @@
 			.stream_name = "Primary TDM2 Capture",
 			.aif_name = "PRI_TDM_TX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX_2,
@@ -6597,13 +6769,15 @@
 			.stream_name = "Primary TDM3 Capture",
 			.aif_name = "PRI_TDM_TX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX_3,
@@ -6615,13 +6789,15 @@
 			.stream_name = "Primary TDM4 Capture",
 			.aif_name = "PRI_TDM_TX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX_4,
@@ -6633,13 +6809,15 @@
 			.stream_name = "Primary TDM5 Capture",
 			.aif_name = "PRI_TDM_TX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX_5,
@@ -6651,13 +6829,15 @@
 			.stream_name = "Primary TDM6 Capture",
 			.aif_name = "PRI_TDM_TX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX_6,
@@ -6669,13 +6849,15 @@
 			.stream_name = "Primary TDM7 Capture",
 			.aif_name = "PRI_TDM_TX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_PRIMARY_TDM_TX_7,
@@ -6687,13 +6869,15 @@
 			.stream_name = "Secondary TDM0 Playback",
 			.aif_name = "SEC_TDM_RX_0",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX,
@@ -6705,13 +6889,15 @@
 			.stream_name = "Secondary TDM1 Playback",
 			.aif_name = "SEC_TDM_RX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX_1,
@@ -6723,13 +6909,15 @@
 			.stream_name = "Secondary TDM2 Playback",
 			.aif_name = "SEC_TDM_RX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX_2,
@@ -6741,13 +6929,15 @@
 			.stream_name = "Secondary TDM3 Playback",
 			.aif_name = "SEC_TDM_RX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX_3,
@@ -6759,13 +6949,15 @@
 			.stream_name = "Secondary TDM4 Playback",
 			.aif_name = "SEC_TDM_RX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX_4,
@@ -6777,13 +6969,15 @@
 			.stream_name = "Secondary TDM5 Playback",
 			.aif_name = "SEC_TDM_RX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX_5,
@@ -6795,13 +6989,15 @@
 			.stream_name = "Secondary TDM6 Playback",
 			.aif_name = "SEC_TDM_RX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX_6,
@@ -6813,13 +7009,15 @@
 			.stream_name = "Secondary TDM7 Playback",
 			.aif_name = "SEC_TDM_RX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_RX_7,
@@ -6831,13 +7029,15 @@
 			.stream_name = "Secondary TDM0 Capture",
 			.aif_name = "SEC_TDM_TX_0",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX,
@@ -6849,13 +7049,15 @@
 			.stream_name = "Secondary TDM1 Capture",
 			.aif_name = "SEC_TDM_TX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX_1,
@@ -6867,13 +7069,15 @@
 			.stream_name = "Secondary TDM2 Capture",
 			.aif_name = "SEC_TDM_TX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX_2,
@@ -6885,13 +7089,15 @@
 			.stream_name = "Secondary TDM3 Capture",
 			.aif_name = "SEC_TDM_TX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX_3,
@@ -6903,13 +7109,15 @@
 			.stream_name = "Secondary TDM4 Capture",
 			.aif_name = "SEC_TDM_TX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX_4,
@@ -6921,13 +7129,15 @@
 			.stream_name = "Secondary TDM5 Capture",
 			.aif_name = "SEC_TDM_TX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX_5,
@@ -6939,13 +7149,15 @@
 			.stream_name = "Secondary TDM6 Capture",
 			.aif_name = "SEC_TDM_TX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX_6,
@@ -6957,13 +7169,15 @@
 			.stream_name = "Secondary TDM7 Capture",
 			.aif_name = "SEC_TDM_TX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_SECONDARY_TDM_TX_7,
@@ -6975,13 +7189,15 @@
 			.stream_name = "Tertiary TDM0 Playback",
 			.aif_name = "TERT_TDM_RX_0",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX,
@@ -6993,13 +7209,15 @@
 			.stream_name = "Tertiary TDM1 Playback",
 			.aif_name = "TERT_TDM_RX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX_1,
@@ -7011,13 +7229,15 @@
 			.stream_name = "Tertiary TDM2 Playback",
 			.aif_name = "TERT_TDM_RX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX_2,
@@ -7029,13 +7249,15 @@
 			.stream_name = "Tertiary TDM3 Playback",
 			.aif_name = "TERT_TDM_RX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX_3,
@@ -7047,13 +7269,15 @@
 			.stream_name = "Tertiary TDM4 Playback",
 			.aif_name = "TERT_TDM_RX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX_4,
@@ -7065,13 +7289,15 @@
 			.stream_name = "Tertiary TDM5 Playback",
 			.aif_name = "TERT_TDM_RX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX_5,
@@ -7083,13 +7309,15 @@
 			.stream_name = "Tertiary TDM6 Playback",
 			.aif_name = "TERT_TDM_RX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX_6,
@@ -7101,13 +7329,15 @@
 			.stream_name = "Tertiary TDM7 Playback",
 			.aif_name = "TERT_TDM_RX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_RX_7,
@@ -7119,13 +7349,15 @@
 			.stream_name = "Tertiary TDM0 Capture",
 			.aif_name = "TERT_TDM_TX_0",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX,
@@ -7137,13 +7369,15 @@
 			.stream_name = "Tertiary TDM1 Capture",
 			.aif_name = "TERT_TDM_TX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX_1,
@@ -7155,13 +7389,15 @@
 			.stream_name = "Tertiary TDM2 Capture",
 			.aif_name = "TERT_TDM_TX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX_2,
@@ -7173,13 +7409,15 @@
 			.stream_name = "Tertiary TDM3 Capture",
 			.aif_name = "TERT_TDM_TX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX_3,
@@ -7191,13 +7429,15 @@
 			.stream_name = "Tertiary TDM4 Capture",
 			.aif_name = "TERT_TDM_TX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX_4,
@@ -7209,13 +7449,15 @@
 			.stream_name = "Tertiary TDM5 Capture",
 			.aif_name = "TERT_TDM_TX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX_5,
@@ -7227,13 +7469,15 @@
 			.stream_name = "Tertiary TDM6 Capture",
 			.aif_name = "TERT_TDM_TX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX_6,
@@ -7245,13 +7489,15 @@
 			.stream_name = "Tertiary TDM7 Capture",
 			.aif_name = "TERT_TDM_TX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_TERTIARY_TDM_TX_7,
@@ -7263,13 +7509,15 @@
 			.stream_name = "Quaternary TDM0 Playback",
 			.aif_name = "QUAT_TDM_RX_0",
 			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-				SNDRV_PCM_RATE_16000,
+				SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX,
@@ -7281,13 +7529,15 @@
 			.stream_name = "Quaternary TDM1 Playback",
 			.aif_name = "QUAT_TDM_RX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_1,
@@ -7299,13 +7549,15 @@
 			.stream_name = "Quaternary TDM2 Playback",
 			.aif_name = "QUAT_TDM_RX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_2,
@@ -7317,13 +7569,15 @@
 			.stream_name = "Quaternary TDM3 Playback",
 			.aif_name = "QUAT_TDM_RX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_3,
@@ -7335,13 +7589,15 @@
 			.stream_name = "Quaternary TDM4 Playback",
 			.aif_name = "QUAT_TDM_RX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_4,
@@ -7353,13 +7609,15 @@
 			.stream_name = "Quaternary TDM5 Playback",
 			.aif_name = "QUAT_TDM_RX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_5,
@@ -7371,13 +7629,15 @@
 			.stream_name = "Quaternary TDM6 Playback",
 			.aif_name = "QUAT_TDM_RX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_6,
@@ -7389,13 +7649,15 @@
 			.stream_name = "Quaternary TDM7 Playback",
 			.aif_name = "QUAT_TDM_RX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_RX_7,
@@ -7407,13 +7669,15 @@
 			.stream_name = "Quaternary TDM0 Capture",
 			.aif_name = "QUAT_TDM_TX_0",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX,
@@ -7425,13 +7689,15 @@
 			.stream_name = "Quaternary TDM1 Capture",
 			.aif_name = "QUAT_TDM_TX_1",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_1,
@@ -7443,13 +7709,15 @@
 			.stream_name = "Quaternary TDM2 Capture",
 			.aif_name = "QUAT_TDM_TX_2",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_2,
@@ -7461,13 +7729,15 @@
 			.stream_name = "Quaternary TDM3 Capture",
 			.aif_name = "QUAT_TDM_TX_3",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_3,
@@ -7479,13 +7749,15 @@
 			.stream_name = "Quaternary TDM4 Capture",
 			.aif_name = "QUAT_TDM_TX_4",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_4,
@@ -7497,13 +7769,15 @@
 			.stream_name = "Quaternary TDM5 Capture",
 			.aif_name = "QUAT_TDM_TX_5",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_5,
@@ -7515,13 +7789,15 @@
 			.stream_name = "Quaternary TDM6 Capture",
 			.aif_name = "QUAT_TDM_TX_6",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_6,
@@ -7533,13 +7809,15 @@
 			.stream_name = "Quaternary TDM7 Capture",
 			.aif_name = "QUAT_TDM_TX_7",
 			.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
-				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+				SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min = 8000,
-			.rate_max = 48000,
+			.rate_max = 352800,
 		},
 		.ops = &msm_dai_q6_tdm_ops,
 		.id = AFE_PORT_ID_QUATERNARY_TDM_TX_7,
@@ -7732,6 +8010,9 @@
 	dai_data->clk_set = tdm_clk_set;
 	/* copy static group cfg per parent node */
 	dai_data->group_cfg.tdm_cfg = tdm_group_cfg;
+	/* copy static num group ports per parent node */
+	dai_data->num_group_ports = num_tdm_group_ports;
 
 	dev_set_drvdata(&pdev->dev, dai_data);
 
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-slim.c b/sound/soc/msm/qdsp6v2/msm-dai-slim.c
index 77fb8d4..779a2e6 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-slim.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-slim.c
@@ -313,7 +313,7 @@
 	struct msm_slim_dai_data *dai_data = NULL;
 	struct slim_ch prop;
 	int rc;
-	u8 i, j;
+	u8 i;
 
 	dai_data = msm_slim_get_dai_data(drv_data, dai);
 	if (!dai_data) {
@@ -330,6 +330,13 @@
 		return -EINVAL;
 	}
 
+	if (dai_data->status & DAI_STATE_PREPARED) {
+		dev_dbg(dai->dev,
+			"%s: dai id (%d) has already been prepared\n",
+			__func__, dai->id);
+		return 0;
+	}
+
 	dma_data = &dai_data->dma_data;
 	snd_soc_dai_set_dma_data(dai, substream, dma_data);
 
@@ -343,6 +350,10 @@
 		}
 	}
 
+	/* Decrement the channel ref count */
+	for (i = 0; i < dai_data->ch_cnt; i++)
+		slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[i]);
+
 	prop.prot = SLIM_AUTO_ISO;
 	prop.baser = SLIM_RATE_4000HZ;
 	prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
@@ -366,8 +377,6 @@
 
 error_define_chan:
 error_chan_query:
-	for (j = 0; j < i; j++)
-		slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[j]);
 	return rc;
 }
 
@@ -377,7 +386,6 @@
 	struct msm_dai_slim_drv_data *drv_data = dev_get_drvdata(dai->dev);
 	struct msm_slim_dma_data *dma_data = NULL;
 	struct msm_slim_dai_data *dai_data;
-	int i, rc = 0;
 
 	dai_data = msm_slim_get_dai_data(drv_data, dai);
 	dma_data = snd_soc_dai_get_dma_data(dai, stream);
@@ -396,15 +404,6 @@
 		return;
 	}
 
-	for (i = 0; i < dai_data->ch_cnt; i++) {
-		rc = slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[i]);
-		if (rc) {
-			dev_err(dai->dev,
-				"%s: dealloc_ch failed, err = %d\n",
-				__func__, rc);
-		}
-	}
-
 	snd_soc_dai_set_dma_data(dai, stream, NULL);
 	/* clear prepared state for the dai */
 	CLR_DAI_STATE(dai_data->status, DAI_STATE_PREPARED);
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
deleted file mode 100644
index 29a1b3d..0000000
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ /dev/null
@@ -1,1071 +0,0 @@
-/* Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <sound/control.h>
-#include <sound/q6adm-v2.h>
-#include <sound/q6core.h>
-
-#include "msm-dolby-dap-config.h"
-
-/* dolby endp based parameters */
-struct dolby_dap_endp_params_s {
-	int device;
-	int device_ch_caps;
-	int dap_device;
-	int params_id[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
-	int params_len[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
-	int params_offset[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
-	int params_val[DOLBY_ENDDEP_PARAM_LENGTH];
-};
-
-const struct dolby_dap_endp_params_s
-			dolby_dap_endp_params[NUM_DOLBY_ENDP_DEVICE] = {
-	{EARPIECE, 2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{SPEAKER, 2, DOLBY_ENDP_INT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{WIRED_HEADSET,	2, DOLBY_ENDP_HEADPHONES,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{WIRED_HEADPHONE, 2, DOLBY_ENDP_HEADPHONES,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{BLUETOOTH_SCO,	2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{BLUETOOTH_SCO_HEADSET,	2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{BLUETOOTH_SCO_CARKIT, 2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{BLUETOOTH_A2DP, 2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{BLUETOOTH_A2DP_HEADPHONES, 2, DOLBY_ENDP_HEADPHONES,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{BLUETOOTH_A2DP_SPEAKER, 2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{AUX_DIGITAL, 2, DOLBY_ENDP_HDMI,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-496, -496, 0}
-	},
-	{AUX_DIGITAL, 6, DOLBY_ENDP_HDMI,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-496, -496, 0}
-	},
-	{AUX_DIGITAL, 8, DOLBY_ENDP_HDMI,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-496, -496, 0}
-	},
-	{ANLG_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{DGTL_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{USB_ACCESSORY,	2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{USB_DEVICE, 2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{REMOTE_SUBMIX,	2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{PROXY,	2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{PROXY,	6, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{FM, 2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-	{FM_TX,	2, DOLBY_ENDP_EXT_SPEAKERS,
-		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-		{-320, -320, 144}
-	},
-};
-
-/* dolby param ids to/from dsp */
-static uint32_t	dolby_dap_params_id[ALL_DOLBY_PARAMS] = {
-	DOLBY_PARAM_ID_VDHE, DOLBY_PARAM_ID_VSPE, DOLBY_PARAM_ID_DSSF,
-	DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLE,
-	DOLBY_PARAM_ID_DVMC, DOLBY_PARAM_ID_DVME, DOLBY_PARAM_ID_IENB,
-	DOLBY_PARAM_ID_IEBF, DOLBY_PARAM_ID_IEON, DOLBY_PARAM_ID_DEON,
-	DOLBY_PARAM_ID_NGON, DOLBY_PARAM_ID_GEON, DOLBY_PARAM_ID_GENB,
-	DOLBY_PARAM_ID_GEBF, DOLBY_PARAM_ID_AONB, DOLBY_PARAM_ID_AOBF,
-	DOLBY_PARAM_ID_AOBG, DOLBY_PARAM_ID_AOON, DOLBY_PARAM_ID_ARNB,
-	DOLBY_PARAM_ID_ARBF, DOLBY_PARAM_ID_PLB,  DOLBY_PARAM_ID_PLMD,
-	DOLBY_PARAM_ID_DHSB, DOLBY_PARAM_ID_DHRG, DOLBY_PARAM_ID_DSSB,
-	DOLBY_PARAM_ID_DSSA, DOLBY_PARAM_ID_DVLA, DOLBY_PARAM_ID_IEBT,
-	DOLBY_PARAM_ID_IEA,  DOLBY_PARAM_ID_DEA,  DOLBY_PARAM_ID_DED,
-	DOLBY_PARAM_ID_GEBG, DOLBY_PARAM_ID_AOCC, DOLBY_PARAM_ID_ARBI,
-	DOLBY_PARAM_ID_ARBL, DOLBY_PARAM_ID_ARBH, DOLBY_PARAM_ID_AROD,
-	DOLBY_PARAM_ID_ARTP, DOLBY_PARAM_ID_VMON, DOLBY_PARAM_ID_VMB,
-	DOLBY_PARAM_ID_VCNB, DOLBY_PARAM_ID_VCBF, DOLBY_PARAM_ID_PREG,
-	DOLBY_PARAM_ID_VEN,  DOLBY_PARAM_ID_PSTG, DOLBY_COMMIT_ALL_TO_DSP,
-	DOLBY_COMMIT_TO_DSP, DOLBY_USE_CACHE, DOLBY_AUTO_ENDP,
-	DOLBY_AUTO_ENDDEP_PARAMS
-};
-
-/* modifed state:	0x00000000 - Not updated
- *			> 0x00000000 && < 0x00010000
- *				Updated and not committed to DSP
- *			0x00010001 - Updated and committed to DSP
- *			> 0x00010001 - Modified the committed value
- */
-static int dolby_dap_params_modified[MAX_DOLBY_PARAMS] = { 0 };
-/* param offset */
-static uint32_t	dolby_dap_params_offset[MAX_DOLBY_PARAMS] = {
-	DOLBY_PARAM_VDHE_OFFSET, DOLBY_PARAM_VSPE_OFFSET,
-	DOLBY_PARAM_DSSF_OFFSET, DOLBY_PARAM_DVLI_OFFSET,
-	DOLBY_PARAM_DVLO_OFFSET, DOLBY_PARAM_DVLE_OFFSET,
-	DOLBY_PARAM_DVMC_OFFSET, DOLBY_PARAM_DVME_OFFSET,
-	DOLBY_PARAM_IENB_OFFSET, DOLBY_PARAM_IEBF_OFFSET,
-	DOLBY_PARAM_IEON_OFFSET, DOLBY_PARAM_DEON_OFFSET,
-	DOLBY_PARAM_NGON_OFFSET, DOLBY_PARAM_GEON_OFFSET,
-	DOLBY_PARAM_GENB_OFFSET, DOLBY_PARAM_GEBF_OFFSET,
-	DOLBY_PARAM_AONB_OFFSET, DOLBY_PARAM_AOBF_OFFSET,
-	DOLBY_PARAM_AOBG_OFFSET, DOLBY_PARAM_AOON_OFFSET,
-	DOLBY_PARAM_ARNB_OFFSET, DOLBY_PARAM_ARBF_OFFSET,
-	DOLBY_PARAM_PLB_OFFSET,  DOLBY_PARAM_PLMD_OFFSET,
-	DOLBY_PARAM_DHSB_OFFSET, DOLBY_PARAM_DHRG_OFFSET,
-	DOLBY_PARAM_DSSB_OFFSET, DOLBY_PARAM_DSSA_OFFSET,
-	DOLBY_PARAM_DVLA_OFFSET, DOLBY_PARAM_IEBT_OFFSET,
-	DOLBY_PARAM_IEA_OFFSET,  DOLBY_PARAM_DEA_OFFSET,
-	DOLBY_PARAM_DED_OFFSET,  DOLBY_PARAM_GEBG_OFFSET,
-	DOLBY_PARAM_AOCC_OFFSET, DOLBY_PARAM_ARBI_OFFSET,
-	DOLBY_PARAM_ARBL_OFFSET, DOLBY_PARAM_ARBH_OFFSET,
-	DOLBY_PARAM_AROD_OFFSET, DOLBY_PARAM_ARTP_OFFSET,
-	DOLBY_PARAM_VMON_OFFSET, DOLBY_PARAM_VMB_OFFSET,
-	DOLBY_PARAM_VCNB_OFFSET, DOLBY_PARAM_VCBF_OFFSET,
-	DOLBY_PARAM_PREG_OFFSET, DOLBY_PARAM_VEN_OFFSET,
-	DOLBY_PARAM_PSTG_OFFSET
-};
-/* param_length */
-static uint32_t	dolby_dap_params_length[MAX_DOLBY_PARAMS] = {
-	DOLBY_PARAM_VDHE_LENGTH, DOLBY_PARAM_VSPE_LENGTH,
-	DOLBY_PARAM_DSSF_LENGTH, DOLBY_PARAM_DVLI_LENGTH,
-	DOLBY_PARAM_DVLO_LENGTH, DOLBY_PARAM_DVLE_LENGTH,
-	DOLBY_PARAM_DVMC_LENGTH, DOLBY_PARAM_DVME_LENGTH,
-	DOLBY_PARAM_IENB_LENGTH, DOLBY_PARAM_IEBF_LENGTH,
-	DOLBY_PARAM_IEON_LENGTH, DOLBY_PARAM_DEON_LENGTH,
-	DOLBY_PARAM_NGON_LENGTH, DOLBY_PARAM_GEON_LENGTH,
-	DOLBY_PARAM_GENB_LENGTH, DOLBY_PARAM_GEBF_LENGTH,
-	DOLBY_PARAM_AONB_LENGTH, DOLBY_PARAM_AOBF_LENGTH,
-	DOLBY_PARAM_AOBG_LENGTH, DOLBY_PARAM_AOON_LENGTH,
-	DOLBY_PARAM_ARNB_LENGTH, DOLBY_PARAM_ARBF_LENGTH,
-	DOLBY_PARAM_PLB_LENGTH,  DOLBY_PARAM_PLMD_LENGTH,
-	DOLBY_PARAM_DHSB_LENGTH, DOLBY_PARAM_DHRG_LENGTH,
-	DOLBY_PARAM_DSSB_LENGTH, DOLBY_PARAM_DSSA_LENGTH,
-	DOLBY_PARAM_DVLA_LENGTH, DOLBY_PARAM_IEBT_LENGTH,
-	DOLBY_PARAM_IEA_LENGTH,  DOLBY_PARAM_DEA_LENGTH,
-	DOLBY_PARAM_DED_LENGTH,  DOLBY_PARAM_GEBG_LENGTH,
-	DOLBY_PARAM_AOCC_LENGTH, DOLBY_PARAM_ARBI_LENGTH,
-	DOLBY_PARAM_ARBL_LENGTH, DOLBY_PARAM_ARBH_LENGTH,
-	DOLBY_PARAM_AROD_LENGTH, DOLBY_PARAM_ARTP_LENGTH,
-	DOLBY_PARAM_VMON_LENGTH, DOLBY_PARAM_VMB_LENGTH,
-	DOLBY_PARAM_VCNB_LENGTH, DOLBY_PARAM_VCBF_LENGTH,
-	DOLBY_PARAM_PREG_LENGTH, DOLBY_PARAM_VEN_LENGTH,
-	DOLBY_PARAM_PSTG_LENGTH
-};
-
-/* param_value */
-static uint32_t	dolby_dap_params_value[TOTAL_LENGTH_DOLBY_PARAM] = {0};
-
-struct dolby_dap_params_get_s {
-	int32_t  port_id;
-	uint32_t device_id;
-	uint32_t param_id;
-	uint32_t offset;
-	uint32_t length;
-};
-
-struct dolby_dap_params_states_s {
-	bool use_cache;
-	bool auto_endp;
-	bool enddep_params;
-	int  port_id[AFE_MAX_PORTS];
-	int  copp_idx[AFE_MAX_PORTS];
-	int  port_open_count;
-	int  port_ids_dolby_can_be_enabled;
-	int  device;
-};
-
-static struct dolby_dap_params_get_s dolby_dap_params_get = {-1, DEVICE_OUT_ALL,
-							     0, 0, 0};
-static struct dolby_dap_params_states_s dolby_dap_params_states = { true, true,
-						true, {DOLBY_INVALID_PORT_ID},
-						{-1}, 0, DEVICE_OUT_ALL, 0 };
-/*
- * port_ids_dolby_can_be_enabled is set to 0x7FFFFFFF.
- * this needs to be removed after interface validation
- */
-
-static int msm_dolby_dap_map_device_to_dolby_endpoint(int device)
-{
-	int i, dolby_dap_device = DOLBY_ENDP_EXT_SPEAKERS;
-
-	for (i = 0; i < NUM_DOLBY_ENDP_DEVICE; i++) {
-		if (dolby_dap_endp_params[i].device == device) {
-			dolby_dap_device = dolby_dap_endp_params[i].dap_device;
-			break;
-		}
-	}
-	/* default the endpoint to speaker if corresponding device entry */
-	/* not found */
-	if (i >= NUM_DOLBY_ENDP_DEVICE)
-		dolby_dap_params_states.device = SPEAKER;
-	return dolby_dap_device;
-}
-
-static int msm_dolby_dap_send_end_point(int port_id, int copp_idx)
-{
-	int rc = 0;
-	char *params_value;
-	int *update_params_value;
-	uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
-				DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
-
-	pr_debug("%s\n", __func__);
-	params_value = kzalloc(params_length, GFP_KERNEL);
-	if (!params_value) {
-		pr_err("%s, params memory alloc failed", __func__);
-		return -ENOMEM;
-	}
-	update_params_value = (int *)params_value;
-	*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-	*update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
-	*update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
-	*update_params_value++ =
-		 msm_dolby_dap_map_device_to_dolby_endpoint(
-						dolby_dap_params_states.device);
-	rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-				       params_length);
-	if (rc) {
-		pr_err("%s: send dolby params failed\n", __func__);
-		rc = -EINVAL;
-	}
-	kfree(params_value);
-	return rc;
-}
-
-static int msm_dolby_dap_send_enddep_params(int port_id, int copp_idx,
-					    int device_channels)
-{
-	int i, j, rc = 0, idx, offset;
-	char *params_value;
-	int *update_params_value;
-	uint32_t params_length = (DOLBY_ENDDEP_PARAM_LENGTH +
-					DOLBY_NUM_ENDP_DEPENDENT_PARAMS *
-					DOLBY_PARAM_PAYLOAD_SIZE) *
-				sizeof(uint32_t);
-
-	pr_debug("%s\n", __func__);
-	params_value = kzalloc(params_length, GFP_KERNEL);
-	if (!params_value) {
-		pr_err("%s, params memory alloc failed", __func__);
-		return -ENOMEM;
-	}
-	update_params_value = (int *)params_value;
-	for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
-		if (dolby_dap_endp_params[idx].device ==
-			dolby_dap_params_states.device) {
-			if (dolby_dap_params_states.device == AUX_DIGITAL ||
-			    dolby_dap_params_states.device == PROXY) {
-				if (dolby_dap_endp_params[idx].device_ch_caps ==
-					device_channels)
-					break;
-			} else {
-				break;
-			}
-		}
-	}
-	if (idx >= NUM_DOLBY_ENDP_DEVICE) {
-		pr_err("%s: device is not set accordingly\n", __func__);
-		kfree(params_value);
-		return -EINVAL;
-	}
-	for (i = 0; i < DOLBY_ENDDEP_PARAM_LENGTH; i++) {
-		*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-		*update_params_value++ =
-				dolby_dap_endp_params[idx].params_id[i];
-		*update_params_value++ =
-			dolby_dap_endp_params[idx].params_len[i] *
-				sizeof(uint32_t);
-		offset = dolby_dap_endp_params[idx].params_offset[i];
-		for (j = 0; j < dolby_dap_endp_params[idx].params_len[i]; j++)
-			*update_params_value++ =
-				dolby_dap_endp_params[idx].params_val[offset+j];
-	}
-	rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-				       params_length);
-	if (rc) {
-		pr_err("%s: send dolby params failed\n", __func__);
-		rc = -EINVAL;
-	}
-	kfree(params_value);
-	return rc;
-}
-
-static int msm_dolby_dap_send_cached_params(int port_id, int copp_idx,
-					    int commit)
-{
-	char *params_value;
-	int *update_params_value, rc = 0;
-	uint32_t index_offset, i, j;
-	uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
-				MAX_DOLBY_PARAMS * DOLBY_PARAM_PAYLOAD_SIZE) *
-				sizeof(uint32_t);
-
-	params_value = kzalloc(params_length, GFP_KERNEL);
-	if (!params_value)
-		return -ENOMEM;
-
-	update_params_value = (int *)params_value;
-	params_length = 0;
-	for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
-		if ((dolby_dap_params_modified[i] == 0) ||
-		    ((commit) &&
-		     ((dolby_dap_params_modified[i] & 0x00010000) &&
-		     ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))))
-			continue;
-		*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-		*update_params_value++ = dolby_dap_params_id[i];
-		*update_params_value++ = dolby_dap_params_length[i] *
-						sizeof(uint32_t);
-		index_offset = dolby_dap_params_offset[i];
-		for (j = 0; j < dolby_dap_params_length[i]; j++) {
-			*update_params_value++ =
-					dolby_dap_params_value[index_offset+j];
-		}
-		params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
-				dolby_dap_params_length[i]) * sizeof(uint32_t);
-	}
-	pr_debug("%s, valid param length: %d", __func__, params_length);
-	if (params_length) {
-		rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-						params_length);
-		if (rc) {
-			pr_err("%s: send dolby params failed\n", __func__);
-			kfree(params_value);
-			return -EINVAL;
-		}
-		for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
-			if ((dolby_dap_params_modified[i] == 0) ||
-			    ((commit) &&
-			     ((dolby_dap_params_modified[i] & 0x00010000) &&
-			     ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))
-			    ))
-				continue;
-			dolby_dap_params_modified[i] = 0x00010001;
-		}
-	}
-	kfree(params_value);
-	return 0;
-}
-
-int msm_dolby_dap_init(int port_id, int copp_idx, int channels,
-		       bool is_custom_stereo_on)
-{
-	int ret = 0;
-	int index = adm_validate_and_get_port_index(port_id);
-
-	if (index < 0) {
-		pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
-			port_id);
-		return -EINVAL;
-	}
-	if ((port_id != DOLBY_INVALID_PORT_ID) &&
-	    (port_id & dolby_dap_params_states.port_ids_dolby_can_be_enabled)) {
-		dolby_dap_params_states.port_id[index] = port_id;
-		dolby_dap_params_states.copp_idx[index] = copp_idx;
-		dolby_dap_params_states.port_open_count++;
-		if (dolby_dap_params_states.auto_endp) {
-			ret = msm_dolby_dap_send_end_point(port_id, copp_idx);
-			if (ret) {
-				pr_err("%s: err sending endppoint\n", __func__);
-				return ret;
-			}
-		}
-		if (dolby_dap_params_states.use_cache) {
-			ret = msm_dolby_dap_send_cached_params(port_id,
-							       copp_idx, 0);
-			if (ret) {
-				pr_err("%s: err sending cached params\n",
-					__func__);
-				return ret;
-			}
-		}
-		if (dolby_dap_params_states.enddep_params) {
-			msm_dolby_dap_send_enddep_params(port_id, copp_idx,
-							 channels);
-			if (ret) {
-				pr_err("%s: err sending endp dependent params\n",
-					__func__);
-				return ret;
-			}
-		}
-		if (is_custom_stereo_on)
-			dolby_dap_set_custom_stereo_onoff(port_id, copp_idx,
-							  is_custom_stereo_on);
-	}
-	return ret;
-}
-
-void msm_dolby_dap_deinit(int port_id)
-{
-	int index = adm_validate_and_get_port_index(port_id);
-
-	if (index < 0) {
-		pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
-			port_id);
-		return;
-	}
-	dolby_dap_params_states.port_open_count--;
-	if ((dolby_dap_params_states.port_id[index] == port_id) &&
-		(!dolby_dap_params_states.port_open_count)) {
-		dolby_dap_params_states.port_id[index] = DOLBY_INVALID_PORT_ID;
-		dolby_dap_params_states.copp_idx[index] = -1;
-	}
-}
-
-static int msm_dolby_dap_set_vspe_vdhe(int port_id, int copp_idx,
-				       bool is_custom_stereo_enabled)
-{
-	char *params_value;
-	int *update_params_value, rc = 0;
-	uint32_t index_offset, i, j;
-	uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
-				2 * DOLBY_PARAM_PAYLOAD_SIZE) *
-				sizeof(uint32_t);
-	if (port_id == DOLBY_INVALID_PORT_ID) {
-		pr_err("%s: Not a Dolby topology. Do not set custom stereo mixing\n",
-			__func__);
-		return -EINVAL;
-	}
-	params_value = kzalloc(params_length, GFP_KERNEL);
-	if (!params_value)
-		return -ENOMEM;
-
-	update_params_value = (int *)params_value;
-	params_length = 0;
-	/* for VDHE and VSPE DAP params at index 0 and 1 in table */
-	for (i = 0; i < 2; i++) {
-		*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-		*update_params_value++ = dolby_dap_params_id[i];
-		*update_params_value++ = dolby_dap_params_length[i] *
-					sizeof(uint32_t);
-		index_offset = dolby_dap_params_offset[i];
-		for (j = 0; j < dolby_dap_params_length[i]; j++) {
-			if (is_custom_stereo_enabled)
-				*update_params_value++ = 0;
-			else
-				*update_params_value++ =
-					dolby_dap_params_value[index_offset+j];
-		}
-		params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
-				dolby_dap_params_length[i]) * sizeof(uint32_t);
-	}
-	pr_debug("%s, valid param length: %d", __func__, params_length);
-	if (params_length) {
-		rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-					       params_length);
-		if (rc) {
-			pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
-				__func__, rc);
-			kfree(params_value);
-			return -EINVAL;
-		}
-	}
-	kfree(params_value);
-	return 0;
-}
-
-int dolby_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
-				      bool is_custom_stereo_enabled)
-{
-	char *params_value;
-	int *update_params_value, rc = 0;
-	uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
-				DOLBY_PARAM_PAYLOAD_SIZE) *
-				sizeof(uint32_t);
-	if (port_id == DOLBY_INVALID_PORT_ID)
-		return -EINVAL;
-
-	msm_dolby_dap_set_vspe_vdhe(port_id, copp_idx,
-				    is_custom_stereo_enabled);
-	params_value = kzalloc(params_length, GFP_KERNEL);
-	if (!params_value) {
-		pr_err("%s, params memory alloc failed\n", __func__);
-		return -ENOMEM;
-	}
-	update_params_value = (int *)params_value;
-	params_length = 0;
-	*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-	*update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO;
-	*update_params_value++ = sizeof(uint32_t);
-	if (is_custom_stereo_enabled)
-		*update_params_value++ = 1;
-	else
-		*update_params_value++ = 0;
-	params_length += (DOLBY_PARAM_PAYLOAD_SIZE + 1) * sizeof(uint32_t);
-	pr_debug("%s, valid param length: %d", __func__, params_length);
-	if (params_length) {
-		rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-					       params_length);
-		if (rc) {
-			pr_err("%s: setting ds1 custom stereo param failed with rc=%d\n",
-				__func__, rc);
-			kfree(params_value);
-			return -EINVAL;
-		}
-	}
-	kfree(params_value);
-	return 0;
-}
-
-static int msm_dolby_dap_map_device_to_port_id(int device)
-{
-	int port_id = SLIMBUS_0_RX;
-
-	device = DEVICE_OUT_ALL;
-	/*update the device when single stream to multiple device is handled*/
-	if (device == DEVICE_OUT_ALL) {
-		port_id = PRIMARY_I2S_RX | SLIMBUS_0_RX | HDMI_RX |
-				INT_BT_SCO_RX | INT_FM_RX |
-				RT_PROXY_PORT_001_RX |
-				AFE_PORT_ID_PRIMARY_PCM_RX |
-				MI2S_RX | SECONDARY_I2S_RX |
-				SLIMBUS_1_RX | SLIMBUS_4_RX | SLIMBUS_3_RX |
-				AFE_PORT_ID_SECONDARY_MI2S_RX;
-	} else {
-		/* update port_id based on the device */
-	}
-	return port_id;
-}
-
-int msm_dolby_dap_param_to_set_control_get(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
-{
-	/* not used while setting the parameters */
-	return 0;
-}
-
-int msm_dolby_dap_param_to_set_control_put(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
-{
-	int rc = 0, port_id, copp_idx;
-	uint32_t idx, j;
-	uint32_t device = ucontrol->value.integer.value[0];
-	uint32_t param_id = ucontrol->value.integer.value[1];
-	uint32_t offset = ucontrol->value.integer.value[2];
-	uint32_t length = ucontrol->value.integer.value[3];
-
-	dolby_dap_params_states.port_ids_dolby_can_be_enabled =
-				msm_dolby_dap_map_device_to_port_id(device);
-	for (idx = 0; idx < ALL_DOLBY_PARAMS; idx++) {
-		/*paramid from user space*/
-		if (param_id == dolby_dap_params_id[idx])
-			break;
-	}
-	if (idx > ALL_DOLBY_PARAMS-1) {
-		pr_err("%s: invalid param id 0x%x to set\n", __func__,
-			param_id);
-		return -EINVAL;
-	}
-	switch (idx) {
-		case DOLBY_COMMIT_ALL_IDX: {
-			/* COMIIT ALL: Send all parameters to DSP */
-			pr_debug("%s: COMMIT_ALL recvd\n", __func__);
-			for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-				port_id = dolby_dap_params_states.port_id[idx];
-				copp_idx =
-					dolby_dap_params_states.copp_idx[idx];
-				if ((copp_idx > 0) &&
-				    (copp_idx < MAX_COPPS_PER_PORT) &&
-				    (port_id != DOLBY_INVALID_PORT_ID))
-					rc |= msm_dolby_dap_send_cached_params(
-								      port_id,
-								      copp_idx,
-								      0);
-			}
-		}
-		break;
-		case DOLBY_COMMIT_IDX: {
-			pr_debug("%s: COMMIT recvd\n", __func__);
-			/* COMMIT: Send only modified parameters to DSP */
-			for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-				port_id = dolby_dap_params_states.port_id[idx];
-				copp_idx =
-					dolby_dap_params_states.copp_idx[idx];
-				if ((copp_idx > 0) &&
-				    (copp_idx < MAX_COPPS_PER_PORT) &&
-				    (port_id == DOLBY_INVALID_PORT_ID))
-					rc |= msm_dolby_dap_send_cached_params(
-								      port_id,
-								      copp_idx,
-								      1);
-			}
-		}
-		break;
-		case DOLBY_USE_CACHE_IDX: {
-			pr_debug("%s: USE CACHE recvd val: %ld\n", __func__,
-				ucontrol->value.integer.value[4]);
-			dolby_dap_params_states.use_cache =
-				ucontrol->value.integer.value[4];
-		}
-		break;
-		case DOLBY_AUTO_ENDP_IDX: {
-			pr_debug("%s: AUTO_ENDP recvd val: %ld\n", __func__,
-				ucontrol->value.integer.value[4]);
-			dolby_dap_params_states.auto_endp =
-				ucontrol->value.integer.value[4];
-		}
-		break;
-		case DOLBY_AUTO_ENDDEP_IDX: {
-			pr_debug("%s: USE_ENDDEP_PARAMS recvd val: %ld\n",
-				__func__, ucontrol->value.integer.value[4]);
-			dolby_dap_params_states.enddep_params =
-				ucontrol->value.integer.value[4];
-		}
-		break;
-		default: {
-			/* cache the parameters */
-			dolby_dap_params_modified[idx] += 1;
-			dolby_dap_params_length[idx] = length;
-			pr_debug("%s: param recvd deviceId=0x%x paramId=0x%x offset=%d length=%d\n",
-				__func__, device, param_id, offset, length);
-			for (j = 0; j < length; j++) {
-				dolby_dap_params_value[
-					dolby_dap_params_offset[idx] +
-					offset + j]
-				= ucontrol->value.integer.value[4+j];
-				pr_debug("value[%d]: %ld\n", j,
-					ucontrol->value.integer.value[4+j]);
-			}
-		}
-	}
-
-	return rc;
-}
-
-int msm_dolby_dap_param_to_get_control_get(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
-{
-	int rc = 0, i, index;
-	char *params_value;
-	int *update_params_value;
-	uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
-					sizeof(uint32_t);
-	uint32_t param_payload_len =
-			DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
-	int port_id = dolby_dap_params_get.port_id, copp_idx;
-
-	if (port_id == DOLBY_INVALID_PORT_ID) {
-		pr_err("%s, port_id not set, do not query ADM\n", __func__);
-		return -EINVAL;
-	}
-	index = adm_validate_and_get_port_index(port_id);
-	if (index < 0) {
-		pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
-			port_id);
-		return -EINVAL;
-	}
-	copp_idx = dolby_dap_params_states.copp_idx[index];
-	if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
-		pr_debug("%s: get params called before copp open.copp_idx:%d\n",
-			 __func__, copp_idx);
-		return -EINVAL;
-	}
-	params_value = kzalloc(params_length, GFP_KERNEL);
-	if (!params_value)
-		return -ENOMEM;
-
-	if (dolby_dap_params_get.param_id == DOLBY_PARAM_ID_VER) {
-		rc = adm_get_params(port_id, copp_idx,
-				    DOLBY_BUNDLE_MODULE_ID, DOLBY_PARAM_ID_VER,
-				    params_length + param_payload_len,
-				    params_value);
-	} else {
-		for (i = 0; i < MAX_DOLBY_PARAMS; i++)
-			if (dolby_dap_params_id[i] ==
-				dolby_dap_params_get.param_id)
-				break;
-		if (i > MAX_DOLBY_PARAMS-1) {
-			pr_err("%s: invalid param id to set", __func__);
-			rc = -EINVAL;
-		} else {
-			params_length = (dolby_dap_params_length[i] +
-						DOLBY_PARAM_PAYLOAD_SIZE) *
-						sizeof(uint32_t);
-			rc = adm_get_params(port_id, copp_idx,
-					    DOLBY_BUNDLE_MODULE_ID,
-					    dolby_dap_params_id[i],
-					    params_length + param_payload_len,
-					    params_value);
-		}
-	}
-	if (rc) {
-		pr_err("%s: get parameters failed rc:%d\n", __func__, rc);
-		kfree(params_value);
-		return -EINVAL;
-	}
-	update_params_value = (int *)params_value;
-	ucontrol->value.integer.value[0] = dolby_dap_params_get.device_id;
-	ucontrol->value.integer.value[1] = dolby_dap_params_get.param_id;
-	ucontrol->value.integer.value[2] = dolby_dap_params_get.offset;
-	ucontrol->value.integer.value[3] = dolby_dap_params_get.length;
-
-	pr_debug("%s: FROM DSP value[0] 0x%x value[1] %d value[2] 0x%x\n",
-			__func__, update_params_value[0],
-			update_params_value[1], update_params_value[2]);
-	for (i = 0; i < dolby_dap_params_get.length; i++) {
-		ucontrol->value.integer.value[DOLBY_PARAM_PAYLOAD_SIZE+i] =
-			update_params_value[i];
-		pr_debug("value[%d]:%d\n", i, update_params_value[i]);
-	}
-	pr_debug("%s: Returning param_id=0x%x offset=%d length=%d\n",
-			__func__, dolby_dap_params_get.param_id,
-			dolby_dap_params_get.offset,
-			dolby_dap_params_get.length);
-	kfree(params_value);
-	return 0;
-}
-
-int msm_dolby_dap_param_to_get_control_put(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
-{
-	int port_id, idx, copp_idx;
-
-	dolby_dap_params_get.device_id = ucontrol->value.integer.value[0];
-	port_id = msm_dolby_dap_map_device_to_port_id(
-						dolby_dap_params_get.device_id);
-	for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-		port_id = dolby_dap_params_states.port_id[idx];
-		copp_idx = dolby_dap_params_states.copp_idx[idx];
-		if ((copp_idx < 0) ||
-		    (copp_idx >= MAX_COPPS_PER_PORT) ||
-		    (port_id == DOLBY_INVALID_PORT_ID))
-			continue;
-		else
-			break;
-	}
-	if (idx == AFE_MAX_PORTS)
-		port_id = SLIMBUS_0_RX;
-	dolby_dap_params_get.port_id = port_id;
-	dolby_dap_params_get.param_id = ucontrol->value.integer.value[1];
-	dolby_dap_params_get.offset = ucontrol->value.integer.value[2];
-	dolby_dap_params_get.length = ucontrol->value.integer.value[3];
-	pr_debug("%s: param_id=0x%x offset=%d length=%d\n", __func__,
-		dolby_dap_params_get.param_id, dolby_dap_params_get.offset,
-		dolby_dap_params_get.length);
-	return 0;
-}
-
-int msm_dolby_dap_param_visualizer_control_get(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
-{
-	uint32_t length = dolby_dap_params_value[DOLBY_PARAM_VCNB_OFFSET];
-	char *visualizer_data;
-	int i, rc;
-	int *update_visualizer_data;
-	uint32_t offset, params_length =
-		(2*length + DOLBY_VIS_PARAM_HEADER_SIZE)*sizeof(uint32_t);
-	uint32_t param_payload_len =
-		DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
-	int port_id, copp_idx, idx;
-
-	for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-		port_id = dolby_dap_params_states.port_id[idx];
-		copp_idx = dolby_dap_params_states.copp_idx[idx];
-		if ((copp_idx < 0) ||
-		    (copp_idx >= MAX_COPPS_PER_PORT) ||
-		    (port_id == DOLBY_INVALID_PORT_ID))
-			continue;
-		else
-			break;
-	}
-	if (idx == AFE_MAX_PORTS) {
-		pr_debug("%s, port_id not set, returning error", __func__);
-		ucontrol->value.integer.value[0] = 0;
-		return -EINVAL;
-	}
-	visualizer_data = kzalloc(params_length, GFP_KERNEL);
-	if (!visualizer_data)
-		return -ENOMEM;
-
-	offset = 0;
-	params_length = length * sizeof(uint32_t);
-	rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
-			    DOLBY_PARAM_ID_VCBG,
-			    params_length + param_payload_len,
-			    visualizer_data + offset);
-	if (rc) {
-		pr_err("%s: get parameters failed\n", __func__);
-		kfree(visualizer_data);
-		return -EINVAL;
-	}
-
-	offset = length * sizeof(uint32_t);
-	rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
-			    DOLBY_PARAM_ID_VCBE,
-			    params_length + param_payload_len,
-			    visualizer_data + offset);
-	if (rc) {
-		pr_err("%s: get parameters failed\n", __func__);
-		kfree(visualizer_data);
-		return -EINVAL;
-	}
-
-	ucontrol->value.integer.value[0] = 2*length;
-	pr_debug("%s: visualizer data length %ld\n", __func__,
-			ucontrol->value.integer.value[0]);
-	update_visualizer_data = (int *)visualizer_data;
-	for (i = 0; i < 2*length; i++) {
-		ucontrol->value.integer.value[1+i] = update_visualizer_data[i];
-		pr_debug("value[%d] %d\n", i, update_visualizer_data[i]);
-	}
-	kfree(visualizer_data);
-	return 0;
-}
-
-int msm_dolby_dap_param_visualizer_control_put(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
-{
-	/* not used while getting the visualizer data */
-	return 0;
-}
-
-int msm_dolby_dap_endpoint_control_get(struct snd_kcontrol *kcontrol,
-				       struct snd_ctl_elem_value *ucontrol)
-{
-	/* not used while setting the endpoint */
-	return 0;
-}
-
-int msm_dolby_dap_endpoint_control_put(struct snd_kcontrol *kcontrol,
-				       struct snd_ctl_elem_value *ucontrol)
-{
-	int device = ucontrol->value.integer.value[0];
-
-	dolby_dap_params_states.device = device;
-	return 0;
-}
-
-int msm_dolby_dap_security_control_get(struct snd_kcontrol *kcontrol,
-				       struct snd_ctl_elem_value *ucontrol)
-{
-	/* not used while setting the manfr id*/
-	return 0;
-}
-
-int msm_dolby_dap_security_control_put(struct snd_kcontrol *kcontrol,
-				       struct snd_ctl_elem_value *ucontrol)
-{
-	int manufacturer_id = ucontrol->value.integer.value[0];
-
-	core_set_dolby_manufacturer_id(manufacturer_id);
-	return 0;
-}
-
-int msm_dolby_dap_license_control_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] =
-			core_get_license_status(DOLBY_DS1_LICENSE_ID);
-	return 0;
-}
-
-int msm_dolby_dap_license_control_put(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	return core_set_license(ucontrol->value.integer.value[0],
-						DOLBY_DS1_LICENSE_ID);
-}
-
-static const struct snd_kcontrol_new dolby_license_controls[] = {
-	SOC_SINGLE_MULTI_EXT("DS1 License", SND_SOC_NOPM, 0,
-	0xFFFFFFFF, 0, 1, msm_dolby_dap_license_control_get,
-	msm_dolby_dap_license_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_security_controls[] = {
-	SOC_SINGLE_MULTI_EXT("DS1 Security", SND_SOC_NOPM, 0,
-	0xFFFFFFFF, 0, 1, msm_dolby_dap_security_control_get,
-	msm_dolby_dap_security_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_set_controls[] = {
-	SOC_SINGLE_MULTI_EXT("DS1 DAP Set Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
-	0, 128, msm_dolby_dap_param_to_set_control_get,
-	msm_dolby_dap_param_to_set_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_get_controls[] = {
-	SOC_SINGLE_MULTI_EXT("DS1 DAP Get Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
-	0, 128, msm_dolby_dap_param_to_get_control_get,
-	msm_dolby_dap_param_to_get_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_visualizer_controls[] = {
-	SOC_SINGLE_MULTI_EXT("DS1 DAP Get Visualizer", SND_SOC_NOPM, 0,
-	0xFFFFFFFF, 0, 41, msm_dolby_dap_param_visualizer_control_get,
-	msm_dolby_dap_param_visualizer_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_end_point_controls[] = {
-	SOC_SINGLE_MULTI_EXT("DS1 DAP Endpoint", SND_SOC_NOPM, 0,
-	0xFFFFFFFF, 0, 1, msm_dolby_dap_endpoint_control_get,
-	msm_dolby_dap_endpoint_control_put),
-};
-
-void msm_dolby_dap_add_controls(struct snd_soc_platform *platform)
-{
-	snd_soc_add_platform_controls(platform,
-				dolby_license_controls,
-			ARRAY_SIZE(dolby_license_controls));
-
-	snd_soc_add_platform_controls(platform,
-				dolby_security_controls,
-			ARRAY_SIZE(dolby_security_controls));
-
-	snd_soc_add_platform_controls(platform,
-				dolby_dap_param_to_set_controls,
-			ARRAY_SIZE(dolby_dap_param_to_set_controls));
-
-	snd_soc_add_platform_controls(platform,
-				dolby_dap_param_to_get_controls,
-			ARRAY_SIZE(dolby_dap_param_to_get_controls));
-
-	snd_soc_add_platform_controls(platform,
-				dolby_dap_param_visualizer_controls,
-			ARRAY_SIZE(dolby_dap_param_visualizer_controls));
-
-	snd_soc_add_platform_controls(platform,
-				dolby_dap_param_end_point_controls,
-			ARRAY_SIZE(dolby_dap_param_end_point_controls));
-}
diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
index 40ea49c..b7e69fa 100644
--- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
@@ -20,7 +20,7 @@
 #include <sound/q6core.h>
 
 
-#ifdef CONFIG_DOLBY_DS2
+#if defined(CONFIG_DOLBY_DS2) || defined(CONFIG_DOLBY_LICENSE)
 
 /* ramp up/down for 30ms    */
 #define DOLBY_SOFT_VOLUME_PERIOD	40
@@ -198,7 +198,8 @@
 	uint32_t param_payload_len = PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
 	int rc = 0;
 
-	update_params_value = kzalloc(params_length, GFP_KERNEL);
+	update_params_value = kzalloc(params_length + param_payload_len,
+				      GFP_KERNEL);
 	if (!update_params_value)
 		goto end;
 
@@ -999,6 +1000,20 @@
 							copp_idx, rc);
 					}
 				}
+				/* Turn on QTI modules */
+				for (j = 1; j < mod_list[0]; j++) {
+					if (!msm_ds2_dap_can_enable_module(
+						mod_list[j]) ||
+						mod_list[j] ==
+						DS2_MODULE_ID)
+						continue;
+					pr_debug("%s: param enable %d\n",
+						__func__, mod_list[j]);
+					adm_param_enable(port_id, copp_idx,
+							 mod_list[j],
+							 MODULE_ENABLE);
+				}
+
 				/* Add adm api to resend calibration on port */
 				rc = msm_ds2_dap_send_cal_data(i);
 				if (rc < 0) {
@@ -1642,6 +1657,7 @@
 		ret = 0;
 		dolby_data->length = 0;
 		pr_err("%s Incorrect VCNB length", __func__);
+		return -EINVAL;
 	}
 
 	params_length = (2*length + DOLBY_VIS_PARAM_HEADER_SIZE) *
@@ -2296,4 +2312,4 @@
 {
 	return 0;
 }
-#endif /*CONFIG_DOLBY_DS2*/
+#endif /* CONFIG_DOLBY_DS2 || CONFIG_DOLBY_LICENSE */
diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h
index f2c2069..5e8c3a6 100644
--- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h
@@ -45,7 +45,7 @@
 		_IOR('U', 0x15, struct dolby_param_data32)
 #endif
 
-#ifdef CONFIG_DOLBY_DS2
+#if defined(CONFIG_DOLBY_DS2) || defined(CONFIG_DOLBY_LICENSE)
 /* DOLBY DOLBY GUIDS */
 #define DS2_MODULE_ID			0x00010775
 
@@ -86,10 +86,11 @@
 /* Dolby DOLBY end */
 #else
 
-static inline void msm_ds2_dap_update_port_parameters(struct snd_hwdep *hw,
+static inline int msm_ds2_dap_update_port_parameters(struct snd_hwdep *hw,
 					       struct file *file,
 					       bool open)
 {
+	return 0;
 }
 
 static inline int msm_ds2_dap_ioctl(struct snd_hwdep *hw, struct file *file,
diff --git a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c b/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
index 5ad55dc..2ff1e02 100644
--- a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
+++ b/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
@@ -236,7 +236,8 @@
 	if (_vol_cmds) {
 		_vol_cmds_d = kzalloc(_vol_cmd_cnt * sizeof(struct vol_cmds_d),
 					GFP_KERNEL);
-	}
+	} else
+		_vol_cmd_cnt = 0;
 	if (_vol_cmds_d)
 		return 0;
 	_volume_cmds_free();
@@ -1333,9 +1334,9 @@
 			if (((u32 *)_sec_blob[target[0]])[1] != target[1]) {
 				eagle_ioctl_dbg("%s: request new size for already allocated license index %u",
 					 __func__, target[0]);
-				kfree(_sec_blob[target[0]]);
-				_sec_blob[target[0]] = NULL;
 			}
+			kfree(_sec_blob[target[0]]);
+			_sec_blob[target[0]] = NULL;
 		}
 		eagle_ioctl_dbg("%s: allocating %u bytes for license index %u",
 				__func__, target[1], target[0]);
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index d19c346..37dd31f 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,7 +35,7 @@
 
 #define CAPTURE_MIN_NUM_PERIODS     2
 #define CAPTURE_MAX_NUM_PERIODS     8
-#define CAPTURE_MAX_PERIOD_SIZE     4096
+#define CAPTURE_MAX_PERIOD_SIZE     61440
 #define CAPTURE_MIN_PERIOD_SIZE     320
 #define LISTEN_MAX_STATUS_PAYLOAD_SIZE 256
 
@@ -47,12 +47,14 @@
 				SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				SNDRV_PCM_INFO_INTERLEAVED |
 				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
-	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
-	.rates =                SNDRV_PCM_RATE_16000,
+	.formats =              (SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE),
+	.rates =		(SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_48000),
 	.rate_min =             16000,
-	.rate_max =             16000,
+	.rate_max =             48000,
 	.channels_min =         1,
-	.channels_max =         1,
+	.channels_max =         4,
 	.buffer_bytes_max =     CAPTURE_MAX_NUM_PERIODS *
 				CAPTURE_MAX_PERIOD_SIZE,
 	.period_bytes_min =	CAPTURE_MIN_PERIOD_SIZE,
@@ -64,7 +66,7 @@
 
 /* Conventional and unconventional sample rate supported */
 static unsigned int supported_sample_rates[] = {
-	16000,
+	16000, 48000,
 };
 
 static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -76,7 +78,7 @@
 struct lsm_priv {
 	struct snd_pcm_substream *substream;
 	struct lsm_client *lsm_client;
-	struct snd_lsm_event_status *event_status;
+	struct snd_lsm_event_status_v3 *event_status;
 	spinlock_t event_lock;
 	wait_queue_head_t event_wait;
 	unsigned long event_avail;
@@ -84,10 +86,16 @@
 	atomic_t buf_count;
 	atomic_t read_abort;
 	wait_queue_head_t period_wait;
+	struct mutex lsm_api_lock;
 	int appl_cnt;
 	int dma_write;
 };
 
+enum { /* lsm session states */
+	IDLE = 0,
+	RUNNING,
+};
+
 static int msm_lsm_queue_lab_buffer(struct lsm_priv *prtd, int i)
 {
 	int rc = 0;
@@ -193,10 +201,12 @@
 	struct lsm_priv *prtd = priv;
 	struct snd_pcm_substream *substream = prtd->substream;
 	struct snd_soc_pcm_runtime *rtd;
-	struct snd_lsm_event_status *temp;
+	struct snd_lsm_event_status_v3 *temp;
 	uint16_t status = 0;
 	uint16_t payload_size = 0;
 	uint16_t index = 0;
+	uint32_t event_ts_lsw = 0;
+	uint32_t event_ts_msw = 0;
 
 	if (!substream || !substream->private_data) {
 		pr_err("%s: Invalid %s\n", __func__,
@@ -271,25 +281,45 @@
 			"%s: event detect status = %d payload size = %d\n",
 			__func__, status, payload_size);
 		break;
+
+	case LSM_SESSION_EVENT_DETECTION_STATUS_V3:
+		event_ts_lsw = ((uint32_t *)payload)[0];
+		event_ts_msw = ((uint32_t *)payload)[1];
+		status = (uint16_t)((uint8_t *)payload)[8];
+		payload_size = (uint16_t)((uint8_t *)payload)[9];
+		index = 10;
+		dev_dbg(rtd->dev,
+			"%s: ts_msw = %u, ts_lsw = %u, event detect status = %d payload size = %d\n",
+			__func__, event_ts_msw, event_ts_lsw, status,
+			payload_size);
+		break;
+
 	default:
 		break;
 	}
 
 	if (opcode == LSM_SESSION_EVENT_DETECTION_STATUS ||
-		opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
+		opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V2 ||
+		opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V3) {
 		spin_lock_irqsave(&prtd->event_lock, flags);
 		temp = krealloc(prtd->event_status,
-				sizeof(struct snd_lsm_event_status) +
+				sizeof(struct snd_lsm_event_status_v3) +
 				payload_size, GFP_ATOMIC);
 		if (!temp) {
 			dev_err(rtd->dev, "%s: no memory for event status\n",
 				__func__);
 			return;
 		}
-
+		/*
+		 * The event status timestamp will be non-zero and valid if
+		 * the opcode is LSM_SESSION_EVENT_DETECTION_STATUS_V3
+		 */
 		prtd->event_status = temp;
+		prtd->event_status->timestamp_lsw = event_ts_lsw;
+		prtd->event_status->timestamp_msw = event_ts_msw;
 		prtd->event_status->status = status;
 		prtd->event_status->payload_size = payload_size;
+
 		if (likely(prtd->event_status)) {
 			memcpy(prtd->event_status->payload,
 			       &((uint8_t *)payload)[index],
@@ -641,6 +671,54 @@
 	return rc;
 }
 
+static int msm_lsm_set_poll_enable(struct snd_pcm_substream *substream,
+		struct lsm_params_info *p_info)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct lsm_priv *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_lsm_poll_enable poll_enable;
+	int rc = 0;
+
+	if (p_info->param_size != sizeof(poll_enable)) {
+		dev_err(rtd->dev,
+			"%s: Invalid param_size %d\n",
+			__func__, p_info->param_size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (copy_from_user(&poll_enable, p_info->param_data,
+			   sizeof(poll_enable))) {
+		dev_err(rtd->dev,
+			"%s: copy_from_user failed, size = %zd\n",
+			__func__, sizeof(poll_enable));
+		rc = -EFAULT;
+		goto done;
+	}
+
+	if (prtd->lsm_client->poll_enable == poll_enable.poll_en) {
+		dev_dbg(rtd->dev,
+			"%s: Polling for session %d already %s\n",
+			__func__, prtd->lsm_client->session,
+			(poll_enable.poll_en ? "enabled" : "disabled"));
+		rc = 0;
+		goto done;
+	}
+
+	rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+				 &poll_enable, LSM_POLLING_ENABLE);
+	if (!rc) {
+		prtd->lsm_client->poll_enable = poll_enable.poll_en;
+	} else {
+		dev_err(rtd->dev,
+			"%s: Failed to set poll enable, err = %d\n",
+			__func__, rc);
+	}
+done:
+	return rc;
+}
+
 static int msm_lsm_process_params(struct snd_pcm_substream *substream,
 		struct snd_lsm_module_params *p_data,
 		void *params)
@@ -681,6 +759,9 @@
 		case LSM_CUSTOM_PARAMS:
 			rc = msm_lsm_set_custom(substream, p_info);
 			break;
+		case LSM_POLLING_ENABLE:
+			rc = msm_lsm_set_poll_enable(substream, p_info);
+			break;
 		default:
 			dev_err(rtd->dev,
 				"%s: Invalid param_type %d\n",
@@ -710,10 +791,8 @@
 	struct snd_lsm_session_data session_data;
 	int rc = 0;
 	int xchg = 0;
-	u32 size = 0;
 	struct snd_pcm_runtime *runtime;
 	struct lsm_priv *prtd;
-	struct snd_lsm_event_status *user = arg;
 	struct snd_lsm_detection_params det_params;
 	uint8_t *confidence_level = NULL;
 
@@ -730,8 +809,13 @@
 	switch (cmd) {
 	case SNDRV_LSM_SET_SESSION_DATA:
 		dev_dbg(rtd->dev, "%s: set session data\n", __func__);
-		memcpy(&session_data, arg,
-		       sizeof(struct snd_lsm_session_data));
+		if (copy_from_user(&session_data, arg,
+				   sizeof(session_data))) {
+			dev_err(rtd->dev, "%s: %s: copy_from_user failed\n",
+				__func__, "LSM_SET_SESSION_DATA");
+			return -EFAULT;
+		}
+
 		if (session_data.app_id != LSM_VOICE_WAKEUP_APP_ID_V2) {
 			dev_err(rtd->dev,
 				"%s:Invalid App id %d for Listen client\n",
@@ -820,13 +904,6 @@
 		break;
 
 	case SNDRV_LSM_SET_PARAMS:
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: %s Invalid argument\n",
-				__func__, "SNDRV_LSM_SET_PARAMS");
-			return -EINVAL;
-		}
-
 		dev_dbg(rtd->dev, "%s: set_params\n", __func__);
 		memcpy(&det_params, arg,
 			sizeof(det_params));
@@ -872,21 +949,36 @@
 		break;
 
 	case SNDRV_LSM_EVENT_STATUS:
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		uint32_t ts_lsw, ts_msw;
+		uint16_t status = 0, payload_size = 0;
+
 		dev_dbg(rtd->dev, "%s: Get event status\n", __func__);
 		atomic_set(&prtd->event_wait_stop, 0);
+
+		/*
+		 * Release the API lock before waiting to allow
+		 * other ioctls to be invoked while waiting
+		 * for the event
+		 */
+		mutex_unlock(&prtd->lsm_api_lock);
 		rc = wait_event_freezable(prtd->event_wait,
 				(cmpxchg(&prtd->event_avail, 1, 0) ||
 				 (xchg = atomic_cmpxchg(&prtd->event_wait_stop,
 							1, 0))));
+		mutex_lock(&prtd->lsm_api_lock);
 		dev_dbg(rtd->dev, "%s: wait_event_freezable %d event_wait_stop %d\n",
 			 __func__, rc, xchg);
 		if (!rc && !xchg) {
 			dev_dbg(rtd->dev, "%s: New event available %ld\n",
 				__func__, prtd->event_avail);
 			spin_lock_irqsave(&prtd->event_lock, flags);
+
 			if (prtd->event_status) {
-				size = sizeof(*(prtd->event_status)) +
-				prtd->event_status->payload_size;
+				payload_size = prtd->event_status->payload_size;
+				ts_lsw = prtd->event_status->timestamp_lsw;
+				ts_msw = prtd->event_status->timestamp_msw;
+				status = prtd->event_status->status;
 				spin_unlock_irqrestore(&prtd->event_lock,
 						       flags);
 			} else {
@@ -898,15 +990,43 @@
 					__func__);
 				break;
 			}
-			if (user->payload_size <
-			    prtd->event_status->payload_size) {
-				dev_dbg(rtd->dev,
-					"%s: provided %d bytes isn't enough, needs %d bytes\n",
-					__func__, user->payload_size,
-					prtd->event_status->payload_size);
-				rc = -ENOMEM;
+
+			if (cmd == SNDRV_LSM_EVENT_STATUS) {
+				struct snd_lsm_event_status *user = arg;
+
+				if (user->payload_size < payload_size) {
+					dev_dbg(rtd->dev,
+						"%s: provided %d bytes isn't enough, needs %d bytes\n",
+						__func__, user->payload_size,
+						payload_size);
+					rc = -ENOMEM;
+				} else {
+					user->status = status;
+					user->payload_size = payload_size;
+					memcpy(user->payload,
+						prtd->event_status->payload,
+						payload_size);
+				}
 			} else {
-				memcpy(user, prtd->event_status, size);
+				struct snd_lsm_event_status_v3 *user_v3 = arg;
+
+				if (user_v3->payload_size < payload_size) {
+					dev_dbg(rtd->dev,
+						"%s: provided %d bytes isn't enough, needs %d bytes\n",
+						__func__, user_v3->payload_size,
+						payload_size);
+					rc = -ENOMEM;
+				} else {
+					user_v3->timestamp_lsw = ts_lsw;
+					user_v3->timestamp_msw = ts_msw;
+					user_v3->status = status;
+					user_v3->payload_size = payload_size;
+					memcpy(user_v3->payload,
+						prtd->event_status->payload,
+						payload_size);
+				}
+			}
+			if (!rc) {
 				if (prtd->lsm_client->lab_enable
 					&& !prtd->lsm_client->lab_started
 					&& prtd->event_status->status ==
@@ -931,6 +1051,7 @@
 			rc = 0;
 		}
 		break;
+	}
 
 	case SNDRV_LSM_ABORT_EVENT:
 		dev_dbg(rtd->dev, "%s: Aborting event status wait\n",
@@ -978,46 +1099,43 @@
 		break;
 	}
 	case SNDRV_LSM_LAB_CONTROL: {
-		u32 *enable = NULL;
+		u32 enable;
 
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: Invalid param arg for ioctl %s session %d\n",
-				__func__, "SNDRV_LSM_LAB_CONTROL",
-				prtd->lsm_client->session);
-			rc = -EINVAL;
-			break;
+		if (copy_from_user(&enable, arg, sizeof(enable))) {
+			dev_err(rtd->dev, "%s: %s: copy_from_user failed\n",
+				__func__, "LSM_LAB_CONTROL");
+			return -EFAULT;
 		}
-		enable = (int *)arg;
+
 		dev_dbg(rtd->dev, "%s: ioctl %s, enable = %d\n",
-			 __func__, "SNDRV_LSM_LAB_CONTROL", *enable);
+			 __func__, "SNDRV_LSM_LAB_CONTROL", enable);
 		if (!prtd->lsm_client->started) {
-			if (prtd->lsm_client->lab_enable == *enable) {
+			if (prtd->lsm_client->lab_enable == enable) {
 				dev_dbg(rtd->dev,
 					"%s: Lab for session %d already %s\n",
 					__func__, prtd->lsm_client->session,
-					((*enable) ? "enabled" : "disabled"));
+					enable ? "enabled" : "disabled");
 				rc = 0;
 				break;
 			}
-			rc = q6lsm_lab_control(prtd->lsm_client, *enable);
+			rc = q6lsm_lab_control(prtd->lsm_client, enable);
 			if (rc) {
 				dev_err(rtd->dev,
 					"%s: ioctl %s failed rc %d to %s lab for session %d\n",
 					__func__, "SNDRV_LAB_CONTROL", rc,
-					((*enable) ? "enable" : "disable"),
+					enable ? "enable" : "disable",
 					prtd->lsm_client->session);
 			} else {
 				rc = msm_lsm_lab_buffer_alloc(prtd,
-					((*enable) ? LAB_BUFFER_ALLOC
-					: LAB_BUFFER_DEALLOC));
+					enable ? LAB_BUFFER_ALLOC
+					: LAB_BUFFER_DEALLOC);
 				if (rc)
 					dev_err(rtd->dev,
 						"%s: msm_lsm_lab_buffer_alloc failed rc %d for %s",
 						__func__, rc,
-					((*enable) ? "ALLOC" : "DEALLOC"));
+						enable ? "ALLOC" : "DEALLOC");
 				if (!rc)
-					prtd->lsm_client->lab_enable = *enable;
+					prtd->lsm_client->lab_enable = enable;
 			}
 		} else {
 			dev_err(rtd->dev, "%s: ioctl %s issued after start",
@@ -1040,6 +1158,43 @@
 			prtd->lsm_client->lab_started = false;
 		}
 	break;
+
+	case SNDRV_LSM_SET_PORT:
+		dev_dbg(rtd->dev, "%s: set LSM port\n", __func__);
+		rc = q6lsm_set_port_connected(prtd->lsm_client);
+		break;
+
+	case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
+		u32 *mode = NULL;
+
+		if (!arg) {
+			dev_err(rtd->dev,
+				"%s: Invalid param arg for ioctl %s session %d\n",
+				__func__, "SNDRV_LSM_SET_FWK_MODE_CONFIG",
+				prtd->lsm_client->session);
+			rc = -EINVAL;
+			break;
+		}
+		mode = (u32 *)arg;
+		if (prtd->lsm_client->event_mode == *mode) {
+			dev_dbg(rtd->dev,
+				"%s: mode for %d already set to %d\n",
+				__func__, prtd->lsm_client->session, *mode);
+			rc = 0;
+		} else {
+			dev_dbg(rtd->dev, "%s: Event mode = %d\n",
+				 __func__, *mode);
+			rc = q6lsm_set_fwk_mode_cfg(prtd->lsm_client, *mode);
+			if (!rc)
+				prtd->lsm_client->event_mode = *mode;
+			else
+				dev_err(rtd->dev,
+					"%s: set event mode failed %d\n",
+					__func__, rc);
+		}
+		break;
+	}
+
 	default:
 		dev_dbg(rtd->dev,
 			"%s: Falling into default snd_lib_ioctl cmd 0x%x\n",
@@ -1058,12 +1213,21 @@
 	return rc;
 }
 #ifdef CONFIG_COMPAT
+
 struct snd_lsm_event_status32 {
 	u16 status;
 	u16 payload_size;
 	u8 payload[0];
 };
 
+struct snd_lsm_event_status_v3_32 {
+	u32 timestamp_lsw;
+	u32 timestamp_msw;
+	u16 status;
+	u16 payload_size;
+	u8 payload[0];
+};
+
 struct snd_lsm_sound_model_v2_32 {
 	compat_uptr_t data;
 	compat_uptr_t confidence_level;
@@ -1085,7 +1249,7 @@
 	u32 param_id;
 	u32 param_size;
 	compat_uptr_t param_data;
-	enum LSM_PARAM_TYPE param_type;
+	uint32_t param_type;
 };
 
 struct snd_lsm_module_params_32 {
@@ -1095,14 +1259,14 @@
 };
 
 enum {
-	SNDRV_LSM_EVENT_STATUS32 =
-		_IOW('U', 0x02, struct snd_lsm_event_status32),
 	SNDRV_LSM_REG_SND_MODEL_V2_32 =
 		_IOW('U', 0x07, struct snd_lsm_sound_model_v2_32),
 	SNDRV_LSM_SET_PARAMS_32 =
 		_IOW('U', 0x0A, struct snd_lsm_detection_params_32),
 	SNDRV_LSM_SET_MODULE_PARAMS_32 =
 		_IOW('U', 0x0B, struct snd_lsm_module_params_32),
+	SNDRV_LSM_EVENT_STATUS_V3_32 =
+		_IOW('U', 0x0F, struct snd_lsm_event_status_v3_32),
 };
 
 static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
@@ -1126,15 +1290,18 @@
 	rtd = substream->private_data;
 	prtd = runtime->private_data;
 
+	mutex_lock(&prtd->lsm_api_lock);
+
 	switch (cmd) {
-	case SNDRV_LSM_EVENT_STATUS32: {
-		struct snd_lsm_event_status32 userarg32, *user32 = NULL;
-		struct snd_lsm_event_status *user = NULL;
+	case SNDRV_LSM_EVENT_STATUS: {
+		struct snd_lsm_event_status *user = NULL, userarg32;
+		struct snd_lsm_event_status *user32 = NULL;
 
 		if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
 			dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
-				__func__, "SNDRV_LSM_EVENT_STATUS32");
-			return -EFAULT;
+				__func__, "SNDRV_LSM_EVENT_STATUS");
+			err = -EFAULT;
+			goto done;
 		}
 
 		if (userarg32.payload_size >
@@ -1142,7 +1309,8 @@
 			pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
 				__func__, userarg32.payload_size,
 				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		size = sizeof(*user) + userarg32.payload_size;
@@ -1151,7 +1319,8 @@
 			dev_err(rtd->dev,
 				"%s: Allocation failed event status size %d\n",
 				__func__, size);
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		} else {
 			cmd = SNDRV_LSM_EVENT_STATUS;
 			user->payload_size = userarg32.payload_size;
@@ -1193,6 +1362,73 @@
 		break;
 	}
 
+	case SNDRV_LSM_EVENT_STATUS_V3_32: {
+		struct snd_lsm_event_status_v3_32 userarg32, *user32 = NULL;
+		struct snd_lsm_event_status_v3 *user = NULL;
+
+		if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
+			dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
+				__func__, "SNDRV_LSM_EVENT_STATUS_V3_32");
+			return -EFAULT;
+		}
+
+		if (userarg32.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, userarg32.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			return -EINVAL;
+		}
+
+		size = sizeof(*user) + userarg32.payload_size;
+		user = kmalloc(size, GFP_KERNEL);
+		if (!user) {
+			dev_err(rtd->dev,
+				"%s: Allocation failed event status size %d\n",
+				__func__, size);
+			return -EFAULT;
+		}
+		cmd = SNDRV_LSM_EVENT_STATUS_V3;
+		user->payload_size = userarg32.payload_size;
+		err = msm_lsm_ioctl_shared(substream, cmd, user);
+
+		/* Update size with actual payload size */
+		size = sizeof(userarg32) + user->payload_size;
+		if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+			dev_err(rtd->dev,
+				"%s: write verify failed size %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		if (!err) {
+			user32 = kmalloc(size, GFP_KERNEL);
+			if (!user32) {
+				dev_err(rtd->dev,
+					"%s: Allocation failed for user event status, size %d\n",
+					__func__, size);
+				err = -EFAULT;
+			} else {
+				user32->timestamp_lsw = user->timestamp_lsw;
+				user32->timestamp_msw = user->timestamp_msw;
+				user32->status = user->status;
+				user32->payload_size = user->payload_size;
+				memcpy(user32->payload,
+				user->payload, user32->payload_size);
+			}
+		}
+		if (!err && (copy_to_user(arg, user32, size))) {
+			dev_err(rtd->dev, "%s: failed to copy payload %d",
+				__func__, size);
+			err = -EFAULT;
+		}
+		kfree(user);
+		kfree(user32);
+		if (err)
+			dev_err(rtd->dev, "%s: lsmevent failed %d",
+				__func__, err);
+		break;
+	}
+
 	case SNDRV_LSM_REG_SND_MODEL_V2_32: {
 		struct snd_lsm_sound_model_v2_32 snd_modelv232;
 		struct snd_lsm_sound_model_v2 snd_modelv2;
@@ -1201,7 +1437,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "REG_SND_MODEL_V2");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&snd_modelv232, arg,
@@ -1242,7 +1479,7 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "SET_PARAMS_32");
-			return -EINVAL;
+			err = -EINVAL;
 		}
 
 		if (copy_from_user(&det_params32, arg,
@@ -1285,14 +1522,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if not using topology\n",
 				__func__, "SET_MODULE_PARAMS_32");
-			return -EINVAL;
-		}
-
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: %s: No Param data to set\n",
-				__func__, "SET_MODULE_PARAMS_32");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&p_data_32, arg,
@@ -1301,7 +1532,8 @@
 				"%s: %s: copy_from_user failed, size = %zd\n",
 				__func__, "SET_MODULE_PARAMS_32",
 				sizeof(p_data_32));
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		p_data.params = compat_ptr(p_data_32.params);
@@ -1313,7 +1545,8 @@
 				"%s: %s: Invalid num_params %d\n",
 				__func__, "SET_MODULE_PARAMS_32",
 				p_data.num_params);
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (p_data.data_size !=
@@ -1322,15 +1555,18 @@
 				"%s: %s: Invalid size %d\n",
 				__func__, "SET_MODULE_PARAMS_32",
 				p_data.data_size);
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		p_size = sizeof(struct lsm_params_info_32) *
 			 p_data.num_params;
 
 		params32 = kzalloc(p_size, GFP_KERNEL);
-		if (!params32)
-			return -ENOMEM;
+		if (!params32) {
+			err = -ENOMEM;
+			goto done;
+		}
 
 		p_size = sizeof(struct lsm_params_info) * p_data.num_params;
 		params = kzalloc(p_size, GFP_KERNEL);
@@ -1339,7 +1575,8 @@
 				"%s: no memory for params, size = %zd\n",
 				__func__, p_size);
 			kfree(params32);
-			return -ENOMEM;
+			err = -ENOMEM;
+			goto done;
 		}
 
 		if (copy_from_user(params32, p_data.params,
@@ -1349,7 +1586,8 @@
 				__func__, "params32", p_data.data_size);
 			kfree(params32);
 			kfree(params);
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		p_info_32 = (struct lsm_params_info_32 *) params32;
@@ -1375,10 +1613,25 @@
 		kfree(params32);
 		break;
 	}
+	case SNDRV_LSM_REG_SND_MODEL_V2:
+	case SNDRV_LSM_SET_PARAMS:
+	case SNDRV_LSM_SET_MODULE_PARAMS:
+		/*
+		 * In the ideal case, compat_ioctl should never be called
+		 * with the above unlocked ioctl commands. Print an error
+		 * and return an error if it is.
+		 */
+		dev_err(rtd->dev,
+			"%s: Invalid cmd for compat_ioctl\n",
+			__func__);
+		err = -EINVAL;
+		break;
 	default:
 		err = msm_lsm_ioctl_shared(substream, cmd, arg);
 		break;
 	}
+done:
+	mutex_unlock(&prtd->lsm_api_lock);
 	return err;
 }
 #else
@@ -1390,7 +1643,6 @@
 {
 	int err = 0;
 	u32 size = 0;
-	struct snd_lsm_session_data session_data;
 	struct snd_pcm_runtime *runtime;
 	struct snd_soc_pcm_runtime *rtd;
 	struct lsm_priv *prtd;
@@ -1404,27 +1656,8 @@
 	prtd = runtime->private_data;
 	rtd = substream->private_data;
 
+	mutex_lock(&prtd->lsm_api_lock);
 	switch (cmd) {
-	case SNDRV_LSM_SET_SESSION_DATA:
-		dev_dbg(rtd->dev,
-			"%s: SNDRV_LSM_SET_SESSION_DATA\n",
-			__func__);
-		if (copy_from_user(&session_data, (void *)arg,
-				   sizeof(struct snd_lsm_session_data))) {
-			err = -EFAULT;
-			dev_err(rtd->dev,
-				"%s: copy from user failed, size %zd\n",
-				__func__, sizeof(struct snd_lsm_session_data));
-			break;
-		}
-		if (!err)
-			err = msm_lsm_ioctl_shared(substream,
-						   cmd, &session_data);
-		if (err)
-			dev_err(rtd->dev,
-				"%s REG_SND_MODEL failed err %d\n",
-				__func__, err);
-		break;
 	case SNDRV_LSM_REG_SND_MODEL_V2: {
 		struct snd_lsm_sound_model_v2 snd_model_v2;
 
@@ -1432,14 +1665,10 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "REG_SND_MODEL_V2");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: Invalid params snd_model\n", __func__);
-			return -EINVAL;
-		}
 		if (copy_from_user(&snd_model_v2, arg, sizeof(snd_model_v2))) {
 			err = -EFAULT;
 			dev_err(rtd->dev,
@@ -1464,16 +1693,11 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if using topology\n",
 				__func__, "SET_PARAMS");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		pr_debug("%s: SNDRV_LSM_SET_PARAMS\n", __func__);
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: %s, Invalid params\n",
-				__func__, "SNDRV_LSM_SET_PARAMS");
-			return -EINVAL;
-		}
 
 		if (copy_from_user(&det_params, arg,
 				   sizeof(det_params))) {
@@ -1491,7 +1715,8 @@
 			dev_err(rtd->dev,
 				"%s: LSM_SET_PARAMS failed, err %d\n",
 				__func__, err);
-		return err;
+
+		goto done;
 	}
 
 	case SNDRV_LSM_SET_MODULE_PARAMS: {
@@ -1503,14 +1728,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: not supported if not using topology\n",
 				__func__, "SET_MODULE_PARAMS");
-			return -EINVAL;
-		}
-
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: %s: No Param data to set\n",
-				__func__, "SET_MODULE_PARAMS");
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		if (copy_from_user(&p_data, arg,
@@ -1518,7 +1737,8 @@
 			dev_err(rtd->dev,
 				"%s: %s: copy_from_user failed, size = %zd\n",
 				__func__, "p_data", sizeof(p_data));
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -1526,7 +1746,8 @@
 				"%s: %s: Invalid num_params %d\n",
 				__func__, "SET_MODULE_PARAMS",
 				p_data.num_params);
-			return -EINVAL;
+			err = -EINVAL;
+			goto done;
 		}
 
 		p_size = p_data.num_params *
@@ -1537,12 +1758,15 @@
 				"%s: %s: Invalid size %zd\n",
 				__func__, "SET_MODULE_PARAMS", p_size);
 
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		params = kzalloc(p_size, GFP_KERNEL);
-		if (!params)
-			return -ENOMEM;
+		if (!params) {
+			err = -ENOMEM;
+			goto done;
+		}
 
 		if (copy_from_user(params, p_data.params,
 				   p_data.data_size)) {
@@ -1550,7 +1774,8 @@
 				"%s: %s: copy_from_user failed, size = %d\n",
 				__func__, "params", p_data.data_size);
 			kfree(params);
-			return -EFAULT;
+			err = -EFAULT;
+			goto done;
 		}
 
 		err = msm_lsm_process_params(substream, &p_data, params);
@@ -1567,15 +1792,72 @@
 
 		dev_dbg(rtd->dev,
 			"%s: SNDRV_LSM_EVENT_STATUS\n", __func__);
+		if (copy_from_user(&userarg, arg, sizeof(userarg))) {
+			dev_err(rtd->dev,
+				"%s: err copyuser event_status\n",
+				__func__);
+			err = -EFAULT;
+			goto done;
+		}
+
+		if (userarg.payload_size >
+		    LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+			pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+				__func__, userarg.payload_size,
+				LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+			err = -EINVAL;
+			goto done;
+		}
+
+		size = sizeof(struct snd_lsm_event_status) +
+		userarg.payload_size;
+		user = kmalloc(size, GFP_KERNEL);
+		if (!user) {
+			dev_err(rtd->dev,
+				"%s: Allocation failed event status size %d\n",
+				__func__, size);
+			err = -EFAULT;
+			goto done;
+		}
+		user->payload_size = userarg.payload_size;
+		err = msm_lsm_ioctl_shared(substream, cmd, user);
+
+		/* Update size with actual payload size */
+		size = sizeof(*user) + user->payload_size;
+		if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+			dev_err(rtd->dev,
+				"%s: write verify failed size %d\n",
+				__func__, size);
+			err = -EFAULT;
+		}
+		if (!err && (copy_to_user(arg, user, size))) {
+			dev_err(rtd->dev,
+				"%s: failed to copy payload %d",
+				__func__, size);
+			err = -EFAULT;
+		}
+		kfree(user);
+		if (err)
+			dev_err(rtd->dev,
+				"%s: lsmevent failed %d", __func__, err);
+		goto done;
+	}
+
+	case SNDRV_LSM_EVENT_STATUS_V3: {
+		struct snd_lsm_event_status_v3 *user = NULL;
+		struct snd_lsm_event_status_v3 userarg;
+
+		dev_dbg(rtd->dev,
+			"%s: SNDRV_LSM_EVENT_STATUS_V3\n", __func__);
 		if (!arg) {
 			dev_err(rtd->dev,
-				"%s: Invalid params event status\n",
+				"%s: Invalid params event_status_v3\n",
 				__func__);
 			return -EINVAL;
 		}
 		if (copy_from_user(&userarg, arg, sizeof(userarg))) {
 			dev_err(rtd->dev,
-				"%s: err copyuser event_status\n",
+				"%s: err copyuser event_status_v3\n",
 				__func__);
 			return -EFAULT;
 		}
@@ -1588,8 +1870,8 @@
 			return -EINVAL;
 		}
 
-		size = sizeof(struct snd_lsm_event_status) +
-		userarg.payload_size;
+		size = sizeof(struct snd_lsm_event_status_v3) +
+			userarg.payload_size;
 		user = kmalloc(size, GFP_KERNEL);
 		if (!user) {
 			dev_err(rtd->dev,
@@ -1617,13 +1899,16 @@
 		kfree(user);
 		if (err)
 			dev_err(rtd->dev,
-				"%s: lsmevent failed %d", __func__, err);
-		return err;
+				"%s: lsm_event_v3 failed %d", __func__, err);
+		break;
 	}
+
 	default:
 		err = msm_lsm_ioctl_shared(substream, cmd, arg);
 	break;
 	}
+done:
+	mutex_unlock(&prtd->lsm_api_lock);
 	return err;
 }
 
@@ -1640,6 +1925,7 @@
 		       __func__);
 		return -ENOMEM;
 	}
+	mutex_init(&prtd->lsm_api_lock);
 	spin_lock_init(&prtd->event_lock);
 	init_waitqueue_head(&prtd->event_wait);
 	init_waitqueue_head(&prtd->period_wait);
@@ -1687,6 +1973,11 @@
 		return -ENOMEM;
 	}
 	prtd->lsm_client->opened = false;
+	prtd->lsm_client->session_state = IDLE;
+	prtd->lsm_client->poll_enable = true;
+	prtd->lsm_client->perf_mode = 0;
+	prtd->lsm_client->event_mode = LSM_EVENT_NON_TIME_STAMP_MODE;
+
 	return 0;
 }
 
@@ -1695,6 +1986,7 @@
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct lsm_priv *prtd = runtime->private_data;
 	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
 
 	if (!substream->private_data) {
 		pr_err("%s: Invalid private_data", __func__);
@@ -1708,9 +2000,30 @@
 			"%s: LSM client data ptr is NULL\n", __func__);
 		return -EINVAL;
 	}
+
+	if (q6lsm_set_media_fmt_params(prtd->lsm_client))
+		dev_dbg(rtd->dev,
+			"%s: failed to set lsm media fmt params\n", __func__);
+
+	if (prtd->lsm_client->session_state == IDLE) {
+		ret = msm_pcm_routing_reg_phy_compr_stream(
+				rtd->dai_link->id,
+				prtd->lsm_client->perf_mode,
+				prtd->lsm_client->session,
+				SNDRV_PCM_STREAM_CAPTURE,
+				LISTEN);
+		if (ret) {
+			dev_err(rtd->dev,
+				"%s: register phy compr stream failed %d\n",
+					__func__, ret);
+			return ret;
+		}
+	}
+
+	prtd->lsm_client->session_state = RUNNING;
 	prtd->lsm_client->started = false;
 	runtime->private_data = prtd;
-	return 0;
+	return ret;
 }
 
 static int msm_lsm_close(struct snd_pcm_substream *substream)
@@ -1759,6 +2072,9 @@
 				 __func__);
 	}
 
+	msm_pcm_routing_dereg_phy_stream(rtd->dai_link->id,
+					SNDRV_PCM_STREAM_CAPTURE);
+
 	if (prtd->lsm_client->opened) {
 		q6lsm_close(prtd->lsm_client);
 		prtd->lsm_client->opened = false;
@@ -1769,6 +2085,7 @@
 	kfree(prtd->event_status);
 	prtd->event_status = NULL;
 	spin_unlock_irqrestore(&prtd->event_lock, flags);
+	mutex_destroy(&prtd->lsm_api_lock);
 	kfree(prtd);
 	runtime->private_data = NULL;
 
@@ -1780,7 +2097,7 @@
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct lsm_priv *prtd = runtime->private_data;
-	struct lsm_lab_hw_params *hw_params = NULL;
+	struct lsm_hw_params *hw_params = NULL;
 	struct snd_soc_pcm_runtime *rtd;
 
 	if (!substream->private_data) {
@@ -1796,25 +2113,36 @@
 		return -EINVAL;
 	}
 	hw_params = &prtd->lsm_client->hw_params;
-	hw_params->sample_rate = params_rate(params);
-	hw_params->sample_size =
-	(params_format(params) == SNDRV_PCM_FORMAT_S16_LE) ? 16 : 0;
+	hw_params->num_chs = params_channels(params);
 	hw_params->period_count = params_periods(params);
-	if (hw_params->sample_rate != 16000 || hw_params->sample_size != 16 ||
-		hw_params->period_count == 0) {
+	hw_params->sample_rate = params_rate(params);
+	if (((hw_params->sample_rate != 16000) &&
+		(hw_params->sample_rate != 48000)) ||
+		(hw_params->period_count == 0)) {
 		dev_err(rtd->dev,
-			"%s: Invalid params sample rate %d sample size %d period count %d",
+			"%s: Invalid Params sample rate %d period count %d\n",
 			__func__, hw_params->sample_rate,
-			hw_params->sample_size,
-		hw_params->period_count);
+			hw_params->period_count);
 		return -EINVAL;
 	}
+
+	if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE) {
+		hw_params->sample_size = 16;
+	} else if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE) {
+		hw_params->sample_size = 24;
+	} else {
+		dev_err(rtd->dev, "%s: Invalid Format 0x%x\n",
+			__func__, params_format(params));
+		return -EINVAL;
+	}
+
 	hw_params->buf_sz = params_buffer_bytes(params) /
-	hw_params->period_count;
+			hw_params->period_count;
 	dev_dbg(rtd->dev,
-		"%s: sample rate %d sample size %d buffer size %d period count %d\n",
-		__func__, hw_params->sample_rate, hw_params->sample_size,
-		hw_params->buf_sz, hw_params->period_count);
+		"%s: channels %d sample rate %d sample size %d buffer size %d period count %d\n",
+		__func__, hw_params->num_chs, hw_params->sample_rate,
+		hw_params->sample_size, hw_params->buf_sz,
+		hw_params->period_count);
 	return 0;
 }
 
@@ -1910,6 +2238,105 @@
 	return 0;
 }
 
+static int msm_lsm_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
+	int app_type;
+	int acdb_dev_id;
+	int sample_rate;
+
+	app_type = ucontrol->value.integer.value[0];
+	acdb_dev_id = ucontrol->value.integer.value[1];
+	sample_rate = ucontrol->value.integer.value[2];
+
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+
+	return 0;
+}
+
+static int msm_lsm_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
+	int app_type;
+	int acdb_dev_id;
+	int sample_rate;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = app_type;
+	ucontrol->value.integer.value[1] = acdb_dev_id;
+	ucontrol->value.integer.value[2] = sample_rate;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+done:
+	return ret;
+}
+
+static int msm_lsm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_usr *app_type_info;
+	struct snd_kcontrol *kctl;
+	const char *mixer_ctl_name	= "Listen Stream";
+	const char *deviceNo		= "NN";
+	const char *suffix		= "App Type Cfg";
+	int ctl_len, ret = 0;
+
+	ctl_len = strlen(mixer_ctl_name) + 1 +
+			strlen(deviceNo) + 1 + strlen(suffix) + 1;
+	pr_debug("%s: Listen app type cntrl add\n", __func__);
+	ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+				NULL, 1, ctl_len, rtd->dai_link->id,
+				&app_type_info);
+	if (ret < 0) {
+		pr_err("%s: Listen app type cntrl add failed: %d\n",
+			__func__, ret);
+		return ret;
+	}
+	kctl = app_type_info->kctl;
+	snprintf(kctl->id.name, ctl_len, "%s %d %s",
+		mixer_ctl_name, rtd->pcm->device, suffix);
+	kctl->put = msm_lsm_app_type_cfg_ctl_put;
+	kctl->get = msm_lsm_app_type_cfg_ctl_get;
+	return 0;
+}
+
+static int msm_lsm_add_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+
+	ret = msm_lsm_add_app_type_controls(rtd);
+	if (ret)
+		pr_err("%s, add app type controls failed:%d\n", __func__, ret);
+
+	return ret;
+}
+
 static const struct snd_pcm_ops msm_lsm_ops = {
 	.open           = msm_lsm_open,
 	.close          = msm_lsm_close,
@@ -1924,11 +2351,16 @@
 static int msm_asoc_lsm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
 
 	if (!card->dev->coherent_dma_mask)
 		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-	return 0;
+	ret = msm_lsm_add_controls(rtd);
+	if (ret)
+		pr_err("%s, kctl add failed:%d\n", __func__, ret);
+
+	return ret;
 }
 
 static int msm_asoc_lsm_probe(struct snd_soc_platform *platform)
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
index 8d43186..ab9b310 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -682,6 +682,7 @@
 	mutex_unlock(&prtd->lock);
 	prtd->prepared--;
 	kfree(prtd);
+	runtime->private_data = NULL;
 	return 0;
 }
 static int msm_afe_prepare(struct snd_pcm_substream *substream)
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
index 15809ce..f668e95 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
@@ -25,6 +25,7 @@
 #include <sound/control.h>
 #include <sound/tlv.h>
 #include <asm/dma.h>
+#include <sound/q6audio-v2.h>
 
 #include "msm-pcm-routing-v2.h"
 
@@ -67,6 +68,10 @@
 
 static u32 hfp_tx_mute;
 
+struct msm_pcm_pdata {
+	int perf_mode;
+};
+
 static void stop_pcm(struct msm_pcm_loopback *pcm);
 static int msm_pcm_loopback_get_session(struct snd_soc_pcm_runtime *rtd,
 					struct msm_pcm_loopback **pcm);
@@ -245,6 +250,7 @@
 	struct msm_pcm_routing_evt event;
 	struct asm_session_mtmx_strtr_param_window_v2_t asm_mtmx_strtr_window;
 	uint32_t param_id;
+	struct msm_pcm_pdata *pdata;
 
 	ret =  msm_pcm_loopback_get_session(rtd, &pcm);
 	if (ret)
@@ -270,6 +276,15 @@
 		if (pcm->audio_client != NULL)
 			stop_pcm(pcm);
 
+		pdata = (struct msm_pcm_pdata *)
+			dev_get_drvdata(rtd->platform->dev);
+		if (!pdata) {
+			dev_err(rtd->platform->dev,
+				"%s: platform data not populated\n", __func__);
+			mutex_unlock(&pcm->lock);
+			return -EINVAL;
+		}
+
 		pcm->audio_client = q6asm_audio_client_alloc(
 				(app_cb)msm_pcm_loopback_event_handler, pcm);
 		if (!pcm->audio_client) {
@@ -279,7 +294,7 @@
 			return -ENOMEM;
 		}
 		pcm->session_id = pcm->audio_client->session;
-		pcm->audio_client->perf_mode = false;
+		pcm->audio_client->perf_mode = pdata->perf_mode;
 		ret = q6asm_open_loopback_v2(pcm->audio_client,
 					     bits_per_sample);
 		if (ret < 0) {
@@ -542,48 +557,45 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate = 48000;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s: Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		return -EINVAL;
-	}
-
 	app_type = ucontrol->value.integer.value[0];
 	acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
 		sample_rate = ucontrol->value.integer.value[2];
-	pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
-		__func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_RX);
-	msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
-			acdb_dev_id, sample_rate, SESSION_TYPE_RX);
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
 
-	return 0;
+	return ret;
 }
 
 static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
 	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s: Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_RX,
-		&app_type, &acdb_dev_id, &sample_rate);
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -593,8 +605,8 @@
 	ucontrol->value.integer.value[0] = app_type;
 	ucontrol->value.integer.value[1] = acdb_dev_id;
 	ucontrol->value.integer.value[2] = sample_rate;
-	pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fe_id, SESSION_TYPE_RX,
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
 		app_type, acdb_dev_id, sample_rate);
 done:
 	return ret;
@@ -604,48 +616,45 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate = 48000;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s: Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		return -EINVAL;
-	}
-
 	app_type = ucontrol->value.integer.value[0];
 	acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
 		sample_rate = ucontrol->value.integer.value[2];
-	pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
-		__func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
-	msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
-			acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
 
-	return 0;
+	return ret;
 }
 
 static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
 	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s: Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
-		&app_type, &acdb_dev_id, &sample_rate);
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -655,8 +664,8 @@
 	ucontrol->value.integer.value[0] = app_type;
 	ucontrol->value.integer.value[1] = acdb_dev_id;
 	ucontrol->value.integer.value[2] = sample_rate;
-	pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fe_id, SESSION_TYPE_TX,
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
 		app_type, acdb_dev_id, sample_rate);
 done:
 	return ret;
@@ -746,9 +755,23 @@
 
 static int msm_pcm_probe(struct platform_device *pdev)
 {
+	struct msm_pcm_pdata *pdata;
+
 	dev_dbg(&pdev->dev, "%s: dev name %s\n",
 		__func__, dev_name(&pdev->dev));
 
+	pdata = kzalloc(sizeof(struct msm_pcm_pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,msm-pcm-loopback-low-latency"))
+		pdata->perf_mode = LOW_LATENCY_PCM_MODE;
+	else
+		pdata->perf_mode = LEGACY_PCM_MODE;
+
+	dev_set_drvdata(&pdev->dev, pdata);
+
 	return snd_soc_register_platform(&pdev->dev,
 				   &msm_soc_platform);
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index ecf194f..9b7c6fb 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -139,6 +139,17 @@
 	.mask = 0,
 };
 
+static unsigned long msm_pcm_fe_topology[MSM_FRONTEND_DAI_MAX];
+
+/* default value is DTS (i.e. read from device tree) */
+static char const *msm_pcm_fe_topology_text[] = {
+	"DTS", "ULL", "ULL_PP", "LL" };
+
+static const struct soc_enum msm_pcm_fe_topology_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(msm_pcm_fe_topology_text),
+			    msm_pcm_fe_topology_text),
+};
+
 static void event_handler(uint32_t opcode,
 		uint32_t token, uint32_t *payload, void *priv)
 {
@@ -258,6 +269,8 @@
 	uint16_t bits_per_sample;
 	int ret;
 	int dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? IN : OUT;
+	unsigned long topology;
+	int perf_mode;
 
 	pdata = (struct msm_plat_data *)
 		dev_get_drvdata(soc_prtd->platform->dev);
@@ -268,11 +281,24 @@
 		return ret;
 	}
 
+	topology = msm_pcm_fe_topology[soc_prtd->dai_link->id];
+
+	if (!strcmp(msm_pcm_fe_topology_text[topology], "ULL_PP"))
+		perf_mode = ULL_POST_PROCESSING_PCM_MODE;
+	else if (!strcmp(msm_pcm_fe_topology_text[topology], "ULL"))
+		perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+	else if (!strcmp(msm_pcm_fe_topology_text[topology], "LL"))
+		perf_mode = LOW_LATENCY_PCM_MODE;
+	else
+		/* use the default from the device tree */
+		perf_mode = pdata->perf_mode;
+
 	/* need to set LOW_LATENCY_PCM_MODE for capture since
 	 * push mode does not support ULL
 	 */
 	prtd->audio_client->perf_mode = (dir == IN) ?
-					pdata->perf_mode :
+					perf_mode :
 					LOW_LATENCY_PCM_MODE;
 
 	/* rate and channels are sent to audio driver */
@@ -544,6 +570,8 @@
 					 SNDRV_PCM_STREAM_PLAYBACK :
 					 SNDRV_PCM_STREAM_CAPTURE);
 	kfree(prtd);
+	runtime->private_data = NULL;
+
 	return 0;
 }
 
@@ -721,6 +749,269 @@
 	return 0;
 }
 
+static int msm_pcm_fe_topology_info(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_info *uinfo)
+{
+	const struct soc_enum *e = &msm_pcm_fe_topology_enum[0];
+
+	return snd_ctl_enum_info(uinfo, 1, e->items, e->texts);
+}
+
+static int msm_pcm_fe_topology_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	unsigned long fe_id = kcontrol->private_value;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: %lu topology %s\n", __func__, fe_id,
+		 msm_pcm_fe_topology_text[msm_pcm_fe_topology[fe_id]]);
+	ucontrol->value.enumerated.item[0] = msm_pcm_fe_topology[fe_id];
+	return 0;
+}
+
+static int msm_pcm_fe_topology_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	unsigned long fe_id = kcontrol->private_value;
+	unsigned int item;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	item = ucontrol->value.enumerated.item[0];
+	if (item >= ARRAY_SIZE(msm_pcm_fe_topology_text)) {
+		pr_err("%s Received out of bound topology %u for fe_id %lu\n",
+		       __func__, item, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: %lu new topology %s\n", __func__, fe_id,
+		 msm_pcm_fe_topology_text[item]);
+	msm_pcm_fe_topology[fe_id] = item;
+	return 0;
+}
+
+static int msm_pcm_add_fe_topology_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "PCM_Dev";
+	const char *deviceNo       = "NN";
+	const char *topo_text      = "Topology";
+	char *mixer_str = NULL;
+	int ctl_len;
+	int ret;
+	struct snd_kcontrol_new topology_control[1] = {
+		{
+			.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+			.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+			.name =  "?",
+			.info =  msm_pcm_fe_topology_info,
+			.get = msm_pcm_fe_topology_get,
+			.put = msm_pcm_fe_topology_put,
+			.private_value = 0,
+		},
+	};
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
+		  strlen(topo_text) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str)
+		return -ENOMEM;
+
+	snprintf(mixer_str, ctl_len, "%s %d %s", mixer_ctl_name,
+		 rtd->pcm->device, topo_text);
+
+	topology_control[0].name = mixer_str;
+	topology_control[0].private_value = rtd->dai_link->id;
+	ret = snd_soc_add_platform_controls(rtd->platform, topology_control,
+					    ARRAY_SIZE(topology_control));
+	msm_pcm_fe_topology[rtd->dai_link->id] = 0;
+	kfree(mixer_str);
+	return ret;
+}
+
+static int msm_pcm_playback_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
+	int app_type;
+	int acdb_dev_id;
+	int sample_rate = 48000;
+
+	app_type = ucontrol->value.integer.value[0];
+	acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		sample_rate = ucontrol->value.integer.value[2];
+
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_playback_app_type_cfg_ctl_put failed, err %d\n",
+			__func__, ret);
+
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	return ret;
+}
+
+static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
+	int app_type;
+	int acdb_dev_id;
+	int sample_rate;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_playback_app_type_cfg_ctl_get failed, err: %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = app_type;
+	ucontrol->value.integer.value[1] = acdb_dev_id;
+	ucontrol->value.integer.value[2] = sample_rate;
+
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
+	int app_type;
+	int acdb_dev_id;
+	int sample_rate = 48000;
+
+	app_type = ucontrol->value.integer.value[0];
+	acdb_dev_id = ucontrol->value.integer.value[1];
+	if (ucontrol->value.integer.value[2] != 0)
+		sample_rate = ucontrol->value.integer.value[2];
+
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_capture_app_type_cfg_ctl_put failed, err: %d\n",
+			__func__, ret);
+
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+
+	return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
+	int app_type;
+	int acdb_dev_id;
+	int sample_rate;
+
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
+	if (ret < 0) {
+		pr_err("%s: msm_pcm_capture_app_type_cfg_ctl_get failed, err: %d\n",
+			__func__, ret);
+		goto done;
+	}
+
+	ucontrol->value.integer.value[0] = app_type;
+	ucontrol->value.integer.value[1] = acdb_dev_id;
+	ucontrol->value.integer.value[2] = sample_rate;
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_usr *app_type_info;
+	struct snd_kcontrol *kctl;
+	const char *playback_mixer_ctl_name = "Audio Stream";
+	const char *capture_mixer_ctl_name = "Audio Stream Capture";
+	const char *deviceNo = "NN";
+	const char *suffix = "App Type Cfg";
+	int ctl_len, ret = 0;
+
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+		ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 +
+				strlen(suffix) + 1;
+		pr_debug("%s: Playback app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+				NULL, 1, ctl_len, rtd->dai_link->id,
+				&app_type_info);
+		if (ret < 0) {
+			pr_err("%s: playback app type cntrl add failed, err: %d\n",
+				__func__, ret);
+			return ret;
+		}
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+			     playback_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_playback_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_playback_app_type_cfg_ctl_get;
+	}
+
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+		ctl_len = strlen(capture_mixer_ctl_name) + 1 +
+				strlen(deviceNo) + 1 + strlen(suffix) + 1;
+		pr_debug("%s: Capture app type cntrl add\n", __func__);
+		ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+				NULL, 1, ctl_len, rtd->dai_link->id,
+				&app_type_info);
+		if (ret < 0) {
+			pr_err("%s: capture app type cntrl add failed, err: %d\n",
+				__func__, ret);
+			return ret;
+		}
+		kctl = app_type_info->kctl;
+		snprintf(kctl->id.name, ctl_len, "%s %d %s",
+		 capture_mixer_ctl_name, rtd->pcm->device, suffix);
+		kctl->put = msm_pcm_capture_app_type_cfg_ctl_put;
+		kctl->get = msm_pcm_capture_app_type_cfg_ctl_get;
+	}
+
+	return 0;
+}
+
+
 static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
@@ -741,6 +1032,19 @@
 		pr_err("%s: Could not add pcm Volume Control %d\n",
 			__func__, ret);
 	}
+
+	ret = msm_pcm_add_fe_topology_control(rtd);
+	if (ret) {
+		pr_err("%s: Could not add pcm topology control %d\n",
+			__func__, ret);
+	}
+
+	ret = msm_pcm_add_app_type_controls(rtd);
+	if (ret) {
+		pr_err("%s: Could not add app type controls, err %d\n",
+			__func__, ret);
+	}
+
 	pcm->nonatomic = true;
 exit:
 	return ret;
@@ -778,8 +1082,12 @@
 
 		rc = of_property_read_string(pdev->dev.of_node,
 			"qcom,latency-level", &latency_level);
-		if (!rc && !strcmp(latency_level, "ultra"))
-			perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+		if (!rc) {
+			if (!strcmp(latency_level, "ultra"))
+				perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+			else if (!strcmp(latency_level, "ull-pp"))
+				perf_mode = ULL_POST_PROCESSING_PCM_MODE;
+		}
 	}
 
 	pdata = devm_kzalloc(&pdev->dev,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index e899f5e..1799d0d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -110,7 +110,7 @@
 /* Conventional and unconventional sample rate supported */
 static unsigned int supported_sample_rates[] = {
 	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
-	88200, 96000, 176400, 192000, 384000
+	88200, 96000, 176400, 192000, 352800, 384000
 };
 
 static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -286,6 +286,7 @@
 	struct msm_plat_data *pdata;
 	struct snd_pcm_hw_params *params;
 	int ret;
+	uint32_t fmt_type = FORMAT_LINEAR_PCM;
 	uint16_t bits_per_sample;
 	uint16_t sample_word_size;
 
@@ -334,38 +335,67 @@
 		sample_word_size = 16;
 		break;
 	}
+	if (prtd->compress_enable) {
+		fmt_type = FORMAT_GEN_COMPR;
+		pr_debug("%s: Compressed enabled!\n", __func__);
+		ret = q6asm_open_write_compressed(prtd->audio_client, fmt_type,
+				COMPRESSED_PASSTHROUGH_GEN);
+		if (ret < 0) {
+			pr_err("%s: q6asm_open_write_compressed failed (%d)\n",
+			__func__, ret);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
+	} else {
+		ret = q6asm_open_write_v4(prtd->audio_client,
+			fmt_type, bits_per_sample);
 
-	ret = q6asm_open_write_v4(prtd->audio_client,
-				  FORMAT_LINEAR_PCM, bits_per_sample);
+		if (ret < 0) {
+			pr_err("%s: q6asm_open_write_v4 failed (%d)\n",
+			__func__, ret);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
 
-	if (ret < 0) {
-		pr_err("%s: q6asm_open_write_v2 failed\n", __func__);
-		q6asm_audio_client_free(prtd->audio_client);
-		prtd->audio_client = NULL;
-		return -ENOMEM;
+		ret = q6asm_send_cal(prtd->audio_client);
+		if (ret < 0)
+			pr_debug("%s : Send cal failed : %d", __func__, ret);
 	}
-
-	ret = q6asm_send_cal(prtd->audio_client);
-	if (ret < 0)
-		pr_debug("%s : Send cal failed : %d", __func__, ret);
-
 	pr_debug("%s: session ID %d\n", __func__,
 			prtd->audio_client->session);
 	prtd->session_id = prtd->audio_client->session;
-	ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->id,
+
+	if (prtd->compress_enable) {
+		ret = msm_pcm_routing_reg_phy_compr_stream(
+				soc_prtd->dai_link->id,
+				prtd->audio_client->perf_mode,
+				prtd->session_id,
+				SNDRV_PCM_STREAM_PLAYBACK,
+				COMPRESSED_PASSTHROUGH_GEN);
+	} else {
+		ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->id,
 			prtd->audio_client->perf_mode,
 			prtd->session_id, substream->stream);
+	}
 	if (ret) {
 		pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
 		return ret;
 	}
-
-	ret = q6asm_media_format_block_multi_ch_pcm_v4(
+	if (prtd->compress_enable) {
+		ret = q6asm_media_format_block_gen_compr(
+			prtd->audio_client, runtime->rate,
+			runtime->channels, !prtd->set_channel_map,
+			prtd->channel_map, bits_per_sample);
+	} else {
+		ret = q6asm_media_format_block_multi_ch_pcm_v4(
 				prtd->audio_client, runtime->rate,
 				runtime->channels, !prtd->set_channel_map,
 				prtd->channel_map, bits_per_sample,
 				sample_word_size, ASM_LITTLE_ENDIAN,
 				DEFAULT_QF);
+	}
 	if (ret < 0)
 		pr_info("%s: CMD Format block failed\n", __func__);
 
@@ -424,7 +454,7 @@
 				prtd->audio_client->perf_mode);
 
 		ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
-				bits_per_sample);
+				bits_per_sample, false);
 		if (ret < 0) {
 			pr_err("%s: q6asm_open_read failed\n", __func__);
 			q6asm_audio_client_free(prtd->audio_client);
@@ -774,6 +804,8 @@
 	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
 						SNDRV_PCM_STREAM_PLAYBACK);
 	kfree(prtd);
+	runtime->private_data = NULL;
+
 	return 0;
 }
 
@@ -879,6 +911,7 @@
 	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
 		SNDRV_PCM_STREAM_CAPTURE);
 	kfree(prtd);
+	runtime->private_data = NULL;
 
 	return 0;
 }
@@ -1091,6 +1124,136 @@
 	return 0;
 }
 
+static int msm_pcm_compress_ctl_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0x2000;
+	return 0;
+}
+
+static int msm_pcm_compress_ctl_get(struct snd_kcontrol *kcontrol,
+		      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform = snd_soc_component_to_platform(comp);
+	struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+
+	if (!pdata) {
+		pr_err("%s pdata is NULL\n", __func__);
+		return -ENODEV;
+	}
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -EINVAL;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd)
+		ucontrol->value.integer.value[0] = prtd->compress_enable;
+	return 0;
+}
+
+static int msm_pcm_compress_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform = snd_soc_component_to_platform(comp);
+	struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+	int compress = ucontrol->value.integer.value[0];
+
+	if (!pdata) {
+		pr_err("%s pdata is NULL\n", __func__);
+		return -ENODEV;
+	}
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	pr_debug("%s: compress : 0x%x\n", __func__, compress);
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -EINVAL;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd) {
+		pr_debug("%s: setting compress flag to 0x%x\n",
+		__func__, compress);
+		prtd->compress_enable = compress;
+	}
+	return rc;
+}
+
+static int msm_pcm_add_compress_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ";
+	const char *mixer_ctl_end_name = " Compress";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len;
+	int ret = 0;
+	struct msm_plat_data *pdata;
+	struct snd_kcontrol_new pcm_compress_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_pcm_compress_ctl_info,
+		.get = msm_pcm_compress_ctl_get,
+		.put = msm_pcm_compress_ctl_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: NULL rtd\n", __func__);
+		return -EINVAL;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + strlen(deviceNo) +
+		  strlen(mixer_ctl_end_name) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+	if (!mixer_str)
+		return -ENOMEM;
+
+	snprintf(mixer_str, ctl_len, "%s%d%s", mixer_ctl_name,
+			rtd->pcm->device, mixer_ctl_end_name);
+
+	pcm_compress_control[0].name = mixer_str;
+	pcm_compress_control[0].private_value = rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	pdata = dev_get_drvdata(rtd->platform->dev);
+	if (pdata) {
+		if (!pdata->pcm) {
+			pdata->pcm = rtd->pcm;
+			snd_soc_add_platform_controls(rtd->platform,
+						      pcm_compress_control,
+						      ARRAY_SIZE
+						      (pcm_compress_control));
+			pr_debug("%s: add control success plt = %pK\n",
+				 __func__, rtd->platform);
+		}
+	} else {
+		pr_err("%s: NULL pdata\n", __func__);
+		ret = -EINVAL;
+	}
+	kfree(mixer_str);
+	return ret;
+}
+
 static int msm_pcm_chmap_ctl_put(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -1182,48 +1345,45 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate = 48000;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		return -EINVAL;
-	}
-
 	app_type = ucontrol->value.integer.value[0];
 	acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
 		sample_rate = ucontrol->value.integer.value[2];
-	pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
-		__func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_RX);
-	msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
-			acdb_dev_id, sample_rate, SESSION_TYPE_RX);
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
 
-	return 0;
+	return ret;
 }
 
 static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_RX;
+	int be_id = ucontrol->value.integer.value[3];
 	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_RX,
-		&app_type, &acdb_dev_id, &sample_rate);
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -1233,8 +1393,8 @@
 	ucontrol->value.integer.value[0] = app_type;
 	ucontrol->value.integer.value[1] = acdb_dev_id;
 	ucontrol->value.integer.value[2] = sample_rate;
-	pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fe_id, SESSION_TYPE_RX,
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
 		app_type, acdb_dev_id, sample_rate);
 done:
 	return ret;
@@ -1244,48 +1404,45 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
+	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate = 48000;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s: Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		return -EINVAL;
-	}
-
 	app_type = ucontrol->value.integer.value[0];
 	acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
 		sample_rate = ucontrol->value.integer.value[2];
-	pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
-		__func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
-	msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
-			acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+		__func__, fe_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, app_type,
+						      acdb_dev_id, sample_rate);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+			__func__, ret);
 
-	return 0;
+	return ret;
 }
 
 static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_value *ucontrol)
 {
 	u64 fe_id = kcontrol->private_value;
+	int session_type = SESSION_TYPE_TX;
+	int be_id = ucontrol->value.integer.value[3];
 	int ret = 0;
 	int app_type;
 	int acdb_dev_id;
 	int sample_rate;
 
-	pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
-	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
-		pr_err("%s: Received out of bounds fe_id %llu\n",
-			__func__, fe_id);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
-		&app_type, &acdb_dev_id, &sample_rate);
+	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &app_type,
+						      &acdb_dev_id,
+						      &sample_rate);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -1295,8 +1452,8 @@
 	ucontrol->value.integer.value[0] = app_type;
 	ucontrol->value.integer.value[1] = acdb_dev_id;
 	ucontrol->value.integer.value[2] = sample_rate;
-	pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fe_id, SESSION_TYPE_TX,
+	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fe_id, session_type, be_id,
 		app_type, acdb_dev_id, sample_rate);
 done:
 	return ret;
@@ -1388,6 +1545,11 @@
 		pr_err("%s: Could not add pcm Volume Control %d\n",
 			__func__, ret);
 
+	ret = msm_pcm_add_compress_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm Compress Control %d\n",
+			__func__, ret);
+
 	return ret;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
index 5290d34..3b3f048 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
@@ -109,6 +109,7 @@
 	int cmd_interrupt;
 	bool meta_data_mode;
 	uint32_t volume;
+	bool compress_enable;
 	/* array of frame info */
 	struct msm_audio_in_frame_info in_frame_info[CAPTURE_MAX_NUM_PERIODS];
 };
@@ -123,6 +124,7 @@
 
 struct msm_plat_data {
 	int perf_mode;
+	struct snd_pcm *pcm;
 };
 
 #endif /*_MSM_PCM_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index de7d790..465634b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -46,7 +46,15 @@
 #include "q6voice.h"
 #include "sound/q6lsm.h"
 
-static int get_cal_path(int path_type);
+#ifndef CONFIG_DOLBY_DAP
+#undef DOLBY_ADM_COPP_TOPOLOGY_ID
+#define DOLBY_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFE
+#endif
+
+#ifndef CONFIG_DOLBY_DS2
+#undef DS2_ADM_COPP_TOPOLOGY_ID
+#define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF
+#endif
 
 static struct mutex routing_lock;
 
@@ -54,14 +62,20 @@
 
 static int fm_switch_enable;
 static int hfp_switch_enable;
+static int int0_mi2s_switch_enable;
+static int int4_mi2s_switch_enable;
 static int pri_mi2s_switch_enable;
 static int sec_mi2s_switch_enable;
 static int tert_mi2s_switch_enable;
 static int quat_mi2s_switch_enable;
 static int fm_pcmrx_switch_enable;
-static int lsm_mux_slim_port;
+static int usb_switch_enable;
+static int lsm_port_index;
 static int slim0_rx_aanc_fb_port;
 static int msm_route_ec_ref_rx;
+static int msm_ec_ref_ch = 4;
+static int msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_ec_ref_sampling_rate = 48000;
 static uint32_t voc_session_id = ALL_SESSION_VSID;
 static int msm_route_ext_ec_ref;
 static bool is_custom_stereo_on;
@@ -75,6 +89,8 @@
 	MADSWAUDIO,
 };
 
+#define ADM_LSM_PORT_INDEX 9
+
 #define SLIMBUS_0_TX_TEXT "SLIMBUS_0_TX"
 #define SLIMBUS_1_TX_TEXT "SLIMBUS_1_TX"
 #define SLIMBUS_2_TX_TEXT "SLIMBUS_2_TX"
@@ -82,12 +98,17 @@
 #define SLIMBUS_4_TX_TEXT "SLIMBUS_4_TX"
 #define SLIMBUS_5_TX_TEXT "SLIMBUS_5_TX"
 #define TERT_MI2S_TX_TEXT "TERT_MI2S_TX"
+#define QUAT_MI2S_TX_TEXT "QUAT_MI2S_TX"
+#define ADM_LSM_TX_TEXT "ADM_LSM_TX"
+#define INT3_MI2S_TX_TEXT "INT3_MI2S_TX"
+
 #define LSM_FUNCTION_TEXT "LSM Function"
-static const char * const mad_audio_mux_text[] = {
+static const char * const lsm_port_text[] = {
 	"None",
 	SLIMBUS_0_TX_TEXT, SLIMBUS_1_TX_TEXT, SLIMBUS_2_TX_TEXT,
 	SLIMBUS_3_TX_TEXT, SLIMBUS_4_TX_TEXT, SLIMBUS_5_TX_TEXT,
-	TERT_MI2S_TX_TEXT
+	TERT_MI2S_TX_TEXT, QUAT_MI2S_TX_TEXT, ADM_LSM_TX_TEXT,
+	INT3_MI2S_TX_TEXT
 };
 
 struct msm_pcm_route_bdai_pp_params {
@@ -103,6 +124,18 @@
 	{DISPLAY_PORT_RX, 0, 0, 0},
 };
 
+/*
+ * The be_dai_name_table is passed to HAL so that it can specify the
+ * BE ID for the BE it wants to enable based on the name. Thus there
+ * is a matching table and structure in HAL that need to be updated
+ * if any changes to these are made.
+ */
+struct msm_pcm_route_bdai_name {
+	unsigned int be_id;
+	char be_name[LPASS_BE_NAME_MAX_LENGTH];
+};
+static struct msm_pcm_route_bdai_name be_dai_name_table[MSM_BACKEND_DAI_MAX];
+
 static int msm_routing_send_device_pp_params(int port_id,  int copp_idx);
 
 static int msm_routing_get_bit_width(unsigned int format)
@@ -110,6 +143,9 @@
 	int bit_width;
 
 	switch (format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
+		bit_width = 32;
+		break;
 	case SNDRV_PCM_FORMAT_S24_LE:
 	case SNDRV_PCM_FORMAT_S24_3LE:
 		bit_width = 24;
@@ -255,253 +291,261 @@
 
 #define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
 struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
-	{ PRIMARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
-	{ PRIMARY_I2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
-	{ SLIMBUS_0_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
-	{ SLIMBUS_0_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
-	{ HDMI_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
-	{ INT_BT_SCO_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
-	{ INT_BT_SCO_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
-	{ INT_FM_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
-	{ INT_FM_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
-	{ RT_PROXY_PORT_001_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
-	{ RT_PROXY_PORT_001_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
-	{ AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ PRIMARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
+	{ PRIMARY_I2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
+	{ SLIMBUS_0_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
+	{ SLIMBUS_0_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
+	{ HDMI_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
+	{ INT_BT_SCO_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
+	{ INT_BT_SCO_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
+	{ INT_FM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
+	{ INT_FM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
+	{ RT_PROXY_PORT_001_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	  LPASS_BE_AFE_PCM_RX},
+	{ RT_PROXY_PORT_001_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	  LPASS_BE_AFE_PCM_TX},
+	{ AFE_PORT_ID_PRIMARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_AUXPCM_RX},
-	{ AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_AUXPCM_TX},
-	{ VOICE_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ VOICE_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_VOICE_PLAYBACK_TX},
-	{ VOICE2_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ VOICE2_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_VOICE2_PLAYBACK_TX},
-	{ VOICE_RECORD_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
-	{ VOICE_RECORD_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
-	{ MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
-	{ MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
-	{ SECONDARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
-	{ SLIMBUS_1_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
-	{ SLIMBUS_1_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
-	{ SLIMBUS_2_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
-	{ SLIMBUS_2_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_TX},
-	{ SLIMBUS_3_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
-	{ SLIMBUS_3_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
-	{ SLIMBUS_4_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
-	{ SLIMBUS_4_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
-	{ SLIMBUS_5_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
-	{ SLIMBUS_5_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
-	{ SLIMBUS_6_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
-	{ SLIMBUS_6_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
-	{ SLIMBUS_7_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
-	{ SLIMBUS_7_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
-	{ SLIMBUS_8_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
-	{ SLIMBUS_8_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
-	{ SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
-	{ SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
-	{ SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
-	{ AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ VOICE_RECORD_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	  LPASS_BE_INCALL_RECORD_RX},
+	{ VOICE_RECORD_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	  LPASS_BE_INCALL_RECORD_TX},
+	{ MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
+	{ MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
+	{ SECONDARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
+	{ SLIMBUS_1_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
+	{ SLIMBUS_1_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
+	{ SLIMBUS_2_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
+	{ SLIMBUS_2_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_TX},
+	{ SLIMBUS_3_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
+	{ SLIMBUS_3_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
+	{ SLIMBUS_4_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
+	{ SLIMBUS_4_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
+	{ SLIMBUS_5_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
+	{ SLIMBUS_5_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
+	{ SLIMBUS_6_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
+	{ SLIMBUS_6_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
+	{ SLIMBUS_7_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
+	{ SLIMBUS_7_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
+	{ SLIMBUS_8_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
+	{ SLIMBUS_8_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
+	{ AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_MI2S_RX},
-	{ AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_MI2S_TX},
-	{ AFE_PORT_ID_SECONDARY_MI2S_RX,  0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_MI2S_RX,  0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_MI2S_RX},
-	{ AFE_PORT_ID_SECONDARY_MI2S_TX,  0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_MI2S_TX,  0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_MI2S_TX},
-	{ AFE_PORT_ID_PRIMARY_MI2S_RX,    0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_MI2S_RX,    0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_MI2S_RX},
-	{ AFE_PORT_ID_PRIMARY_MI2S_TX,    0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_MI2S_TX,    0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_MI2S_TX},
-	{ AFE_PORT_ID_TERTIARY_MI2S_RX,   0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_MI2S_RX,   0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_MI2S_RX},
-	{ AFE_PORT_ID_TERTIARY_MI2S_TX,   0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_MI2S_TX},
-	{ AUDIO_PORT_ID_I2S_RX,           0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AUDIO_PORT_ID_I2S_RX,           0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_AUDIO_I2S_RX},
-	{ AFE_PORT_ID_SECONDARY_PCM_RX,	  0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_PCM_RX,	  0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_AUXPCM_RX},
-	{ AFE_PORT_ID_SECONDARY_PCM_TX,   0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_PCM_TX,   0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_AUXPCM_TX},
-	{ AFE_PORT_ID_SPDIF_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
-	{ AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SPDIF_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
+	{ AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_MI2S_RX_SD1},
-	{ AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUINARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUIN_MI2S_RX},
-	{ AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUINARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUIN_MI2S_TX},
-	{ AFE_PORT_ID_SENARY_MI2S_TX,   0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SENARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SENARY_MI2S_TX},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_0},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_0},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_1},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_1},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_2},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_2},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_3},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_3},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_4},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_4},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_5},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_5},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_6},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_6},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_RX_7},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_PRI_TDM_TX_7},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_0},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_0},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_1},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_1},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_2},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_2},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_3},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_3},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_4},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_4},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_5},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_5},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_6},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_6},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_RX_7},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_SEC_TDM_TX_7},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_0},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_0},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_1},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_1},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_2},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_2},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_3},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_3},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_4},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_4},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_5},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_5},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_6},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_6},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_RX_7},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_TDM_TX_7},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_0},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_0},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_1},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_1},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_2},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_2},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_3},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_3},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_4},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_4},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_5},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_5},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_6},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_6},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_RX_7},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_TDM_TX_7},
-	{ INT_BT_A2DP_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
-	{ AFE_PORT_ID_USB_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
-	{ AFE_PORT_ID_USB_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
-	{ DISPLAY_PORT_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
-	{ AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ INT_BT_A2DP_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
+	{ AFE_PORT_ID_USB_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	  LPASS_BE_USB_AUDIO_RX},
+	{ AFE_PORT_ID_USB_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	  LPASS_BE_USB_AUDIO_TX},
+	{ DISPLAY_PORT_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
+	{ AFE_PORT_ID_TERTIARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_AUXPCM_RX},
-	{ AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_TERT_AUXPCM_TX},
-	{ AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_AUXPCM_RX},
-	{ AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_QUAT_AUXPCM_TX},
-	{ AFE_PORT_ID_INT0_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT0_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT0_MI2S_RX},
-	{ AFE_PORT_ID_INT0_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT0_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT0_MI2S_TX},
-	{ AFE_PORT_ID_INT1_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT1_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT1_MI2S_RX},
-	{ AFE_PORT_ID_INT1_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT1_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT1_MI2S_TX},
-	{ AFE_PORT_ID_INT2_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT2_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT2_MI2S_RX},
-	{ AFE_PORT_ID_INT2_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT2_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT2_MI2S_TX},
-	{ AFE_PORT_ID_INT3_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT3_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT3_MI2S_RX},
-	{ AFE_PORT_ID_INT3_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT3_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT3_MI2S_TX},
-	{ AFE_PORT_ID_INT4_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT4_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT4_MI2S_RX},
-	{ AFE_PORT_ID_INT4_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT4_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT4_MI2S_TX},
-	{ AFE_PORT_ID_INT5_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT5_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT5_MI2S_RX},
-	{ AFE_PORT_ID_INT5_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT5_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT5_MI2S_TX},
-	{ AFE_PORT_ID_INT6_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT6_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT6_MI2S_RX},
-	{ AFE_PORT_ID_INT6_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT6_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
 	  LPASS_BE_INT6_MI2S_TX},
 };
 
-/* Track ASM playback & capture sessions of DAI */
+/* Track ASM playback & capture sessions of DAI
+ * Track LSM listen sessions
+ */
 static struct msm_pcm_routing_fdai_data
-	fe_dai_map[MSM_FRONTEND_DAI_MM_SIZE][2] = {
+	fe_dai_map[MSM_FRONTEND_DAI_MAX][2] = {
 	/* MULTIMEDIA1 */
 	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
 	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
@@ -559,13 +603,80 @@
 	/* MULTIMEDIA19 */
 	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
 	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* CS_VOICE */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOIP */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* AFE_RX */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* AFE_TX */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICE_STUB */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOLTE */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* DTMF_RX */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICE2 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* QCHAT */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOLTE_STUB */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM1 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM2 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM3 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM4 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM5 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM6 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM7 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* LSM8 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICE2_STUB */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOWLAN */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICEMMODE1 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* VOICEMMODE2 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
 };
 
-static unsigned long session_copp_map[MSM_FRONTEND_DAI_MM_SIZE][2]
+static unsigned long session_copp_map[MSM_FRONTEND_DAI_MAX][2]
 				     [MSM_BACKEND_DAI_MAX];
 static struct msm_pcm_routing_app_type_data app_type_cfg[MAX_APP_TYPES];
+static struct msm_pcm_routing_app_type_data lsm_app_type_cfg[MAX_APP_TYPES];
 static struct msm_pcm_stream_app_type_cfg
-			 fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MM_SIZE][2];
+	fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MAX][2][MSM_BACKEND_DAI_MAX];
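/*
 * Editor's note (illustrative, not part of this change): fe_dai_app_type_cfg
 * is now indexed by front end, session type and back end, so every lookup
 * needs all three indices.  A minimal sketch, assuming the indices were
 * already validated against MSM_FRONTEND_DAI_MAX, SESSION_TYPE_RX/TX and
 * MSM_BACKEND_DAI_MAX:
 */
static inline int example_app_type_for(int fedai_id, int session_type,
				       int be_id)
{
	/* read back whatever was registered for this FE/session/BE tuple */
	return fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type;
}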
 
 /* The caller of this should acquire routing lock */
 void msm_pcm_routing_get_bedai_info(int be_idx,
@@ -608,44 +719,89 @@
 	return 0;
 }
 
-void msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int app_type,
-	int acdb_dev_id, int sample_rate, int session_type)
+static int msm_pcm_routing_get_lsm_app_type_idx(int app_type)
 {
-	pr_debug("%s: fedai_id %d, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fedai_id, session_type, app_type,
-		acdb_dev_id, sample_rate);
-	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+	int idx;
+
+	pr_debug("%s: app_type: %d\n", __func__, app_type);
+	for (idx = 0; idx < MAX_APP_TYPES; idx++) {
+		if (lsm_app_type_cfg[idx].app_type == app_type)
+			return idx;
+	}
+	pr_debug("%s: App type not available, fallback to default\n", __func__);
+	return 0;
+}
+
+static bool is_mm_lsm_fe_id(int fe_id)
+{
+	bool rc = true;
+
+	if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID &&
+		((fe_id < MSM_FRONTEND_DAI_LSM1) ||
+		 (fe_id > MSM_FRONTEND_DAI_LSM8))) {
+		rc = false;
+	}
+	return rc;
+}
+
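/*
 * Editor's note (illustrative, not part of this change): is_mm_lsm_fe_id()
 * accepts the multimedia front ends (up to MSM_FRONTEND_DAI_MM_MAX_ID) plus
 * the listen front ends MSM_FRONTEND_DAI_LSM1..MSM_FRONTEND_DAI_LSM8, e.g.:
 *
 *	is_mm_lsm_fe_id(MSM_FRONTEND_DAI_MULTIMEDIA1)	-> true
 *	is_mm_lsm_fe_id(MSM_FRONTEND_DAI_LSM3)		-> true
 *	is_mm_lsm_fe_id(MSM_FRONTEND_DAI_VOIP)		-> false
 */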
+int msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int session_type,
+					    int be_id, int app_type,
+					    int acdb_dev_id, int sample_rate)
+{
+	int ret = 0;
+
+	pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fedai_id, session_type, be_id,
+		app_type, acdb_dev_id, sample_rate);
+
+	if (!is_mm_lsm_fe_id(fedai_id)) {
 		pr_err("%s: Invalid machine driver ID %d\n",
 			__func__, fedai_id);
-		return;
+		ret = -EINVAL;
+		goto done;
 	}
 	if (session_type != SESSION_TYPE_RX &&
 		session_type != SESSION_TYPE_TX) {
 		pr_err("%s: Invalid session type %d\n",
 			__func__, session_type);
-		return;
+		ret = -EINVAL;
+		goto done;
 	}
-	fe_dai_app_type_cfg[fedai_id][session_type].app_type = app_type;
-	fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id = acdb_dev_id;
-	fe_dai_app_type_cfg[fedai_id][session_type].sample_rate = sample_rate;
+	if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds be_id %d\n",
+			__func__, be_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type = app_type;
+	fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id =
+		acdb_dev_id;
+	fe_dai_app_type_cfg[fedai_id][session_type][be_id].sample_rate =
+		sample_rate;
+
+done:
+	return ret;
 }
 
 /**
  * msm_pcm_routing_get_stream_app_type_cfg
  *
- * Receives fedai_id, session_type and populates app_type, acdb_dev_id, &
- * sample rate. Returns 0 on success. On failure returns
+ * Receives fedai_id, session_type, be_id, and populates app_type,
+ * acdb_dev_id, & sample rate. Returns 0 on success. On failure returns
  * -EINVAL and does not alter passed values.
  *
  * fedai_id - Passed value, front end ID for which app type config is wanted
  * session_type - Passed value, session type for which app type config
  *                is wanted
+ * be_id - Passed value, back end device id for which app type config is wanted
  * app_type - Returned value, app type used by app type config
  * acdb_dev_id - Returned value, ACDB device ID used by app type config
  * sample_rate - Returned value, sample rate used by app type config
  */
 int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
-	int *app_type, int *acdb_dev_id, int *sample_rate)
+					    int be_id, int *app_type,
+					    int *acdb_dev_id, int *sample_rate)
 {
 	int ret = 0;
 
@@ -661,24 +817,31 @@
 		pr_err("%s: NULL pointer sent for sample rate\n", __func__);
 		ret = -EINVAL;
 		goto done;
-	} else if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+	} else if (!is_mm_lsm_fe_id(fedai_id)) {
 		pr_err("%s: Invalid FE ID %d\n",
 			__func__, fedai_id);
 		ret = -EINVAL;
 		goto done;
 	} else if (session_type != SESSION_TYPE_RX &&
-		session_type != SESSION_TYPE_TX) {
+		   session_type != SESSION_TYPE_TX) {
 		pr_err("%s: Invalid session type %d\n",
 			__func__, session_type);
 		ret = -EINVAL;
 		goto done;
+	} else if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: Received out of bounds be_id %d\n",
+			__func__, be_id);
+		return -EINVAL;
 	}
-	*app_type = fe_dai_app_type_cfg[fedai_id][session_type].app_type;
-	*acdb_dev_id = fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
-	*sample_rate = fe_dai_app_type_cfg[fedai_id][session_type].sample_rate;
 
-	pr_debug("%s: fedai_id %d, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fedai_id, session_type,
+	*app_type = fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type;
+	*acdb_dev_id =
+		fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id;
+	*sample_rate =
+		fe_dai_app_type_cfg[fedai_id][session_type][be_id].sample_rate;
+
+	pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+		__func__, fedai_id, session_type, be_id,
 		*app_type, *acdb_dev_id, *sample_rate);
 done:
 	return ret;
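/*
 * Editor's sketch (not part of the patch): with the extra be_id argument the
 * register/get pair is used per back end.  The FE/BE identifiers and the
 * app_type/acdb/sample-rate values below are example values only.
 */
static void example_app_type_cfg_roundtrip(void)
{
	int app_type, acdb_dev_id, sample_rate;

	/* store config for MULTIMEDIA1 playback routed to SLIMBUS_0_RX */
	msm_pcm_routing_reg_stream_app_type_cfg(MSM_FRONTEND_DAI_MULTIMEDIA1,
						SESSION_TYPE_RX,
						MSM_BACKEND_DAI_SLIMBUS_0_RX,
						69936, 15, 48000);

	/* read it back for the same FE/session/BE tuple */
	if (!msm_pcm_routing_get_stream_app_type_cfg(
			MSM_FRONTEND_DAI_MULTIMEDIA1, SESSION_TYPE_RX,
			MSM_BACKEND_DAI_SLIMBUS_0_RX, &app_type,
			&acdb_dev_id, &sample_rate))
		pr_debug("app_type %d acdb %d rate %d\n",
			 app_type, acdb_dev_id, sample_rate);
}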
@@ -709,8 +872,7 @@
 
 static struct cal_block_data *msm_routing_find_topology(int path,
 							int app_type,
-							int acdb_id,
-							int sample_rate)
+							int acdb_id)
 {
 	struct list_head *ptr, *next;
 	struct cal_block_data *cal_block = NULL;
@@ -728,37 +890,37 @@
 			cal_block->cal_info;
 		if ((cal_info->path == path)  &&
 			(cal_info->app_type == app_type) &&
-			(cal_info->acdb_id == acdb_id) &&
-			(cal_info->sample_rate == sample_rate)) {
+			(cal_info->acdb_id == acdb_id)) {
 			return cal_block;
 		}
 	}
-	pr_debug("%s: Can't find topology for path %d, app %d, acdb_id %d sample_rate %d defaulting to search by path\n",
-		__func__, path, app_type, acdb_id, sample_rate);
+	pr_debug("%s: Can't find topology for path %d, app %d, acdb_id %d defaulting to search by path\n",
+		__func__, path, app_type, acdb_id);
 	return msm_routing_find_topology_by_path(path);
 }
 
-static int msm_routing_get_adm_topology(int path, int fedai_id,
-					int session_type)
+static int msm_routing_get_adm_topology(int fedai_id, int session_type,
+					int be_id)
 {
 	int topology = NULL_COPP_TOPOLOGY;
 	struct cal_block_data *cal_block = NULL;
-	int app_type = 0, acdb_dev_id = 0, sample_rate = 0;
+	int app_type = 0, acdb_dev_id = 0;
 
-	pr_debug("%s\n", __func__);
 
-	path = get_cal_path(path);
+	pr_debug("%s: fedai_id %d, session_type %d, be_id %d\n",
+	       __func__, fedai_id, session_type, be_id);
+
 	if (cal_data == NULL)
 		goto done;
 
 	mutex_lock(&cal_data->lock);
 
-	app_type = fe_dai_app_type_cfg[fedai_id][session_type].app_type;
-	acdb_dev_id = fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
-	sample_rate = fe_dai_app_type_cfg[fedai_id][session_type].sample_rate;
+	app_type = fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type;
+	acdb_dev_id =
+		fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id;
 
-	cal_block = msm_routing_find_topology(path, app_type,
-					      acdb_dev_id, sample_rate);
+	cal_block = msm_routing_find_topology(session_type, app_type,
+					      acdb_dev_id);
 	if (cal_block == NULL)
 		goto unlock;
 
@@ -782,20 +944,21 @@
 }
 
 static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
-					 int path_type, int perf_mode)
+					 int path_type, int perf_mode,
+					 uint32_t passthr_mode)
 {
 	int i, port_type, j, num_copps = 0;
 	struct route_payload payload;
 
-	port_type = ((path_type == ADM_PATH_PLAYBACK  ||
-		     path_type == ADM_PATH_COMPRESSED_RX) ?
+	port_type = ((path_type == ADM_PATH_PLAYBACK ||
+		      path_type == ADM_PATH_COMPRESSED_RX) ?
 		MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX);
 
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
 		if (!is_be_dai_extproc(i) &&
 		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
 		   (msm_bedais[i].active) &&
-		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
 			for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
 				unsigned long copp =
 				      session_copp_map[fedai_id][sess_type][i];
@@ -803,6 +966,18 @@
 					payload.port_id[num_copps] =
 							msm_bedais[i].port_id;
 					payload.copp_idx[num_copps] = j;
+					payload.app_type[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][sess_type][i]
+								.app_type;
+					payload.acdb_dev_id[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][sess_type][i]
+								.acdb_dev_id;
+					payload.sample_rate[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][sess_type][i]
+								.sample_rate;
 					num_copps++;
 				}
 			}
@@ -812,13 +987,7 @@
 	if (num_copps) {
 		payload.num_copps = num_copps;
 		payload.session_id = fe_dai_map[fedai_id][sess_type].strm_id;
-		payload.app_type =
-			fe_dai_app_type_cfg[fedai_id][sess_type].app_type;
-		payload.acdb_dev_id =
-			fe_dai_app_type_cfg[fedai_id][sess_type].acdb_dev_id;
-		payload.sample_rate =
-			fe_dai_app_type_cfg[fedai_id][sess_type].sample_rate;
-		adm_matrix_map(path_type, payload, perf_mode);
+		adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
 		msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
 	}
 }
@@ -852,7 +1021,7 @@
 		if (!is_be_dai_extproc(i) &&
 		    (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
 		    (msm_bedais[i].active) &&
-		    (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+		    (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
 			mode = afe_get_port_type(msm_bedais[i].port_id);
 			adm_connect_afe_port(mode, dspst_id,
 					     msm_bedais[i].port_id);
@@ -862,28 +1031,52 @@
 	mutex_unlock(&routing_lock);
 }
 
+static bool route_check_fe_id_adm_support(int fe_id)
+{
+	bool rc = true;
+
+	if ((fe_id >= MSM_FRONTEND_DAI_LSM1) &&
+		 (fe_id <= MSM_FRONTEND_DAI_LSM8)) {
+		/* fe id is listen while port is set to afe */
+		if (lsm_port_index != ADM_LSM_PORT_INDEX) {
+			pr_debug("%s: fe_id %d, lsm mux slim port %d\n",
+				__func__, fe_id, lsm_port_index);
+			rc = false;
+		}
+	}
+
+	return rc;
+}
+
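/*
 * Editor's sketch (not part of the patch): the gate above lets callers skip
 * ADM session setup for the listen (LSM1..LSM8) front ends whenever the LSM
 * mux points at a real AFE port rather than the ADM port; the compressed and
 * PCM registration paths below return early in that case, e.g.:
 */
static int example_maybe_open_adm(int fe_id)
{
	if (!route_check_fe_id_adm_support(fe_id)) {
		pr_debug("%s: no ADM path for fe_id %d\n", __func__, fe_id);
		return 0;	/* nothing to set up, not an error */
	}
	/* ...continue with adm_open() and matrix mapping as done below... */
	return 0;
}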
 int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
 					  int dspst_id, int stream_type,
-					  uint32_t compr_passthr_mode)
+					  uint32_t passthr_mode)
 {
-	int i, j, session_type, path_type, port_type, topology, num_copps = 0;
+	int i, j, session_type, path_type, port_type, topology;
+	int num_copps = 0;
 	struct route_payload payload;
 	u32 channels, sample_rate;
 	u16 bit_width = 16;
+	bool is_lsm;
 
 	pr_debug("%s:fe_id[%d] perf_mode[%d] id[%d] stream_type[%d] passt[%d]",
 		 __func__, fe_id, perf_mode, dspst_id,
-		 stream_type, compr_passthr_mode);
-
-	if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		 stream_type, passthr_mode);
+	if (!is_mm_lsm_fe_id(fe_id)) {
 		/* bad ID assigned in machine driver */
 		pr_err("%s: bad MM ID %d\n", __func__, fe_id);
 		return -EINVAL;
 	}
 
+	if (!route_check_fe_id_adm_support(fe_id)) {
+		/* ignore adm open if not supported for fe_id */
+		pr_debug("%s: No ADM support for fe id %d\n", __func__, fe_id);
+		return 0;
+	}
+
 	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
 		session_type = SESSION_TYPE_RX;
-		if (compr_passthr_mode != LEGACY_PCM)
+		if (passthr_mode != LEGACY_PCM)
 			path_type = ADM_PATH_COMPRESSED_RX;
 		else
 			path_type = ADM_PATH_PLAYBACK;
@@ -897,6 +1090,8 @@
 		return -EINVAL;
 	}
 
+	is_lsm = (fe_id >= MSM_FRONTEND_DAI_LSM1) &&
+			 (fe_id <= MSM_FRONTEND_DAI_LSM8);
 	mutex_lock(&routing_lock);
 
 	payload.num_copps = 0; /* only RX needs to use payload */
@@ -904,14 +1099,14 @@
 	/* re-enable EQ if active */
 	msm_qti_pp_send_eq_values(fe_id);
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
-		if (test_bit(fe_id, &msm_bedais[i].fe_sessions))
-			msm_bedais[i].compr_passthr_mode = compr_passthr_mode;
+		if (test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))
+			msm_bedais[i].passthr_mode = passthr_mode;
 
 		if (!is_be_dai_extproc(i) &&
 			(afe_get_port_type(msm_bedais[i].port_id) ==
 			port_type) &&
 			(msm_bedais[i].active) &&
-			(test_bit(fe_id, &msm_bedais[i].fe_sessions))) {
+			(test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))) {
 			int app_type, app_type_idx, copp_idx, acdb_dev_id;
 
 			/*
@@ -926,25 +1121,36 @@
 			bit_width = msm_routing_get_bit_width(
 						msm_bedais[i].format);
 			app_type =
-			fe_dai_app_type_cfg[fe_id][session_type].app_type;
-			if (app_type) {
+			fe_dai_app_type_cfg[fe_id][session_type][i].app_type;
+			if (app_type && is_lsm) {
+				app_type_idx =
+				msm_pcm_routing_get_lsm_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[fe_id][session_type][i]
+					.sample_rate;
+				bit_width =
+				lsm_app_type_cfg[app_type_idx].bit_width;
+			} else if (app_type) {
 				app_type_idx =
 					msm_pcm_routing_get_app_type_idx(
 						app_type);
 				sample_rate =
-			fe_dai_app_type_cfg[fe_id][session_type].sample_rate;
+			fe_dai_app_type_cfg[fe_id][session_type][i].sample_rate;
 				bit_width =
 					app_type_cfg[app_type_idx].bit_width;
 			} else {
 				sample_rate = msm_bedais[i].sample_rate;
 			}
 			acdb_dev_id =
-			fe_dai_app_type_cfg[fe_id][session_type].acdb_dev_id;
-			topology = msm_routing_get_adm_topology(path_type,
-						fe_id, session_type);
-			if (compr_passthr_mode == COMPRESSED_PASSTHROUGH_DSD)
-				topology = COMPRESS_PASSTHROUGH_NONE_TOPOLOGY;
-			pr_err("%s: Before adm open topology %d\n", __func__,
+			fe_dai_app_type_cfg[fe_id][session_type][i].acdb_dev_id;
+			topology = msm_routing_get_adm_topology(fe_id,
+								session_type,
+								i);
+			if ((passthr_mode == COMPRESSED_PASSTHROUGH_DSD)
+			     || (passthr_mode ==
+			     COMPRESSED_PASSTHROUGH_GEN))
+				topology = COMPRESSED_PASSTHROUGH_NONE_TOPOLOGY;
+			pr_debug("%s: Before adm open topology %d\n", __func__,
 				topology);
 
 			copp_idx =
@@ -978,10 +1184,24 @@
 					payload.port_id[num_copps] =
 					msm_bedais[i].port_id;
 					payload.copp_idx[num_copps] = j;
+					payload.app_type[num_copps] =
+						fe_dai_app_type_cfg
+							[fe_id][session_type][i]
+								.app_type;
+					payload.acdb_dev_id[num_copps] =
+						fe_dai_app_type_cfg
+							[fe_id][session_type][i]
+								.acdb_dev_id;
+					payload.sample_rate[num_copps] =
+						fe_dai_app_type_cfg
+							[fe_id][session_type][i]
+								.sample_rate;
 					num_copps++;
 				}
 			}
-			if (compr_passthr_mode != COMPRESSED_PASSTHROUGH_DSD) {
+			if (passthr_mode != COMPRESSED_PASSTHROUGH_DSD
+			    && passthr_mode !=
+			    COMPRESSED_PASSTHROUGH_GEN) {
 				msm_routing_send_device_pp_params(
 				msm_bedais[i].port_id,
 				copp_idx);
@@ -991,11 +1211,7 @@
 	if (num_copps) {
 		payload.num_copps = num_copps;
 		payload.session_id = fe_dai_map[fe_id][session_type].strm_id;
-		payload.app_type =
-			fe_dai_app_type_cfg[fe_id][session_type].app_type;
-		payload.acdb_dev_id =
-			fe_dai_app_type_cfg[fe_id][session_type].acdb_dev_id;
-		adm_matrix_map(path_type, payload, perf_mode);
+		adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
 		msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
 	}
 	mutex_unlock(&routing_lock);
@@ -1046,6 +1262,7 @@
 	struct route_payload payload;
 	u32 channels, sample_rate;
 	uint16_t bits_per_sample = 16;
+	uint32_t passthr_mode = LEGACY_PCM;
 
 	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
 		/* bad ID assigned in machine driver */
@@ -1075,7 +1292,7 @@
 		if (!is_be_dai_extproc(i) &&
 		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
 		   (msm_bedais[i].active) &&
-		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
 			int app_type, app_type_idx, copp_idx, acdb_dev_id;
 			/*
 			 * check if ADM needs to be configured with different
@@ -1085,29 +1302,31 @@
 				channels = msm_bedais[i].channel;
 			else
 				channels = msm_bedais[i].adm_override_ch;
-			msm_bedais[i].compr_passthr_mode =
+			msm_bedais[i].passthr_mode =
 				LEGACY_PCM;
 
 			bits_per_sample = msm_routing_get_bit_width(
 						msm_bedais[i].format);
 
 			app_type =
-			fe_dai_app_type_cfg[fedai_id][session_type].app_type;
+			fe_dai_app_type_cfg[fedai_id][session_type][i].app_type;
 			if (app_type) {
 				app_type_idx =
 				msm_pcm_routing_get_app_type_idx(app_type);
 				sample_rate =
-				fe_dai_app_type_cfg[fedai_id][session_type].
-					sample_rate;
+				fe_dai_app_type_cfg[fedai_id][session_type][i]
+					.sample_rate;
 				bits_per_sample =
 					app_type_cfg[app_type_idx].bit_width;
 			} else
 				sample_rate = msm_bedais[i].sample_rate;
 
 			acdb_dev_id =
-			fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
-			topology = msm_routing_get_adm_topology(path_type,
-						fedai_id, session_type);
+			fe_dai_app_type_cfg[fedai_id][session_type][i]
+				.acdb_dev_id;
+			topology = msm_routing_get_adm_topology(fedai_id,
+								session_type,
+								i);
 			copp_idx = adm_open(msm_bedais[i].port_id, path_type,
 					    sample_rate, channels, topology,
 					    perf_mode, bits_per_sample,
@@ -1138,11 +1357,23 @@
 					payload.port_id[num_copps] =
 							msm_bedais[i].port_id;
 					payload.copp_idx[num_copps] = j;
+					payload.app_type[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][session_type]
+							[i].app_type;
+					payload.acdb_dev_id[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][session_type]
+							[i].acdb_dev_id;
+					payload.sample_rate[num_copps] =
+						fe_dai_app_type_cfg
+							[fedai_id][session_type]
+							[i].sample_rate;
 					num_copps++;
 				}
 			}
 			if ((perf_mode == LEGACY_PCM_MODE) &&
-				(msm_bedais[i].compr_passthr_mode ==
+				(msm_bedais[i].passthr_mode ==
 				LEGACY_PCM))
 				msm_pcm_routing_cfg_pp(msm_bedais[i].port_id,
 						       copp_idx, topology,
@@ -1152,13 +1383,7 @@
 	if (num_copps) {
 		payload.num_copps = num_copps;
 		payload.session_id = fe_dai_map[fedai_id][session_type].strm_id;
-		payload.app_type =
-			fe_dai_app_type_cfg[fedai_id][session_type].app_type;
-		payload.acdb_dev_id =
-			fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
-		payload.sample_rate =
-			fe_dai_app_type_cfg[fedai_id][session_type].sample_rate;
-		adm_matrix_map(path_type, payload, perf_mode);
+		adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
 		msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
 	}
 	mutex_unlock(&routing_lock);
@@ -1187,7 +1412,7 @@
 	int i, port_type, session_type, path_type, topology;
 	struct msm_pcm_routing_fdai_data *fdai;
 
-	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+	if (!is_mm_lsm_fe_id(fedai_id)) {
 		/* bad ID assigned in machine driver */
 		pr_err("%s: bad MM ID\n", __func__);
 		return;
@@ -1208,7 +1433,7 @@
 		if (!is_be_dai_extproc(i) &&
 		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
 		   (msm_bedais[i].active) &&
-		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
 			int idx;
 			unsigned long copp =
 				session_copp_map[fedai_id][session_type][i];
@@ -1233,7 +1458,7 @@
 			if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID ||
 				topology == DS2_ADM_COPP_TOPOLOGY_ID) &&
 			    (fdai->perf_mode == LEGACY_PCM_MODE) &&
-			    (msm_bedais[i].compr_passthr_mode ==
+			    (msm_bedais[i].passthr_mode ==
 					LEGACY_PCM))
 				msm_pcm_routing_deinit_pp(msm_bedais[i].port_id,
 							  topology);
@@ -1250,13 +1475,13 @@
 {
 	bool rc = false;
 
-	if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+	if (!is_mm_lsm_fe_id(fe_id)) {
 		/* recheck FE ID in the mixer control defined in this file */
 		pr_err("%s: bad MM ID\n", __func__);
 		return rc;
 	}
 
-	if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions))
+	if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions[0]))
 		rc = true;
 
 	return rc;
@@ -1268,19 +1493,27 @@
 	u32 channels, sample_rate;
 	uint16_t bits_per_sample = 16;
 	struct msm_pcm_routing_fdai_data *fdai;
+	uint32_t passthr_mode = msm_bedais[reg].passthr_mode;
+	bool is_lsm;
 
 	pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
 
-	if (val > MSM_FRONTEND_DAI_MM_MAX_ID) {
+	if (!is_mm_lsm_fe_id(val)) {
 		/* recheck FE ID in the mixer control defined in this file */
 		pr_err("%s: bad MM ID\n", __func__);
 		return;
 	}
 
+	if (!route_check_fe_id_adm_support(val)) {
+		/* ignore adm open if not supported for fe_id */
+		pr_debug("%s: No ADM support for fe id %d\n", __func__, val);
+		return;
+	}
+
 	if (afe_get_port_type(msm_bedais[reg].port_id) ==
 		MSM_AFE_PORT_TYPE_RX) {
 		session_type = SESSION_TYPE_RX;
-		if (msm_bedais[reg].compr_passthr_mode != LEGACY_PCM)
+		if (passthr_mode != LEGACY_PCM)
 			path_type = ADM_PATH_COMPRESSED_RX;
 		else
 			path_type = ADM_PATH_PLAYBACK;
@@ -1288,15 +1521,17 @@
 		session_type = SESSION_TYPE_TX;
 		path_type = ADM_PATH_LIVE_REC;
 	}
+	is_lsm = (val >= MSM_FRONTEND_DAI_LSM1) &&
+			 (val <= MSM_FRONTEND_DAI_LSM8);
 
 	mutex_lock(&routing_lock);
 	if (set) {
-		if (!test_bit(val, &msm_bedais[reg].fe_sessions) &&
+		if (!test_bit(val, &msm_bedais[reg].fe_sessions[0]) &&
 			((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) ||
 			(msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX)))
 			voc_start_playback(set, msm_bedais[reg].port_id);
 
-		set_bit(val, &msm_bedais[reg].fe_sessions);
+		set_bit(val, &msm_bedais[reg].fe_sessions[0]);
 		fdai = &fe_dai_map[val][session_type];
 		if (msm_bedais[reg].active && fdai->strm_id !=
 			INVALID_SESSION) {
@@ -1326,22 +1561,31 @@
 						msm_bedais[reg].format);
 
 			app_type =
-				fe_dai_app_type_cfg[val][session_type].app_type;
-			if (app_type) {
+			fe_dai_app_type_cfg[val][session_type][reg].app_type;
+			if (app_type && is_lsm) {
+				app_type_idx =
+				msm_pcm_routing_get_lsm_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[val][session_type][reg]
+					.sample_rate;
+				bits_per_sample =
+				lsm_app_type_cfg[app_type_idx].bit_width;
+			} else if (app_type) {
 				app_type_idx =
 				msm_pcm_routing_get_app_type_idx(app_type);
 				sample_rate =
-				fe_dai_app_type_cfg[val][session_type].
-					sample_rate;
+				fe_dai_app_type_cfg[val][session_type][reg]
+					.sample_rate;
 				bits_per_sample =
 					app_type_cfg[app_type_idx].bit_width;
 			} else
 				sample_rate = msm_bedais[reg].sample_rate;
 
-			topology = msm_routing_get_adm_topology(path_type, val,
-						session_type);
+			topology = msm_routing_get_adm_topology(val,
+								session_type,
+								reg);
 			acdb_dev_id =
-			fe_dai_app_type_cfg[val][session_type].acdb_dev_id;
+			fe_dai_app_type_cfg[val][session_type][reg].acdb_dev_id;
 			copp_idx = adm_open(msm_bedais[reg].port_id, path_type,
 					    sample_rate, channels, topology,
 					    fdai->perf_mode, bits_per_sample,
@@ -1372,20 +1616,20 @@
 
 			msm_pcm_routing_build_matrix(val, session_type,
 						     path_type,
-						     fdai->perf_mode);
+						     fdai->perf_mode,
+						     passthr_mode);
 			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
-				(msm_bedais[reg].compr_passthr_mode ==
-					LEGACY_PCM))
+				(passthr_mode == LEGACY_PCM))
 				msm_pcm_routing_cfg_pp(msm_bedais[reg].port_id,
 						       copp_idx, topology,
 						       channels);
 		}
 	} else {
-		if (test_bit(val, &msm_bedais[reg].fe_sessions) &&
+		if (test_bit(val, &msm_bedais[reg].fe_sessions[0]) &&
 			((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) ||
 			(msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX)))
 			voc_start_playback(set, msm_bedais[reg].port_id);
-		clear_bit(val, &msm_bedais[reg].fe_sessions);
+		clear_bit(val, &msm_bedais[reg].fe_sessions[0]);
 		fdai = &fe_dai_map[val][session_type];
 		if (msm_bedais[reg].active && fdai->strm_id !=
 			INVALID_SESSION) {
@@ -1410,14 +1654,14 @@
 			if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID ||
 				topology == DS2_ADM_COPP_TOPOLOGY_ID) &&
 			    (fdai->perf_mode == LEGACY_PCM_MODE) &&
-			    (msm_bedais[reg].compr_passthr_mode ==
-				LEGACY_PCM))
+			    (passthr_mode == LEGACY_PCM))
 				msm_pcm_routing_deinit_pp(
 						msm_bedais[reg].port_id,
 						topology);
 			msm_pcm_routing_build_matrix(val, session_type,
 						     path_type,
-						     fdai->perf_mode);
+						     fdai->perf_mode,
+						     passthr_mode);
 		}
 	}
 	if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
@@ -1433,7 +1677,7 @@
 	struct soc_mixer_control *mc =
 	(struct soc_mixer_control *)kcontrol->private_value;
 
-	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
 		ucontrol->value.integer.value[0] = 1;
 	else
 		ucontrol->value.integer.value[0] = 0;
@@ -1469,10 +1713,56 @@
 	return 1;
 }
 
+static int msm_routing_get_listen_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_listen_mixer(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	if (ucontrol->value.integer.value[0]) {
+		if (msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false)
+			msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
+		snd_soc_dapm_mixer_update_power(widget->dapm,
+						kcontrol, 1, update);
+	} else if (!ucontrol->value.integer.value[0]) {
+		if (msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true)
+			msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
+		snd_soc_dapm_mixer_update_power(widget->dapm,
+						kcontrol, 0, update);
+	}
+
+	return 1;
+}
+
 static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
 {
 	u32 session_id = 0;
 	u16 path_type;
+	struct media_format_info voc_be_media_format;
 
 	pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
 
@@ -1484,9 +1774,9 @@
 	mutex_lock(&routing_lock);
 
 	if (set)
-		set_bit(val, &msm_bedais[reg].fe_sessions);
+		set_bit(val, &msm_bedais[reg].fe_sessions[0]);
 	else
-		clear_bit(val, &msm_bedais[reg].fe_sessions);
+		clear_bit(val, &msm_bedais[reg].fe_sessions[0]);
 
 	if (val == MSM_FRONTEND_DAI_DTMF_RX &&
 	    afe_get_port_type(msm_bedais[reg].port_id) ==
@@ -1505,8 +1795,22 @@
 	if (set) {
 		if (msm_bedais[reg].active) {
 			voc_set_route_flag(session_id, path_type, 1);
+
+			memset(&voc_be_media_format, 0,
+			       sizeof(struct media_format_info));
+
+			voc_be_media_format.port_id = msm_bedais[reg].port_id;
+			voc_be_media_format.num_channels =
+						msm_bedais[reg].channel;
+			voc_be_media_format.sample_rate =
+						msm_bedais[reg].sample_rate;
+			voc_be_media_format.bits_per_sample =
+						msm_bedais[reg].format;
+			/* Defaulting this to 1 for voice call usecases */
+			voc_be_media_format.channel_mapping[0] = 1;
+
 			voc_set_device_config(session_id, path_type,
-			   msm_bedais[reg].channel, msm_bedais[reg].port_id);
+					      &voc_be_media_format);
 
 			if (voc_get_route_flag(session_id, TX_PATH) &&
 				voc_get_route_flag(session_id, RX_PATH))
@@ -1531,7 +1835,7 @@
 
 	mutex_lock(&routing_lock);
 
-	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
 		ucontrol->value.integer.value[0] = 1;
 	else
 		ucontrol->value.integer.value[0] = 0;
@@ -1575,7 +1879,7 @@
 
 	mutex_lock(&routing_lock);
 
-	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
 		ucontrol->value.integer.value[0] = 1;
 	else
 		ucontrol->value.integer.value[0] = 0;
@@ -1600,14 +1904,14 @@
 
 	if (ucontrol->value.integer.value[0]) {
 		mutex_lock(&routing_lock);
-		set_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
+		set_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]);
 		mutex_unlock(&routing_lock);
 
 		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
 						update);
 	} else {
 		mutex_lock(&routing_lock);
-		clear_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
+		clear_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]);
 		mutex_unlock(&routing_lock);
 
 		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
@@ -1620,6 +1924,68 @@
 	return 1;
 }
 
+/*
+ * Return the mapping between port ID and backend ID to enable the AFE callback
+ * to determine the acdb_dev_id from the port id
+ */
+int msm_pcm_get_be_id_from_port_id(int port_id)
+{
+	int i;
+	int be_id = -EINVAL;
+
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (msm_bedais[i].port_id == port_id) {
+			be_id = i;
+			break;
+		}
+	}
+
+	return be_id;
+}
+
+/*
+ * Return the registered dev_acdb_id given a port ID to enable identifying the
+ * correct AFE calibration information by comparing the header information.
+ */
+static int msm_pcm_get_dev_acdb_id_by_port_id(int port_id)
+{
+	int acdb_id = -EINVAL;
+	int i = 0;
+	int session;
+	int port_type = afe_get_port_type(port_id);
+	int be_id = msm_pcm_get_be_id_from_port_id(port_id);
+
+	pr_debug("%s:port_id %d be_id %d, port_type 0x%x\n",
+		  __func__, port_id, be_id, port_type);
+
+	if (port_type == MSM_AFE_PORT_TYPE_TX) {
+		session = SESSION_TYPE_TX;
+	} else if (port_type == MSM_AFE_PORT_TYPE_RX) {
+		session = SESSION_TYPE_RX;
+	} else {
+		pr_err("%s: Invalid port type %d\n", __func__, port_type);
+		acdb_id = -EINVAL;
+		goto exit;
+	}
+
+	if (be_id < 0) {
+		pr_err("%s: Error getting backend id %d\n", __func__, be_id);
+		goto exit;
+	}
+
+	mutex_lock(&routing_lock);
+	i = find_first_bit(&msm_bedais[be_id].fe_sessions[0],
+			   MSM_FRONTEND_DAI_MAX);
+	if (i < MSM_FRONTEND_DAI_MAX)
+		acdb_id = fe_dai_app_type_cfg[i][session][be_id].acdb_dev_id;
+
+	pr_debug("%s: FE[%d] session[%d] BE[%d] acdb_id(%d)\n",
+		 __func__, i, session, be_id, acdb_id);
+	mutex_unlock(&routing_lock);
+exit:
+	return acdb_id;
+}
+
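/*
 * Editor's sketch (not part of the patch): msm_pcm_get_be_id_from_port_id()
 * gives callers (such as the helper above) a way to translate an AFE port
 * into a back-end index before touching per-BE state.  SLIMBUS_0_TX is just
 * an example port.
 */
static void example_port_to_be_id(void)
{
	int be_id = msm_pcm_get_be_id_from_port_id(SLIMBUS_0_TX);

	if (be_id < 0)
		pr_err("%s: unknown AFE port\n", __func__);
	else
		pr_debug("%s: SLIMBUS_0_TX is BE index %d\n", __func__, be_id);
}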
 static int msm_routing_get_switch_mixer(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -1678,6 +2044,93 @@
 	return 1;
 }
 
+static int msm_routing_get_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = int0_mi2s_switch_enable;
+	pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	int0_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = int4_mi2s_switch_enable;
+	pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+						update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+						update);
+	int4_mi2s_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_usb_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = usb_switch_enable;
+	pr_debug("%s: HFP Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_usb_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist =
+					dapm_kcontrol_get_wlist(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct snd_soc_dapm_update *update = NULL;
+
+	pr_debug("%s: USB Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
+						1, update);
+	else
+		snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
+						0, update);
+	usb_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
 static int msm_routing_get_pri_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -1827,23 +2280,24 @@
 	return 1;
 }
 
-static int msm_routing_lsm_mux_get(struct snd_kcontrol *kcontrol,
+static int msm_routing_lsm_port_get(struct snd_kcontrol *kcontrol,
 				   struct snd_ctl_elem_value *ucontrol)
 {
-	ucontrol->value.integer.value[0] = lsm_mux_slim_port;
+	ucontrol->value.integer.value[0] = lsm_port_index;
 	return 0;
 }
 
-static int msm_routing_lsm_mux_put(struct snd_kcontrol *kcontrol,
+static int msm_routing_lsm_port_put(struct snd_kcontrol *kcontrol,
 				   struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_dapm_widget_list *wlist =
-					dapm_kcontrol_get_wlist(kcontrol);
-	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	int mux = ucontrol->value.enumerated.item[0];
 	int lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
-	struct snd_soc_dapm_update *update = NULL;
+
+	if (mux >= e->items) {
+		pr_err("%s: Invalid mux value %d\n", __func__, mux);
+		return -EINVAL;
+	}
 
 	pr_debug("%s: LSM enable %ld\n", __func__,
 			ucontrol->value.integer.value[0]);
@@ -1869,21 +2323,21 @@
 	case 7:
 		lsm_port = AFE_PORT_ID_TERTIARY_MI2S_TX;
 		break;
+	case 8:
+		lsm_port = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+		break;
+	case 9:
+		lsm_port = ADM_LSM_PORT_ID;
+		break;
+	case 10:
+		lsm_port = AFE_PORT_ID_INT3_MI2S_TX;
+		break;
 	default:
 		pr_err("Default lsm port");
 		break;
 	}
 	set_lsm_port(lsm_port);
-
-	if (ucontrol->value.integer.value[0]) {
-		lsm_mux_slim_port = ucontrol->value.integer.value[0];
-		snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e,
-						update);
-	} else {
-		snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e,
-						update);
-		lsm_mux_slim_port = ucontrol->value.integer.value[0];
-	}
+	lsm_port_index = ucontrol->value.integer.value[0];
 
 	return 0;
 }
@@ -1896,22 +2350,31 @@
 	enum afe_mad_type mad_type;
 
 	pr_debug("%s: enter\n", __func__);
-	for (i = 0; i < ARRAY_SIZE(mad_audio_mux_text); i++)
-		if (!strcmp(kcontrol->id.name, mad_audio_mux_text[i]))
+	for (i = 0; i < ARRAY_SIZE(lsm_port_text); i++)
+		if (!strnstr(kcontrol->id.name, lsm_port_text[i],
+			    strlen(lsm_port_text[i])))
 			break;
 
-	if (i-- == ARRAY_SIZE(mad_audio_mux_text)) {
+	if (i-- == ARRAY_SIZE(lsm_port_text)) {
 		WARN(1, "Invalid id name %s\n", kcontrol->id.name);
 		return -EINVAL;
 	}
 
-	/*Check for Tertiary TX port*/
-	if (!strcmp(kcontrol->id.name, mad_audio_mux_text[7])) {
-		ucontrol->value.integer.value[0] = MADSWAUDIO;
-		return 0;
-	}
-
 	port_id = i * 2 + 1 + SLIMBUS_0_RX;
+
+	/* Check for Tertiary/Quaternary/INT3 TX port */
+	if (strnstr(kcontrol->id.name, lsm_port_text[7],
+			strlen(lsm_port_text[7])))
+		port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[8],
+			strlen(lsm_port_text[8])))
+		port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[10],
+			strlen(lsm_port_text[10])))
+		port_id = AFE_PORT_ID_INT3_MI2S_TX;
+
 	mad_type = afe_port_get_mad_type(port_id);
 	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
 		 mad_type);
@@ -1946,11 +2409,12 @@
 	enum afe_mad_type mad_type;
 
 	pr_debug("%s: enter\n", __func__);
-	for (i = 0; i < ARRAY_SIZE(mad_audio_mux_text); i++)
-		if (!strcmp(kcontrol->id.name, mad_audio_mux_text[i]))
+	for (i = 0; i < ARRAY_SIZE(lsm_port_text); i++)
+		if (strnstr(kcontrol->id.name, lsm_port_text[i],
+			    strlen(lsm_port_text[i])))
 			break;
 
-	if (i-- == ARRAY_SIZE(mad_audio_mux_text)) {
+	if (i-- == ARRAY_SIZE(lsm_port_text)) {
 		WARN(1, "Invalid id name %s\n", kcontrol->id.name);
 		return -EINVAL;
 	}
@@ -1977,11 +2441,18 @@
 		return -EINVAL;
 	}
 
-	/*Check for Tertiary TX port*/
-	if (!strcmp(kcontrol->id.name, mad_audio_mux_text[7])) {
+	/* Check for Tertiary/Quaternary/INT3 TX port */
+	if (strnstr(kcontrol->id.name, lsm_port_text[7],
+			strlen(lsm_port_text[7])))
 		port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
-		mad_type = MAD_SW_AUDIO;
-	}
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[8],
+			strlen(lsm_port_text[8])))
+		port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+
+	if (strnstr(kcontrol->id.name, lsm_port_text[10],
+			strlen(lsm_port_text[10])))
+		port_id = AFE_PORT_ID_INT3_MI2S_TX;
 
 	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
 		 mad_type);
@@ -2147,6 +2618,144 @@
 	return 1;
 }
 
+static int msm_ec_ref_ch_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_ec_ref_ch;
+	pr_debug("%s: msm_ec_ref_ch = %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_ec_ref_ch_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	msm_ec_ref_ch = ucontrol->value.integer.value[0];
+	pr_debug("%s: msm_ec_ref_ch = %d\n", __func__, msm_ec_ref_ch);
+	adm_num_ec_ref_rx_chans(msm_ec_ref_ch);
+	return 0;
+}
+
+static const char *const ec_ref_ch_text[] = {"Zero", "One", "Two", "Three",
+	"Four", "Five", "Six", "Seven", "Eight"};
+
+static int msm_ec_ref_bit_format_get(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	switch (msm_ec_ref_bit_format) {
+	case SNDRV_PCM_FORMAT_S24_LE:
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+	pr_debug("%s: msm_ec_ref_bit_format = %ld\n",
+		 __func__, ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_ec_ref_bit_format_put(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	u16 bit_width = 0;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 2:
+		msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 1:
+		msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	default:
+		msm_ec_ref_bit_format = 0;
+		break;
+	}
+
+	if (msm_ec_ref_bit_format == SNDRV_PCM_FORMAT_S16_LE)
+		bit_width = 16;
+	else if (msm_ec_ref_bit_format == SNDRV_PCM_FORMAT_S24_LE)
+		bit_width = 24;
+
+	pr_debug("%s: msm_ec_ref_bit_format = %d\n",
+		 __func__, msm_ec_ref_bit_format);
+	adm_ec_ref_rx_bit_width(bit_width);
+	return 0;
+}
+
+static const char *const ec_ref_bit_format_text[] = {"0", "S16_LE", "S24_LE"};
+
+static int msm_ec_ref_rate_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_ec_ref_sampling_rate;
+	pr_debug("%s: msm_ec_ref_sampling_rate = %ld\n",
+		 __func__, ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_ec_ref_rate_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_ec_ref_sampling_rate = 0;
+		break;
+	case 1:
+		msm_ec_ref_sampling_rate = 8000;
+		break;
+	case 2:
+		msm_ec_ref_sampling_rate = 16000;
+		break;
+	case 3:
+		msm_ec_ref_sampling_rate = 32000;
+		break;
+	case 4:
+		msm_ec_ref_sampling_rate = 44100;
+		break;
+	case 5:
+		msm_ec_ref_sampling_rate = 48000;
+		break;
+	case 6:
+		msm_ec_ref_sampling_rate = 96000;
+		break;
+	case 7:
+		msm_ec_ref_sampling_rate = 192000;
+		break;
+	case 8:
+		msm_ec_ref_sampling_rate = 384000;
+		break;
+	default:
+		msm_ec_ref_sampling_rate = 48000;
+		break;
+	}
+	pr_debug("%s: msm_ec_ref_sampling_rate = %d\n",
+		 __func__, msm_ec_ref_sampling_rate);
+	adm_ec_ref_rx_sampling_rate(msm_ec_ref_sampling_rate);
+	return 0;
+}
+
+static const char *const ec_ref_rate_text[] = {"0", "8000", "16000",
+	"32000", "44100", "48000", "96000", "192000", "384000"};
+
+static const struct soc_enum msm_route_ec_ref_params_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ec_ref_ch_text), ec_ref_ch_text),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ec_ref_bit_format_text),
+			    ec_ref_bit_format_text),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ec_ref_rate_text), ec_ref_rate_text),
+};
+
+static const struct snd_kcontrol_new ec_ref_param_controls[] = {
+	SOC_ENUM_EXT("EC Reference Channels", msm_route_ec_ref_params_enum[0],
+		msm_ec_ref_ch_get, msm_ec_ref_ch_put),
+	SOC_ENUM_EXT("EC Reference Bit Format", msm_route_ec_ref_params_enum[1],
+		msm_ec_ref_bit_format_get, msm_ec_ref_bit_format_put),
+	SOC_ENUM_EXT("EC Reference SampleRate", msm_route_ec_ref_params_enum[2],
+		msm_ec_ref_rate_get, msm_ec_ref_rate_put),
+};
+
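The three "EC Reference" kcontrols registered above are plain enum controls, so the reference channel count, bit width and sample rate can be programmed from userspace before the EC path is brought up. A minimal userspace sketch (illustrative only, not part of this patch) using tinyalsa follows; sound card 0 is an assumption, and only the control names and enum strings defined above are used.

/*
 * Illustrative sketch, not part of the patch: program a two-channel,
 * 16-bit, 48 kHz EC reference via ec_ref_param_controls.  Assumes
 * tinyalsa and sound card 0.
 */
#include <stdio.h>
#include <tinyalsa/asoundlib.h>

static void set_enum_ctl(struct mixer *mixer, const char *name,
			 const char *value)
{
	struct mixer_ctl *ctl = mixer_get_ctl_by_name(mixer, name);

	if (!ctl || mixer_ctl_set_enum_by_string(ctl, value))
		fprintf(stderr, "failed to set '%s' to '%s'\n", name, value);
}

int main(void)
{
	struct mixer *mixer = mixer_open(0);	/* card 0 assumed */

	if (!mixer)
		return 1;

	set_enum_ctl(mixer, "EC Reference Channels", "Two");
	set_enum_ctl(mixer, "EC Reference Bit Format", "S16_LE");
	set_enum_ctl(mixer, "EC Reference SampleRate", "48000");

	mixer_close(mixer);
	return 0;
}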
 static int msm_routing_ec_ref_rx_get(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -2164,10 +2773,10 @@
 	struct snd_soc_dapm_widget_list *wlist =
 					dapm_kcontrol_get_wlist(kcontrol);
 	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-	int mux = ucontrol->value.enumerated.item[0];
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	struct snd_soc_dapm_update *update = NULL;
 
 	mutex_lock(&routing_lock);
 	switch (ucontrol->value.integer.value[0]) {
 	case 0:
@@ -2246,6 +2855,18 @@
 		msm_route_ec_ref_rx = 19;
 		ec_ref_port_id = AFE_PORT_ID_USB_RX;
 		break;
+	case 20:
+		msm_route_ec_ref_rx = 20;
+		ec_ref_port_id = AFE_PORT_ID_INT0_MI2S_RX;
+		break;
+	case 21:
+		msm_route_ec_ref_rx = 21;
+		ec_ref_port_id = AFE_PORT_ID_INT4_MI2S_RX;
+		break;
+	case 22:
+		msm_route_ec_ref_rx = 22;
+		ec_ref_port_id = AFE_PORT_ID_INT3_MI2S_TX;
+		break;
 	default:
 		msm_route_ec_ref_rx = 0; /* NONE */
 		pr_err("%s EC ref rx %ld not valid\n",
@@ -2257,7 +2878,8 @@
 	pr_debug("%s: msm_route_ec_ref_rx = %d\n",
 	    __func__, msm_route_ec_ref_rx);
 	mutex_unlock(&routing_lock);
-	snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e, update);
+	snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
+					msm_route_ec_ref_rx, e, update);
 	return 0;
 }
 
@@ -2266,7 +2888,8 @@
 	"TERT_MI2S_TX", "QUAT_MI2S_TX", "SEC_I2S_RX", "PROXY_RX",
 	"SLIM_5_RX", "SLIM_1_TX", "QUAT_TDM_TX_1",
 	"QUAT_TDM_RX_0", "QUAT_TDM_RX_1", "QUAT_TDM_RX_2", "SLIM_6_RX",
-	"TERT_MI2S_RX", "QUAT_MI2S_RX", "TERT_TDM_TX_0", "USB_AUDIO_RX"};
+	"TERT_MI2S_RX", "QUAT_MI2S_RX", "TERT_TDM_TX_0", "USB_AUDIO_RX",
+	"INT0_MI2S_RX", "INT4_MI2S_RX", "INT3_MI2S_TX"};
 
 static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
 	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ec_ref_rx), ec_ref_rx),
@@ -2351,6 +2974,11 @@
 	uint16_t ext_ec_ref_port_id;
 	struct snd_soc_dapm_update *update = NULL;
 
+	if (mux >= e->items) {
+		pr_err("%s: Invalid mux value %d\n", __func__, mux);
+		return -EINVAL;
+	}
+
 	mutex_lock(&routing_lock);
 	msm_route_ext_ec_ref = ucontrol->value.integer.value[0];
 
@@ -2381,9 +3009,9 @@
 	}
 
 	pr_debug("%s: val = %d ext_ec_ref_port_id = 0x%0x state = %d\n",
-		__func__, msm_route_ext_ec_ref, ext_ec_ref_port_id, state);
+		 __func__, msm_route_ext_ec_ref, ext_ec_ref_port_id, state);
 
-	if (!voc_set_ext_ec_ref(ext_ec_ref_port_id, state)) {
+	if (!voc_set_ext_ec_ref_port_id(ext_ec_ref_port_id, state)) {
 		mutex_unlock(&routing_lock);
 		snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e,
 						update);
@@ -4036,6 +4664,159 @@
 	msm_routing_put_audio_mixer),
 };
 
+static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
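The pri_tdm_rx_1/2/3 mixer tables above (and the sec_tdm_rx tables that follow) repeat the same sixteen MultiMediaN rows with only the back-end DAI changing. Purely as an illustration (the macro names below are hypothetical and not used anywhere in this patch), each table could be generated from a helper macro:

/*
 * Illustrative sketch, not part of the patch: generate the repeated
 * MultiMedia1..16 rows per back end.  Macro names are hypothetical.
 */
#define MSM_MM_MIXER_ROW(n, be)						\
	SOC_SINGLE_EXT("MultiMedia" #n, (be),				\
		MSM_FRONTEND_DAI_MULTIMEDIA##n, 1, 0,			\
		msm_routing_get_audio_mixer,				\
		msm_routing_put_audio_mixer)

#define MSM_MM_MIXER_ROWS(be)						\
	MSM_MM_MIXER_ROW(1, be),  MSM_MM_MIXER_ROW(2, be),		\
	MSM_MM_MIXER_ROW(3, be),  MSM_MM_MIXER_ROW(4, be),		\
	MSM_MM_MIXER_ROW(5, be),  MSM_MM_MIXER_ROW(6, be),		\
	MSM_MM_MIXER_ROW(7, be),  MSM_MM_MIXER_ROW(8, be),		\
	MSM_MM_MIXER_ROW(9, be),  MSM_MM_MIXER_ROW(10, be),		\
	MSM_MM_MIXER_ROW(11, be), MSM_MM_MIXER_ROW(12, be),		\
	MSM_MM_MIXER_ROW(13, be), MSM_MM_MIXER_ROW(14, be),		\
	MSM_MM_MIXER_ROW(15, be), MSM_MM_MIXER_ROW(16, be)

/* the table above would then collapse to: */
static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
	MSM_MM_MIXER_ROWS(MSM_BACKEND_DAI_PRI_TDM_RX_1),
};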
 static const struct snd_kcontrol_new pri_tdm_tx_0_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -4138,6 +4919,159 @@
 	msm_routing_put_audio_mixer),
 };
 
+static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new sec_tdm_tx_0_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -4763,6 +5697,30 @@
 	SOC_SINGLE_EXT("QUIN_MI2S_TX", MSM_BACKEND_DAI_QUINARY_MI2S_TX,
 		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
 		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
 		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
 		msm_routing_put_audio_mixer),
@@ -4826,9 +5784,39 @@
 	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("QUIN_MI2S_TX", MSM_BACKEND_DAI_QUINARY_MI2S_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
@@ -4901,6 +5889,30 @@
 	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
@@ -4952,6 +5964,30 @@
 	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
 	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
@@ -5033,6 +6069,30 @@
 	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
@@ -5111,6 +6171,30 @@
 	SOC_SINGLE_EXT("QUAT_AUXPCM_UL_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
@@ -5180,6 +6264,30 @@
 	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
@@ -5212,6 +6320,57 @@
 	msm_routing_put_audio_mixer),
 };
 
+static const struct snd_kcontrol_new mmul9_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new mmul17_mixer_controls[] = {
 	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
@@ -6039,6 +7198,12 @@
 	msm_routing_put_voice_mixer),
 };
 
+static const struct snd_kcontrol_new quat_tdm_rx_2_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
 static const struct snd_kcontrol_new stub_rx_mixer_controls[] = {
 	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_EXTPROC_RX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
@@ -6304,6 +7469,9 @@
 	SOC_SINGLE_EXT("USB_AUDIO_TX_MMode1", MSM_BACKEND_DAI_USB_TX,
 	MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0_MMode1",
+	MSM_BACKEND_DAI_QUAT_TDM_TX_0, MSM_FRONTEND_DAI_VOICEMMODE1,
+	1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
 };
 
 static const struct snd_kcontrol_new tx_voicemmode2_mixer_controls[] = {
@@ -6597,6 +7765,66 @@
 	msm_routing_put_voice_mixer),
 };
 
+static const struct snd_kcontrol_new int0_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_INT3_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_INT0_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new int4_mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_INT3_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_7_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_INT4_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
 static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
 	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
 	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
@@ -6859,6 +8087,12 @@
 	msm_routing_put_port_mixer),
 };
 
+static const struct snd_kcontrol_new usb_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_RX,
+	MSM_BACKEND_DAI_USB_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
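usb_rx_port_mixer_controls above adds a single back-end to back-end switch, giving a USB_AUDIO_TX to USB_AUDIO_RX loopback path. From userspace it is toggled like any other port-mixer switch; the sketch below (illustrative only) assumes tinyalsa, and the full control name is an assumption because the USB RX port-mixer widget that supplies the name prefix is defined outside this hunk.

/*
 * Illustrative sketch, not part of the patch: enable the USB TX -> USB RX
 * loopback switch.  The control name is assumed; the widget prefix comes
 * from DAPM widget definitions not shown in this hunk.
 */
#include <tinyalsa/asoundlib.h>

static int enable_usb_loopback(unsigned int card)
{
	struct mixer *mixer = mixer_open(card);
	struct mixer_ctl *ctl;
	int ret = -1;

	if (!mixer)
		return ret;

	ctl = mixer_get_ctl_by_name(mixer,
			"USB_AUDIO_RX Port Mixer USB_AUDIO_TX"); /* assumed */
	if (ctl)
		ret = mixer_ctl_set_value(ctl, 0, 1);	/* switch on */

	mixer_close(mixer);
	return ret;
}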
 static const struct snd_kcontrol_new quat_mi2s_rx_port_mixer_controls[] = {
 	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
 	MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
@@ -6883,6 +8117,542 @@
 	msm_routing_put_port_mixer),
 };
 
+static const struct snd_kcontrol_new pri_tdm_rx_0_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_1_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_2_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new pri_tdm_rx_3_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_0_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_1_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_2_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_tdm_rx_3_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_FM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_SEC_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_1, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_2, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+		MSM_BACKEND_DAI_QUAT_TDM_TX_3, 1, 0,
+		msm_routing_get_port_mixer,
+		msm_routing_put_port_mixer),
+};
+
 static const struct snd_kcontrol_new tert_tdm_rx_0_port_mixer_controls[] = {
 	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_TERT_TDM_RX_0,
 		MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0,
@@ -7458,6 +9228,222 @@
 	msm_routing_put_port_mixer),
 };
 
+static const struct snd_kcontrol_new lsm1_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm2_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm3_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm4_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm5_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm6_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm7_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm8_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+	SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+		MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+		msm_routing_put_listen_mixer),
+};
+
 static const struct snd_kcontrol_new slim_fm_switch_mixer_controls =
 	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
 	0, 1, 0, msm_routing_get_switch_mixer,
@@ -7488,6 +9474,16 @@
 	0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer,
 	msm_routing_put_fm_pcmrx_switch_mixer);
 
+static const struct snd_kcontrol_new int0_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_int0_mi2s_switch_mixer,
+	msm_routing_put_int0_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new int4_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_int4_mi2s_switch_mixer,
+	msm_routing_put_int4_mi2s_switch_mixer);
+
 static const struct snd_kcontrol_new pri_mi2s_rx_switch_mixer_controls =
 	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
 	0, 1, 0, msm_routing_get_pri_mi2s_switch_mixer,
@@ -7523,53 +9519,27 @@
 	0, 1, 0, msm_routing_get_hfp_switch_mixer,
 	msm_routing_put_hfp_switch_mixer);
 
-static const struct soc_enum lsm_mux_enum =
-	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mad_audio_mux_text), mad_audio_mux_text);
+static const struct snd_kcontrol_new hfp_slim7_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_hfp_switch_mixer,
+	msm_routing_put_hfp_switch_mixer);
 
-static const struct snd_kcontrol_new lsm1_mux =
-	SOC_DAPM_ENUM_EXT("LSM1 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
+static const struct snd_kcontrol_new usb_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_usb_switch_mixer,
+	msm_routing_put_usb_switch_mixer);
 
-static const struct snd_kcontrol_new lsm2_mux =
-	SOC_DAPM_ENUM_EXT("LSM2 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
-static const struct snd_kcontrol_new lsm3_mux =
-	SOC_DAPM_ENUM_EXT("LSM3 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
-
-static const struct snd_kcontrol_new lsm4_mux =
-	SOC_DAPM_ENUM_EXT("LSM4 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
-static const struct snd_kcontrol_new lsm5_mux =
-	SOC_DAPM_ENUM_EXT("LSM5 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
-
-static const struct snd_kcontrol_new lsm6_mux =
-	SOC_DAPM_ENUM_EXT("LSM6 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
-static const struct snd_kcontrol_new lsm7_mux =
-	SOC_DAPM_ENUM_EXT("LSM7 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
-
-static const struct snd_kcontrol_new lsm8_mux =
-	SOC_DAPM_ENUM_EXT("LSM8 MUX", lsm_mux_enum,
-			  msm_routing_lsm_mux_get,
-			  msm_routing_lsm_mux_put);
-
+static const struct soc_enum lsm_port_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lsm_port_text), lsm_port_text);
 
 static const char * const lsm_func_text[] = {
 	"None", "AUDIO", "BEACON", "ULTRASOUND", "SWAUDIO",
 };
 static const struct soc_enum lsm_func_enum =
 	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lsm_func_text), lsm_func_text);
-static const struct snd_kcontrol_new lsm_function[] = {
+
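+/*
+ * lsm_controls groups the per-port LSM FUNCTION selectors together with
+ * the per-session "LSMn Port" routing controls defined below.
+ */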
+static const struct snd_kcontrol_new lsm_controls[] = {
+	/* kcontrol of lsm_function */
 	SOC_ENUM_EXT(SLIMBUS_0_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
 		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
 	SOC_ENUM_EXT(SLIMBUS_1_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
@@ -7584,6 +9554,35 @@
 		     msm_routing_lsm_func_get, msm_routing_lsm_func_put),
 	SOC_ENUM_EXT(TERT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
 		    msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(QUAT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		    msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	SOC_ENUM_EXT(INT3_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+		    msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+	/* kcontrol of lsm_port */
+	SOC_ENUM_EXT("LSM1 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM2 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM3 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM4 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM5 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM6 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM7 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
+	SOC_ENUM_EXT("LSM8 Port", lsm_port_enum,
+			  msm_routing_lsm_port_get,
+			  msm_routing_lsm_port_put),
 };
 
 static const char * const aanc_slim_0_rx_text[] = {
@@ -7637,10 +9636,11 @@
 			continue;
 		if ((port_id != SLIMBUS_0_RX) &&
 		     (port_id != RT_PROXY_PORT_001_RX) &&
-			(port_id != AFE_PORT_ID_PRIMARY_MI2S_RX))
+			(port_id != AFE_PORT_ID_PRIMARY_MI2S_RX) &&
+			(port_id != AFE_PORT_ID_INT4_MI2S_RX))
 			continue;
 
-		for_each_set_bit(i, &msm_bedais[be_index].fe_sessions,
+		for_each_set_bit(i, &msm_bedais[be_index].fe_sessions[0],
 				MSM_FRONTEND_DAI_MM_SIZE) {
 			if (fe_dai_map[i][SESSION_TYPE_RX].perf_mode !=
 			    LEGACY_PCM_MODE)
@@ -7746,6 +9746,45 @@
 	msm_routing_put_app_type_cfg_control),
 };
 
+static int msm_routing_get_lsm_app_type_cfg_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	return 0;
+}
+
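+/*
+ * Control payload layout: value[0] is the number of app type entries,
+ * followed by one {app_type, sample_rate, bit_width} triple per entry,
+ * bounded by MAX_APP_TYPES.
+ */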
+static int msm_routing_put_lsm_app_type_cfg_control(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int i = 0, j;
+	int num_app_types = ucontrol->value.integer.value[i++];
+
+	memset(lsm_app_type_cfg, 0, MAX_APP_TYPES*
+				sizeof(struct msm_pcm_routing_app_type_data));
+	if (num_app_types > MAX_APP_TYPES) {
+		pr_err("%s: number of app types exceeds the max supported\n",
+			__func__);
+		return -EINVAL;
+	}
+	for (j = 0; j < num_app_types; j++) {
+		lsm_app_type_cfg[j].app_type =
+				ucontrol->value.integer.value[i++];
+		lsm_app_type_cfg[j].sample_rate =
+				ucontrol->value.integer.value[i++];
+		lsm_app_type_cfg[j].bit_width =
+				ucontrol->value.integer.value[i++];
+	}
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new lsm_app_type_cfg_controls[] = {
+	SOC_SINGLE_MULTI_EXT("Listen App Type Config", SND_SOC_NOPM, 0,
+	0xFFFFFFFF, 0, 128, msm_routing_get_lsm_app_type_cfg_control,
+	msm_routing_put_lsm_app_type_cfg_control),
+};
+
 static int msm_routing_get_use_ds1_or_ds2_control(
 					struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_value *ucontrol)
@@ -7777,7 +9816,7 @@
 	uint32_t param_length = sizeof(uint32_t);
 	uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t);
 
-	param_value = kzalloc(param_length, GFP_KERNEL);
+	param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL);
 	if (!param_value)
 		return -ENOMEM;
 
@@ -7941,7 +9980,7 @@
 		goto done;
 	}
 
-	for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions,
+	for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
 			 MSM_FRONTEND_DAI_MM_SIZE) {
 		for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
 			copp = session_copp_map[i]
@@ -7985,6 +10024,9 @@
 	} else if (!strcmp(kcontrol->id.name + strlen(prefix),
 					"TERT_MI2S")) {
 		*port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+	} else if (!strcmp(kcontrol->id.name + strlen(prefix),
+					"INT3_MI2S")) {
+		*port_id = AFE_PORT_ID_INT3_MI2S_TX;
 	} else {
 		pr_err("%s: mixer ctl name=%s, could not derive valid port id\n",
 			__func__, kcontrol->id.name);
@@ -8189,6 +10231,36 @@
 		.info	= msm_source_tracking_info,
 		.get	= msm_audio_source_tracking_get,
 	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Voice Tx INT3_MI2S",
+		.info	= msm_sound_focus_info,
+		.get	= msm_voice_sound_focus_get,
+		.put	= msm_voice_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Voice Tx INT3_MI2S",
+		.info	= msm_source_tracking_info,
+		.get	= msm_voice_source_tracking_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Sound Focus Audio Tx INT3_MI2S",
+		.info	= msm_sound_focus_info,
+		.get	= msm_audio_sound_focus_get,
+		.put	= msm_audio_sound_focus_put,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name	= "Source Tracking Audio Tx INT3_MI2S",
+		.info	= msm_source_tracking_info,
+		.get	= msm_audio_source_tracking_get,
+	},
 };
 
 static int spkr_prot_put_vi_lch_port(struct snd_kcontrol *kcontrol,
@@ -8294,6 +10366,14 @@
 	"ZERO", "SENARY_TX"
 };
 
+static const char * const int4_mi2s_rx_vi_fb_tx_mono_mux_text[] = {
+	"ZERO", "INT5_MI2S_TX"
+};
+
+static const char * const int4_mi2s_rx_vi_fb_tx_stereo_mux_text[] = {
+	"ZERO", "INT5_MI2S_TX"
+};
+
 static const int const slim0_rx_vi_fb_tx_lch_value[] = {
 	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_SLIMBUS_4_TX
 };
@@ -8306,6 +10386,14 @@
 	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_SENARY_MI2S_TX
 };
 
+static const int const int4_mi2s_rx_vi_fb_tx_mono_ch_value[] = {
+	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_INT5_MI2S_TX
+};
+
+static const int const int4_mi2s_rx_vi_fb_tx_stereo_ch_value[] = {
+	MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_INT5_MI2S_TX
+};
+
 static const struct soc_enum slim0_rx_vi_fb_lch_mux_enum =
 	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_SLIMBUS_0_RX, 0, 0,
 	ARRAY_SIZE(slim0_rx_vi_fb_tx_lch_mux_text),
@@ -8321,6 +10409,18 @@
 	ARRAY_SIZE(mi2s_rx_vi_fb_tx_mux_text),
 	mi2s_rx_vi_fb_tx_mux_text, mi2s_rx_vi_fb_tx_value);
 
+static const struct soc_enum int4_mi2s_rx_vi_fb_mono_ch_mux_enum =
+	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_INT4_MI2S_RX, 0, 0,
+	ARRAY_SIZE(int4_mi2s_rx_vi_fb_tx_mono_mux_text),
+	int4_mi2s_rx_vi_fb_tx_mono_mux_text,
+	int4_mi2s_rx_vi_fb_tx_mono_ch_value);
+
+static const struct soc_enum int4_mi2s_rx_vi_fb_stereo_ch_mux_enum =
+	SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_INT4_MI2S_RX, 0, 0,
+	ARRAY_SIZE(int4_mi2s_rx_vi_fb_tx_stereo_mux_text),
+	int4_mi2s_rx_vi_fb_tx_stereo_mux_text,
+	int4_mi2s_rx_vi_fb_tx_stereo_ch_value);
+
 static const struct snd_kcontrol_new slim0_rx_vi_fb_lch_mux =
 	SOC_DAPM_ENUM_EXT("SLIM0_RX_VI_FB_LCH_MUX",
 	slim0_rx_vi_fb_lch_mux_enum, spkr_prot_get_vi_lch_port,
@@ -8336,6 +10436,16 @@
 	mi2s_rx_vi_fb_mux_enum, spkr_prot_get_vi_lch_port,
 	spkr_prot_put_vi_lch_port);
 
+static const struct snd_kcontrol_new int4_mi2s_rx_vi_fb_mono_ch_mux =
+	SOC_DAPM_ENUM_EXT("INT4_MI2S_RX_VI_FB_MONO_CH_MUX",
+	int4_mi2s_rx_vi_fb_mono_ch_mux_enum, spkr_prot_get_vi_lch_port,
+	spkr_prot_put_vi_lch_port);
+
+static const struct snd_kcontrol_new int4_mi2s_rx_vi_fb_stereo_ch_mux =
+	SOC_DAPM_ENUM_EXT("INT4_MI2S_RX_VI_FB_STEREO_CH_MUX",
+	int4_mi2s_rx_vi_fb_stereo_ch_mux_enum, spkr_prot_get_vi_rch_port,
+	spkr_prot_put_vi_rch_port);
+
 static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
 	/* Frontend AIF */
 	/* Widget name equals to Front-End DAI name<Need confirmation>,
@@ -8408,6 +10518,10 @@
 		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("SLIM6_UL_HL", "SLIMBUS6_HOSTLESS Capture",
 		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM7_DL_HL", "SLIMBUS7_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM7_UL_HL", "SLIMBUS7_HOSTLESS Capture",
+		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SLIM8_DL_HL", "SLIMBUS8_HOSTLESS Playback",
 		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("SLIM8_UL_HL", "SLIMBUS8_HOSTLESS Capture",
@@ -8419,6 +10533,10 @@
 	SND_SOC_DAPM_AIF_IN("INTHFP_DL_HL", "INT_HFP_BT_HOSTLESS Playback",
 		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("INTHFP_UL_HL", "INT_HFP_BT_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("USBAUDIO_DL_HL", "USBAUDIO_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("USBAUDIO_UL_HL", "USBAUDIO_HOSTLESS Capture",
 		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("HDMI_DL_HL", "HDMI_HOSTLESS Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SEC_I2S_DL_HL", "SEC_I2S_RX_HOSTLESS Playback",
@@ -8448,6 +10566,9 @@
 		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MI2S_UL_HL", "MI2S_TX_HOSTLESS Capture",
 		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT3_MI2S_UL_HL",
+		"INT3 MI2S_TX Hostless Capture",
+		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("TERT_MI2S_UL_HL",
 		"Tertiary MI2S_TX Hostless Capture",
 		0, 0, 0, 0),
@@ -8884,6 +11005,8 @@
 				0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SENARY_TX", "Senary_mi2s Capture",
 				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INT5_MI2S_TX", "INT5 MI2S Capture",
+				0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
 
 	SND_SOC_DAPM_AIF_OUT("AUX_PCM_RX", "AUX PCM Playback", 0, 0, 0, 0),
@@ -8940,6 +11063,10 @@
 				&slim6_fm_switch_mixer_controls),
 	SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0,
 				&pcm_rx_switch_mixer_controls),
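+	/* Switches gating hostless playback onto INT0/INT4 MI2S RX */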
+	SND_SOC_DAPM_SWITCH("INT0_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&int0_mi2s_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("INT4_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&int4_mi2s_rx_switch_mixer_controls),
 	SND_SOC_DAPM_SWITCH("PRI_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
 				&pri_mi2s_rx_switch_mixer_controls),
 	SND_SOC_DAPM_SWITCH("SEC_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
@@ -8954,16 +11081,10 @@
 				&hfp_aux_switch_mixer_controls),
 	SND_SOC_DAPM_SWITCH("HFP_INT_UL_HL", SND_SOC_NOPM, 0, 0,
 				&hfp_int_switch_mixer_controls),
-
-	/* Mux Definitions */
-	SND_SOC_DAPM_MUX("LSM1 MUX", SND_SOC_NOPM, 0, 0, &lsm1_mux),
-	SND_SOC_DAPM_MUX("LSM2 MUX", SND_SOC_NOPM, 0, 0, &lsm2_mux),
-	SND_SOC_DAPM_MUX("LSM3 MUX", SND_SOC_NOPM, 0, 0, &lsm3_mux),
-	SND_SOC_DAPM_MUX("LSM4 MUX", SND_SOC_NOPM, 0, 0, &lsm4_mux),
-	SND_SOC_DAPM_MUX("LSM5 MUX", SND_SOC_NOPM, 0, 0, &lsm5_mux),
-	SND_SOC_DAPM_MUX("LSM6 MUX", SND_SOC_NOPM, 0, 0, &lsm6_mux),
-	SND_SOC_DAPM_MUX("LSM7 MUX", SND_SOC_NOPM, 0, 0, &lsm7_mux),
-	SND_SOC_DAPM_MUX("LSM8 MUX", SND_SOC_NOPM, 0, 0, &lsm8_mux),
+	SND_SOC_DAPM_SWITCH("HFP_SLIM7_UL_HL", SND_SOC_NOPM, 0, 0,
+				&hfp_slim7_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("USB_DL_HL", SND_SOC_NOPM, 0, 0,
+				&usb_switch_mixer_controls),
 
 	/* Mixer definitions */
 	SND_SOC_DAPM_MIXER("PRI_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -9013,12 +11134,30 @@
 	SND_SOC_DAPM_MIXER("PRI_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				pri_tdm_rx_0_mixer_controls,
 				ARRAY_SIZE(pri_tdm_rx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_rx_1_mixer_controls,
+				ARRAY_SIZE(pri_tdm_rx_1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_rx_2_mixer_controls,
+				ARRAY_SIZE(pri_tdm_rx_2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				pri_tdm_rx_3_mixer_controls,
+				ARRAY_SIZE(pri_tdm_rx_3_mixer_controls)),
 	SND_SOC_DAPM_MIXER("PRI_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				pri_tdm_tx_0_mixer_controls,
 				ARRAY_SIZE(pri_tdm_tx_0_mixer_controls)),
 	SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				sec_tdm_rx_0_mixer_controls,
 				ARRAY_SIZE(sec_tdm_rx_0_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_1 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_rx_1_mixer_controls,
+				ARRAY_SIZE(sec_tdm_rx_1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_2 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_rx_2_mixer_controls,
+				ARRAY_SIZE(sec_tdm_rx_2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				sec_tdm_rx_3_mixer_controls,
+				ARRAY_SIZE(sec_tdm_rx_3_mixer_controls)),
 	SND_SOC_DAPM_MIXER("SEC_TDM_TX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				sec_tdm_tx_0_mixer_controls,
 				ARRAY_SIZE(sec_tdm_tx_0_mixer_controls)),
@@ -9066,6 +11205,8 @@
 	mmul6_mixer_controls, ARRAY_SIZE(mmul6_mixer_controls)),
 	SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0,
 	mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia9 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul9_mixer_controls, ARRAY_SIZE(mmul9_mixer_controls)),
 	SND_SOC_DAPM_MIXER("MultiMedia17 Mixer", SND_SOC_NOPM, 0, 0,
 	mmul17_mixer_controls, ARRAY_SIZE(mmul17_mixer_controls)),
 	SND_SOC_DAPM_MIXER("MultiMedia18 Mixer", SND_SOC_NOPM, 0, 0,
@@ -9170,6 +11311,10 @@
 				SND_SOC_NOPM, 0, 0,
 				quin_mi2s_rx_voice_mixer_controls,
 				ARRAY_SIZE(quin_mi2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_2_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				quat_tdm_rx_2_voice_mixer_controls,
+				ARRAY_SIZE(quat_tdm_rx_2_voice_mixer_controls)),
 	SND_SOC_DAPM_MIXER("Voice_Tx Mixer",
 				SND_SOC_NOPM, 0, 0, tx_voice_mixer_controls,
 				ARRAY_SIZE(tx_voice_mixer_controls)),
@@ -9277,6 +11422,30 @@
 	SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
 	quat_mi2s_rx_port_mixer_controls,
 	ARRAY_SIZE(quat_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_0 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_0_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_0_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_1 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_1_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_1_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_2 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_2_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_2_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("PRI_TDM_RX_3 Port Mixer", SND_SOC_NOPM, 0, 0,
+	pri_tdm_rx_3_port_mixer_controls,
+	ARRAY_SIZE(pri_tdm_rx_3_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_0 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_0_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_0_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_1 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_1_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_1_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_2 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_2_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_2_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_TDM_RX_3 Port Mixer", SND_SOC_NOPM, 0, 0,
+	sec_tdm_rx_3_port_mixer_controls,
+	ARRAY_SIZE(sec_tdm_rx_3_port_mixer_controls)),
 	SND_SOC_DAPM_MIXER("TERT_TDM_RX_0 Port Mixer", SND_SOC_NOPM, 0, 0,
 	tert_tdm_rx_0_port_mixer_controls,
 	ARRAY_SIZE(tert_tdm_rx_0_port_mixer_controls)),
@@ -9301,12 +11470,38 @@
 	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_3 Port Mixer", SND_SOC_NOPM, 0, 0,
 	quat_tdm_rx_3_port_mixer_controls,
 	ARRAY_SIZE(quat_tdm_rx_3_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT0_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	int0_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(int0_mi2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INT4_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	int4_mi2s_rx_port_mixer_controls,
+	ARRAY_SIZE(int4_mi2s_rx_port_mixer_controls)),
 	SND_SOC_DAPM_MIXER("QCHAT_Tx Mixer",
 	SND_SOC_NOPM, 0, 0, tx_qchat_mixer_controls,
 	ARRAY_SIZE(tx_qchat_mixer_controls)),
 	SND_SOC_DAPM_MIXER("USB_AUDIO_RX_Voice Mixer",
 	SND_SOC_NOPM, 0, 0, usb_audio_rx_voice_mixer_controls,
 	ARRAY_SIZE(usb_audio_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("USB_AUDIO_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, usb_rx_port_mixer_controls,
+	ARRAY_SIZE(usb_rx_port_mixer_controls)),
+	/* lsm mixer definitions */
+	SND_SOC_DAPM_MIXER("LSM1 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm1_mixer_controls, ARRAY_SIZE(lsm1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM2 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm2_mixer_controls, ARRAY_SIZE(lsm2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM3 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm3_mixer_controls, ARRAY_SIZE(lsm3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM4 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm4_mixer_controls, ARRAY_SIZE(lsm4_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM5 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm5_mixer_controls, ARRAY_SIZE(lsm5_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM6 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm6_mixer_controls, ARRAY_SIZE(lsm6_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM7 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm7_mixer_controls, ARRAY_SIZE(lsm7_mixer_controls)),
+	SND_SOC_DAPM_MIXER("LSM8 Mixer", SND_SOC_NOPM, 0, 0,
+	lsm8_mixer_controls, ARRAY_SIZE(lsm8_mixer_controls)),
 	/* Virtual Pins to force backends ON atm */
 	SND_SOC_DAPM_OUTPUT("BE_OUT"),
 	SND_SOC_DAPM_INPUT("BE_IN"),
@@ -9317,6 +11512,10 @@
 				&slim0_rx_vi_fb_rch_mux),
 	SND_SOC_DAPM_MUX("PRI_MI2S_RX_VI_FB_MUX", SND_SOC_NOPM, 0, 0,
 				&mi2s_rx_vi_fb_mux),
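+	/* VI feedback source muxes for INT4_MI2S_RX, fed from INT5_MI2S_TX */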
+	SND_SOC_DAPM_MUX("INT4_MI2S_RX_VI_FB_MONO_CH_MUX", SND_SOC_NOPM, 0, 0,
+				&int4_mi2s_rx_vi_fb_mono_ch_mux),
+	SND_SOC_DAPM_MUX("INT4_MI2S_RX_VI_FB_STEREO_CH_MUX", SND_SOC_NOPM, 0, 0,
+				&int4_mi2s_rx_vi_fb_stereo_ch_mux),
 
 	SND_SOC_DAPM_MUX("VOC_EXT_EC MUX", SND_SOC_NOPM, 0, 0,
 			 &voc_ext_ec_mux),
@@ -9525,10 +11724,14 @@
 	{"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"},
 
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
@@ -9579,6 +11782,7 @@
 	{"MultiMedia18 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
 	{"MultiMedia19 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
 	{"MultiMedia8 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
 	{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia5 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
@@ -9744,6 +11948,60 @@
 	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
 	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
 
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1 Audio Mixer"},
+
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2 Audio Mixer"},
+
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3 Audio Mixer"},
+
 	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -9780,6 +12038,60 @@
 	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
 	{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
 
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1 Audio Mixer"},
+
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2 Audio Mixer"},
+
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3 Audio Mixer"},
+
 	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -9906,6 +12218,42 @@
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
 	{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
 
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
+
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
+
 	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -10008,6 +12356,8 @@
 	{"MultiMedia3 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
 	{"MultiMedia5 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
 	{"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia2 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"},
+	{"MultiMedia2 Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
 	{"MultiMedia2 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
 	{"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 	{"MultiMedia1 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
@@ -10025,6 +12375,14 @@
 	{"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
 	{"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
 
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia1 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia1 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
 	{"MultiMedia1 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
 	{"MultiMedia1 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
 	{"MultiMedia1 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -10034,6 +12392,14 @@
 	{"MultiMedia1 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia1 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia2 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia2 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
 	{"MultiMedia2 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
 	{"MultiMedia2 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
 	{"MultiMedia2 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -10043,6 +12409,14 @@
 	{"MultiMedia2 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia2 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia3 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia3 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
 	{"MultiMedia3 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
 	{"MultiMedia3 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
 	{"MultiMedia3 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -10052,6 +12426,14 @@
 	{"MultiMedia3 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia3 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia4 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia4 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
 	{"MultiMedia4 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
 	{"MultiMedia4 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
 	{"MultiMedia4 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -10061,6 +12443,14 @@
 	{"MultiMedia4 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia4 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia5 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia5 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
 	{"MultiMedia5 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
 	{"MultiMedia5 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
 	{"MultiMedia5 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -10070,6 +12460,14 @@
 	{"MultiMedia5 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia5 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia6 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia6 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
 	{"MultiMedia6 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
 	{"MultiMedia6 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
 	{"MultiMedia6 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -10079,6 +12477,14 @@
 	{"MultiMedia6 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia6 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia8 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia8 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
 	{"MultiMedia8 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
 	{"MultiMedia8 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
 	{"MultiMedia8 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -10088,6 +12494,15 @@
 	{"MultiMedia8 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia8 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia9 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia9 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
 	{"MultiMedia1 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
 	{"MultiMedia2 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
 	{"MultiMedia4 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
@@ -10202,6 +12617,7 @@
 	{"MM_UL5", NULL, "MultiMedia5 Mixer"},
 	{"MM_UL6", NULL, "MultiMedia6 Mixer"},
 	{"MM_UL8", NULL, "MultiMedia8 Mixer"},
+	{"MM_UL9", NULL, "MultiMedia9 Mixer"},
 	{"MM_UL17", NULL, "MultiMedia17 Mixer"},
 	{"MM_UL18", NULL, "MultiMedia18 Mixer"},
 	{"MM_UL19", NULL, "MultiMedia19 Mixer"},
@@ -10526,6 +12942,9 @@
 	{"QUIN_MI2S_RX_Voice Mixer", "VoiceMMode2", "VOICEMMODE2_DL"},
 	{"QUIN_MI2S_RX", NULL, "QUIN_MI2S_RX_Voice Mixer"},
 
+	{"QUAT_TDM_RX_2_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+	{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2_Voice Mixer"},
+
 	{"VOC_EXT_EC MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
 	{"VOC_EXT_EC MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 	{"VOC_EXT_EC MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -10691,6 +13110,7 @@
 	{"VoiceMMode1_Tx Mixer", "SEC_AUX_PCM_TX_MMode1", "SEC_AUX_PCM_TX"},
 	{"VoiceMMode1_Tx Mixer", "TERT_AUX_PCM_TX_MMode1", "TERT_AUX_PCM_TX"},
 	{"VoiceMMode1_Tx Mixer", "QUAT_AUX_PCM_TX_MMode1", "QUAT_AUX_PCM_TX"},
+	{"VoiceMMode1_Tx Mixer", "QUAT_TDM_TX_0_MMode1", "QUAT_TDM_TX_0"},
 	{"VOICEMMODE1_UL", NULL, "VoiceMMode1_Tx Mixer"},
 
 	{"VoiceMMode2_Tx Mixer", "PRI_TX_MMode2", "PRI_I2S_TX"},
@@ -10743,70 +13163,81 @@
 	{"SLIM4_UL_HL", NULL, "SLIMBUS_4_TX"},
 	{"SLIM8_UL_HL", NULL, "SLIMBUS_8_TX"},
 
-	{"LSM1 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM1 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM1 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM1 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM1 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM1 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
-	{"LSM1_UL_HL", NULL, "LSM1 MUX"},
+	{"LSM1 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM1 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM1 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM1 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM1 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM1 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM1_UL_HL", NULL, "LSM1 Mixer"},
 
-	{"LSM2 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM2 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM2 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM2 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM2 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM2 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
-	{"LSM2_UL_HL", NULL, "LSM2 MUX"},
+	{"LSM2 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM2 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM2 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM2 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM2 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM2 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM2 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM2_UL_HL", NULL, "LSM2 Mixer"},
 
 
-	{"LSM3 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM3 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM3 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM3 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM3 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM3 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
-	{"LSM3_UL_HL", NULL, "LSM3 MUX"},
+	{"LSM3 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM3 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM3 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM3 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM3 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM3 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM3_UL_HL", NULL, "LSM3 Mixer"},
 
 
-	{"LSM4 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM4 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM4 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM4 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM4 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM4 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
-	{"LSM4_UL_HL", NULL, "LSM4 MUX"},
+	{"LSM4 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM4 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM4 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM4 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM4 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM4 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM4 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM4 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM4_UL_HL", NULL, "LSM4 Mixer"},
 
-	{"LSM5 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM5 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM5 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM5 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM5 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM5 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
-	{"LSM5_UL_HL", NULL, "LSM5 MUX"},
+	{"LSM5 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM5 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM5 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM5 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM5 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM5 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"LSM5 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"LSM5_UL_HL", NULL, "LSM5 Mixer"},
 
-	{"LSM6 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM6 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM6 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM6 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM6 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM6_UL_HL", NULL, "LSM6 MUX"},
+	{"LSM6 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM6 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM6 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM6 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM6 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM6_UL_HL", NULL, "LSM6 Mixer"},
 
+	{"LSM7 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM7 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM7 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM7 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM7 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM7 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM7_UL_HL", NULL, "LSM7 Mixer"},
 
-	{"LSM7 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM7 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM7 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM7 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM7 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM7_UL_HL", NULL, "LSM7 MUX"},
-
-
-	{"LSM8 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
-	{"LSM8 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
-	{"LSM8 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
-	{"LSM8 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
-	{"LSM8 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
-	{"LSM8_UL_HL", NULL, "LSM8 MUX"},
+	{"LSM8 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+	{"LSM8 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+	{"LSM8 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+	{"LSM8 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+	{"LSM8 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+	{"LSM8 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"LSM8_UL_HL", NULL, "LSM8 Mixer"},
 
 
 	{"CPE_LSM_UL_HL", NULL, "BE_IN"},
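For readers scanning the long route tables in this hunk: each entry is a {sink, control, source} triple. The sketch below is only an aid, using a locally defined stand-in for ASoC's struct snd_soc_dapm_route (the real type lives in the ASoC headers), to show how entries such as the new "LSM1 Mixer" routes read.

struct dapm_route_sketch {
	const char *sink;    /* widget that receives audio */
	const char *control; /* mixer/mux control name, or NULL */
	const char *source;  /* widget that supplies audio */
};

static const struct dapm_route_sketch lsm1_routes_sketch[] = {
	/* "LSM1 Mixer" pulls from SLIMBUS_0_TX when that switch is set */
	{"LSM1 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
	/* a NULL control marks an always-connected path */
	{"LSM1_UL_HL", NULL, "LSM1 Mixer"},
};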
@@ -10835,13 +13266,17 @@
 	{"HFP_AUX_UL_HL", "Switch", "SEC_AUX_PCM_TX"},
 	{"INTHFP_UL_HL", NULL, "HFP_INT_UL_HL"},
 	{"HFP_INT_UL_HL", "Switch", "INT_BT_SCO_TX"},
+	{"SLIM7_UL_HL", NULL, "HFP_SLIM7_UL_HL"},
+	{"HFP_SLIM7_UL_HL", "Switch", "SLIMBUS_7_TX"},
 	{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
 	{"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"},
 	{"MI2S_RX", NULL, "MI2S_DL_HL"},
 	{"MI2S_UL_HL", NULL, "MI2S_TX"},
 	{"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"},
 	{"PCM_RX", NULL, "PCM_RX_DL_HL"},
-	{"INT0_MI2S_RX_DL_HL", "Switch", "INT0_MI2S_DL_HL"},
+
+	/* Connect to INT4_MI2S_DL_HL since it uses the same pcm_id */
+	{"INT0_MI2S_RX_DL_HL", "Switch", "INT4_MI2S_DL_HL"},
 	{"INT0_MI2S_RX", NULL, "INT0_MI2S_RX_DL_HL"},
 	{"INT4_MI2S_RX_DL_HL", "Switch", "INT4_MI2S_DL_HL"},
 	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX_DL_HL"},
@@ -10855,26 +13290,40 @@
 	{"QUAT_MI2S_RX_DL_HL", "Switch", "QUAT_MI2S_DL_HL"},
 	{"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX_DL_HL"},
 	{"MI2S_UL_HL", NULL, "TERT_MI2S_TX"},
+	{"INT3_MI2S_UL_HL", NULL, "INT3_MI2S_TX"},
 	{"TERT_MI2S_UL_HL", NULL, "TERT_MI2S_TX"},
 	{"SEC_I2S_RX", NULL, "SEC_I2S_DL_HL"},
 	{"PRI_MI2S_UL_HL", NULL, "PRI_MI2S_TX"},
+	{"SEC_MI2S_UL_HL", NULL, "SEC_MI2S_TX"},
 	{"SEC_MI2S_RX", NULL, "SEC_MI2S_DL_HL"},
 	{"PRI_MI2S_RX", NULL, "PRI_MI2S_DL_HL"},
 	{"TERT_MI2S_RX", NULL, "TERT_MI2S_DL_HL"},
 	{"QUAT_MI2S_UL_HL", NULL, "QUAT_MI2S_TX"},
 
 	{"PRI_TDM_TX_0_UL_HL", NULL, "PRI_TDM_TX_0"},
+	{"PRI_TDM_TX_1_UL_HL", NULL, "PRI_TDM_TX_1"},
+	{"PRI_TDM_TX_2_UL_HL", NULL, "PRI_TDM_TX_2"},
+	{"PRI_TDM_TX_3_UL_HL", NULL, "PRI_TDM_TX_3"},
 	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0_DL_HL"},
+	{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1_DL_HL"},
+	{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2_DL_HL"},
+	{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3_DL_HL"},
 	{"SEC_TDM_TX_0_UL_HL", NULL, "SEC_TDM_TX_0"},
+	{"SEC_TDM_TX_1_UL_HL", NULL, "SEC_TDM_TX_1"},
+	{"SEC_TDM_TX_2_UL_HL", NULL, "SEC_TDM_TX_2"},
+	{"SEC_TDM_TX_3_UL_HL", NULL, "SEC_TDM_TX_3"},
 	{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0_DL_HL"},
+	{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1_DL_HL"},
+	{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2_DL_HL"},
+	{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3_DL_HL"},
 	{"TERT_TDM_TX_0_UL_HL", NULL, "TERT_TDM_TX_0"},
 	{"TERT_TDM_TX_1_UL_HL", NULL, "TERT_TDM_TX_1"},
 	{"TERT_TDM_TX_2_UL_HL", NULL, "TERT_TDM_TX_2"},
 	{"TERT_TDM_TX_3_UL_HL", NULL, "TERT_TDM_TX_3"},
 	{"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0_DL_HL"},
-	{"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_0_DL_HL"},
-	{"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_0_DL_HL"},
-	{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_0_DL_HL"},
+	{"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_1_DL_HL"},
+	{"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_2_DL_HL"},
+	{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3_DL_HL"},
 	{"QUAT_TDM_TX_0_UL_HL", NULL, "QUAT_TDM_TX_0"},
 	{"QUAT_TDM_TX_1_UL_HL", NULL, "QUAT_TDM_TX_1"},
 	{"QUAT_TDM_TX_2_UL_HL", NULL, "QUAT_TDM_TX_2"},
@@ -10884,6 +13333,150 @@
 	{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2_DL_HL"},
 	{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3_DL_HL"},
 
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_0 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Port Mixer"},
+
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_1 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1 Port Mixer"},
+
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_2 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2 Port Mixer"},
+
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"PRI_TDM_RX_3 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"PRI_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3 Port Mixer"},
+
+	{"SEC_TDM_RX_0 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_0 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Port Mixer"},
+
+	{"SEC_TDM_RX_1 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_1 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_1 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1 Port Mixer"},
+
+	{"SEC_TDM_RX_2 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_2 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_2 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2 Port Mixer"},
+
+	{"SEC_TDM_RX_3 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"SEC_TDM_RX_3 Port Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"SEC_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+	{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3 Port Mixer"},
+
 	{"TERT_TDM_RX_0 Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
 	{"TERT_TDM_RX_0 Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 	{"TERT_TDM_RX_0 Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
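The Port Mixer entries added above wire backend TX ports straight into backend RX ports, so a device-to-device loopback needs no front-end PCM at all. A brief annotated example, reusing the same illustrative triple layout sketched earlier (names taken from the table above):

/* When the "QUAT_TDM_TX_0" switch inside "PRI_TDM_RX_0 Port Mixer" is
 * enabled, audio captured on QUAT_TDM_TX_0 is looped back out on
 * PRI_TDM_RX_0 entirely within the DSP routing matrix.
 */
static const struct dapm_route_sketch tdm_loopback_sketch[] = {
	{"PRI_TDM_RX_0 Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
	{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Port Mixer"},
};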
@@ -11028,6 +13621,28 @@
 	{"QUAT_TDM_RX_3 Port Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 	{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Port Mixer"},
 
+	{"INT0_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"INT0_MI2S_RX Port Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"INT0_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"INT0_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"INT0_MI2S_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"INT0_MI2S_RX", NULL, "INT0_MI2S_RX Port Mixer"},
+
+	{"INT4_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+	{"INT4_MI2S_RX Port Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
+	{"INT4_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+	{"INT4_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"INT4_MI2S_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX Port Mixer"},
+
 	{"SLIMBUS_0_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
 	{"SLIMBUS_0_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"SLIMBUS_0_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
@@ -11047,6 +13662,12 @@
 	{"AFE_PCM_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
 	{"AFE_PCM_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
 	{"PCM_RX", NULL, "AFE_PCM_RX Port Mixer"},
+	{"USB_AUDIO_RX Port Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+	{"USB_AUDIO_RX", NULL, "USB_AUDIO_RX Port Mixer"},
+	{"USB_DL_HL", "Switch", "USBAUDIO_DL_HL"},
+	{"USB_AUDIO_RX", NULL, "USB_DL_HL"},
+	{"USBAUDIO_UL_HL", NULL, "USB_AUDIO_TX"},
+
 
 	{"AUX_PCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
 	{"AUX_PCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
@@ -11275,7 +13896,13 @@
 	{"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
 	{"BE_OUT", NULL, "VOICE2_PLAYBACK_TX"},
 	{"BE_OUT", NULL, "PRI_TDM_RX_0"},
+	{"BE_OUT", NULL, "PRI_TDM_RX_1"},
+	{"BE_OUT", NULL, "PRI_TDM_RX_2"},
+	{"BE_OUT", NULL, "PRI_TDM_RX_3"},
 	{"BE_OUT", NULL, "SEC_TDM_RX_0"},
+	{"BE_OUT", NULL, "SEC_TDM_RX_1"},
+	{"BE_OUT", NULL, "SEC_TDM_RX_2"},
+	{"BE_OUT", NULL, "SEC_TDM_RX_3"},
 	{"BE_OUT", NULL, "TERT_TDM_RX_0"},
 	{"BE_OUT", NULL, "TERT_TDM_RX_1"},
 	{"BE_OUT", NULL, "TERT_TDM_RX_2"},
@@ -11293,6 +13920,7 @@
 	{"TERT_MI2S_TX", NULL, "BE_IN"},
 	{"INT2_MI2S_TX", NULL, "BE_IN"},
 	{"INT3_MI2S_TX", NULL, "BE_IN"},
+	{"INT5_MI2S_TX", NULL, "BE_IN"},
 	{"SEC_MI2S_TX", NULL, "BE_IN"},
 	{"SENARY_MI2S_TX", NULL, "BE_IN" },
 	{"SLIMBUS_0_TX", NULL, "BE_IN" },
@@ -11321,11 +13949,21 @@
 	{"SLIM0_RX_VI_FB_LCH_MUX", "SLIM4_TX", "SLIMBUS_4_TX"},
 	{"SLIM0_RX_VI_FB_RCH_MUX", "SLIM4_TX", "SLIMBUS_4_TX"},
 	{"PRI_MI2S_RX_VI_FB_MUX", "SENARY_TX", "SENARY_TX"},
+	{"INT4_MI2S_RX_VI_FB_MONO_CH_MUX", "INT5_MI2S_TX", "INT5_MI2S_TX"},
+	{"INT4_MI2S_RX_VI_FB_STEREO_CH_MUX", "INT5_MI2S_TX", "INT5_MI2S_TX"},
 	{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_LCH_MUX"},
 	{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_RCH_MUX"},
 	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_VI_FB_MUX"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX_VI_FB_MONO_CH_MUX"},
+	{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX_VI_FB_STEREO_CH_MUX"},
 	{"PRI_TDM_TX_0", NULL, "BE_IN"},
+	{"PRI_TDM_TX_1", NULL, "BE_IN"},
+	{"PRI_TDM_TX_2", NULL, "BE_IN"},
+	{"PRI_TDM_TX_3", NULL, "BE_IN"},
 	{"SEC_TDM_TX_0", NULL, "BE_IN"},
+	{"SEC_TDM_TX_1", NULL, "BE_IN"},
+	{"SEC_TDM_TX_2", NULL, "BE_IN"},
+	{"SEC_TDM_TX_3", NULL, "BE_IN"},
 	{"TERT_TDM_TX_0", NULL, "BE_IN"},
 	{"TERT_TDM_TX_1", NULL, "BE_IN"},
 	{"TERT_TDM_TX_2", NULL, "BE_IN"},
@@ -11383,7 +14021,9 @@
 		path_type = ADM_PATH_LIVE_REC;
 
 	mutex_lock(&routing_lock);
-	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+	for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
+		if (!is_mm_lsm_fe_id(i))
+			continue;
 		fdai = &fe_dai_map[i][session_type];
 		if (fdai->strm_id != INVALID_SESSION) {
 			int idx;
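The loop above now iterates &bedai->fe_sessions[0] up to MSM_FRONTEND_DAI_MAX and skips ids that are neither multimedia nor LSM front ends. A minimal sketch of such a predicate follows; the enum bounds are assumptions for illustration, and the real is_mm_lsm_fe_id() helper is defined elsewhere in this driver.

static bool is_mm_lsm_fe_id_sketch(int fe_id)
{
	/* multimedia front ends occupy the low range of the enum ... */
	if (fe_id >= 0 && fe_id < MSM_FRONTEND_DAI_MM_SIZE)
		return true;
	/* ... and LSM1..LSM8 form a contiguous block further up */
	if (fe_id >= MSM_FRONTEND_DAI_LSM1 &&
	    fe_id <= MSM_FRONTEND_DAI_LSM8)
		return true;
	return false;
}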
@@ -11404,13 +14044,12 @@
 			clear_bit(idx,
 				  &session_copp_map[i][session_type][be_id]);
 			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
-				(bedai->compr_passthr_mode == LEGACY_PCM))
+				(bedai->passthr_mode == LEGACY_PCM))
 				msm_pcm_routing_deinit_pp(bedai->port_id,
 							  topology);
 		}
 	}
 
-	bedai->compr_passthr_mode = LEGACY_PCM;
 	bedai->active = 0;
 	bedai->sample_rate = 0;
 	bedai->channel = 0;
@@ -11429,6 +14068,8 @@
 	uint16_t bits_per_sample = 16, voc_path_type;
 	struct msm_pcm_routing_fdai_data *fdai;
 	u32 session_id;
+	struct media_format_info voc_be_media_format;
+	bool is_lsm;
 
 	pr_debug("%s: substream->pcm->id:%s\n",
 		 __func__, substream->pcm->id);
@@ -11441,7 +14082,7 @@
 	bedai = &msm_bedais[be_id];
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (bedai->compr_passthr_mode != LEGACY_PCM)
+		if (bedai->passthr_mode != LEGACY_PCM)
 			path_type = ADM_PATH_COMPRESSED_RX;
 		else
 			path_type = ADM_PATH_PLAYBACK;
@@ -11462,7 +14103,13 @@
 	 */
 	bedai->active = 1;
 
-	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+	for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
+		if (!(is_mm_lsm_fe_id(i) &&
+				route_check_fe_id_adm_support(i)))
+			continue;
+
+		is_lsm = (i >= MSM_FRONTEND_DAI_LSM1) &&
+				 (i <= MSM_FRONTEND_DAI_LSM8);
 		fdai = &fe_dai_map[i][session_type];
 		if (fdai->strm_id != INVALID_SESSION) {
 			int app_type, app_type_idx, copp_idx, acdb_dev_id;
@@ -11484,13 +14131,21 @@
 						bedai->format);
 
 			app_type =
-				fe_dai_app_type_cfg[i][session_type].app_type;
-			if (app_type) {
+			fe_dai_app_type_cfg[i][session_type][be_id].app_type;
+			if (app_type && is_lsm) {
+				app_type_idx =
+				msm_pcm_routing_get_lsm_app_type_idx(app_type);
+				sample_rate =
+				fe_dai_app_type_cfg[i][session_type][be_id]
+					.sample_rate;
+				bits_per_sample =
+				lsm_app_type_cfg[app_type_idx].bit_width;
+			} else if (app_type) {
 				app_type_idx =
 				msm_pcm_routing_get_app_type_idx(app_type);
 				sample_rate =
-					fe_dai_app_type_cfg[i][session_type].
-						sample_rate;
+					fe_dai_app_type_cfg[i][session_type]
+							   [be_id].sample_rate;
 				bits_per_sample =
 					app_type_cfg[app_type_idx].bit_width;
 			} else
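fe_dai_app_type_cfg is now indexed by back end as well as by front end and session type, which is why each lookup in this hunk gained a [be_id] subscript. The sketch below shows the widened table shape; SESSION_TYPE_MAX is assumed to count the RX/TX session types and the fields simply mirror the accesses shown, so treat it as illustrative rather than the driver's actual declaration.

struct app_type_cfg_sketch {
	int app_type;
	int acdb_dev_id;
	int sample_rate;
};

/* one app-type configuration per (front end, session type, back end) */
static struct app_type_cfg_sketch
	fe_dai_app_type_cfg_sketch[MSM_FRONTEND_DAI_MAX]
				  [SESSION_TYPE_MAX]
				  [MSM_BACKEND_DAI_MAX];

static int sketch_get_sample_rate(int fe_id, int sess, int be_id)
{
	return fe_dai_app_type_cfg_sketch[fe_id][sess][be_id].sample_rate;
}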
@@ -11504,9 +14159,9 @@
 			else
 				channels = bedai->adm_override_ch;
 			acdb_dev_id =
-			fe_dai_app_type_cfg[i][session_type].acdb_dev_id;
-			topology = msm_routing_get_adm_topology(path_type, i,
-						session_type);
+			fe_dai_app_type_cfg[i][session_type][be_id].acdb_dev_id;
+			topology = msm_routing_get_adm_topology(i, session_type,
+								be_id);
 			copp_idx = adm_open(bedai->port_id, path_type,
 					    sample_rate, channels, topology,
 					    fdai->perf_mode, bits_per_sample,
@@ -11530,20 +14185,20 @@
 					bedai->sample_rate);
 
 			msm_pcm_routing_build_matrix(i, session_type, path_type,
-						     fdai->perf_mode);
+						     fdai->perf_mode,
+						     bedai->passthr_mode);
 			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
-				(bedai->compr_passthr_mode ==
-					LEGACY_PCM))
+				(bedai->passthr_mode == LEGACY_PCM))
 				msm_pcm_routing_cfg_pp(bedai->port_id, copp_idx,
 						       topology, channels);
 		}
 	}
 
-	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MAX) {
+	for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
 		session_id = msm_pcm_routing_get_voc_sessionid(i);
 		if (session_id) {
-			pr_debug("%s voice session_id: 0x%x",
-				 __func__, session_id);
+			pr_debug("%s voice session_id: 0x%x\n", __func__,
+				 session_id);
 
 			if (session_type == SESSION_TYPE_TX)
 				voc_path_type = TX_PATH;
@@ -11551,8 +14206,19 @@
 				voc_path_type = RX_PATH;
 
 			voc_set_route_flag(session_id, voc_path_type, 1);
-			voc_set_device_config(session_id,  voc_path_type,
-					      bedai->channel, bedai->port_id);
+
+			memset(&voc_be_media_format, 0,
+			       sizeof(struct media_format_info));
+
+			voc_be_media_format.port_id = bedai->port_id;
+			voc_be_media_format.num_channels = bedai->channel;
+			voc_be_media_format.sample_rate = bedai->sample_rate;
+			voc_be_media_format.bits_per_sample = bedai->format;
+			/* Defaulting this to 1 for voice call use cases */
+			voc_be_media_format.channel_mapping[0] = 1;
+
+			voc_set_device_config(session_id, voc_path_type,
+					      &voc_be_media_format);
 
 			if (voc_get_route_flag(session_id, RX_PATH) &&
 			    voc_get_route_flag(session_id, TX_PATH))
@@ -11560,6 +14226,27 @@
 		}
 	}
 
+	/* Check if backend is an external ec ref port and set as needed */
+	if (unlikely(bedai->port_id == voc_get_ext_ec_ref_port_id())) {
+
+		memset(&voc_be_media_format, 0,
+		       sizeof(struct media_format_info));
+
+		/* Get format info for ec ref port from msm_bedais[] */
+		voc_be_media_format.port_id = bedai->port_id;
+		voc_be_media_format.num_channels = bedai->channel;
+		voc_be_media_format.bits_per_sample = bedai->format;
+		voc_be_media_format.sample_rate = bedai->sample_rate;
+		/* Defaulting this to 1 for voice call use cases */
+		voc_be_media_format.channel_mapping[0] = 1;
+		voc_set_ext_ec_ref_media_fmt_info(&voc_be_media_format);
+		pr_debug("%s: EC Ref media format info set to port_id=%d, num_channels=%d, bits_per_sample=%d, sample_rate=%d\n",
+			 __func__, voc_be_media_format.port_id,
+			 voc_be_media_format.num_channels,
+			 voc_be_media_format.bits_per_sample,
+			 voc_be_media_format.sample_rate);
+	}
+
 done:
 	mutex_unlock(&routing_lock);
 
@@ -11572,6 +14259,7 @@
 	unsigned long pp_config = 0;
 	bool mute_on;
 	int latency;
+	bool compr_passthr_mode = true;
 
 	pr_debug("%s: port_id %d, copp_idx %d\n", __func__, port_id, copp_idx);
 
@@ -11608,14 +14296,16 @@
 		return -EINVAL;
 	}
 
+	if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
+		(msm_bedais[be_idx].passthr_mode == LISTEN))
+		compr_passthr_mode = false;
+
 	pp_config = msm_bedais_pp_params[index].pp_params_config;
 	if (test_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config)) {
 		pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
 		clear_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config);
 		mute_on = msm_bedais_pp_params[index].mute_on;
-		if ((msm_bedais[be_idx].active) &&
-			(msm_bedais[be_idx].compr_passthr_mode !=
-			 LEGACY_PCM))
+		if ((msm_bedais[be_idx].active) && compr_passthr_mode)
 			adm_send_compressed_device_mute(port_id,
 								copp_idx,
 								mute_on);
@@ -11625,9 +14315,7 @@
 		clear_bit(ADM_PP_PARAM_LATENCY_BIT,
 			  &pp_config);
 		latency = msm_bedais_pp_params[index].latency;
-		if ((msm_bedais[be_idx].active) &&
-			(msm_bedais[be_idx].compr_passthr_mode !=
-			 LEGACY_PCM))
+		if ((msm_bedais[be_idx].active) && compr_passthr_mode)
 			adm_send_compressed_device_latency(port_id,
 							   copp_idx,
 							   latency);
@@ -11643,6 +14331,7 @@
 	int index, be_idx, i, topo_id, idx;
 	bool mute;
 	int latency;
+	bool compr_passthr_mode = true;
 
 	pr_debug("%s: pp_id: 0x%x\n", __func__, pp_id);
 
@@ -11667,7 +14356,11 @@
 		return -EINVAL;
 	}
 
-	for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions,
+	if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
+		(msm_bedais[be_idx].passthr_mode == LISTEN))
+		compr_passthr_mode = false;
+
+	for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
 				MSM_FRONTEND_DAI_MM_SIZE) {
 		for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
 			unsigned long copp =
@@ -11681,7 +14374,7 @@
 				continue;
 		pr_debug("%s: port: 0x%x, copp %ld, be active: %d, passt: %d\n",
 			 __func__, port_id, copp, msm_bedais[be_idx].active,
-			 msm_bedais[be_idx].compr_passthr_mode);
+			 msm_bedais[be_idx].passthr_mode);
 		switch (pp_id) {
 		case ADM_PP_PARAM_MUTE_ID:
 			pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
@@ -11689,9 +14382,7 @@
 			msm_bedais_pp_params[index].mute_on = mute;
 			set_bit(ADM_PP_PARAM_MUTE_BIT,
 				&msm_bedais_pp_params[index].pp_params_config);
-			if ((msm_bedais[be_idx].active) &&
-				(msm_bedais[be_idx].compr_passthr_mode !=
-				LEGACY_PCM))
+			if ((msm_bedais[be_idx].active) && compr_passthr_mode)
 				adm_send_compressed_device_mute(port_id,
 					idx, mute);
 			break;
@@ -11703,9 +14394,7 @@
 				&msm_bedais_pp_params[index].pp_params_config);
 			latency = msm_bedais_pp_params[index].latency =
 				ucontrol->value.integer.value[1];
-			if ((msm_bedais[be_idx].active) &&
-				(msm_bedais[be_idx].compr_passthr_mode !=
-				LEGACY_PCM))
+			if ((msm_bedais[be_idx].active) && compr_passthr_mode)
 				adm_send_compressed_device_latency(port_id,
 					idx, latency);
 			break;
@@ -11732,6 +14421,92 @@
 	msm_routing_put_device_pp_params_mixer),
 };
 
+static int msm_aptx_dec_license_control_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+			core_get_license_status(ASM_MEDIA_FMT_APTX);
+	pr_debug("%s: status %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_aptx_dec_license_control_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int32_t status = 0;
+
+	status = core_set_license(ucontrol->value.integer.value[0],
+				APTX_CLASSIC_DEC_LICENSE_ID);
+	pr_debug("%s: status %d\n", __func__, status);
+	return status;
+}
+
+static const struct snd_kcontrol_new aptx_dec_license_controls[] = {
+	SOC_SINGLE_EXT("APTX Dec License", SND_SOC_NOPM, 0,
+	0xFFFF, 0, msm_aptx_dec_license_control_get,
+	msm_aptx_dec_license_control_put),
+};
+
+static int msm_routing_be_dai_name_table_info(struct snd_kcontrol *kcontrol,
+					      struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(be_dai_name_table);
+	return 0;
+}
+
+static int msm_routing_be_dai_name_table_tlv_get(struct snd_kcontrol *kcontrol,
+						 unsigned int __user *bytes,
+						 unsigned int size)
+{
+	int i;
+	int ret;
+
+	if (size < sizeof(be_dai_name_table)) {
+		pr_err("%s: invalid size %d requested, returning\n",
+			__func__, size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Fill be_dai_name_table from msm_bedais table to reduce code changes
+	 * needed when adding new backends
+	 */
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		be_dai_name_table[i].be_id = i;
+		strlcpy(be_dai_name_table[i].be_name,
+			msm_bedais[i].name,
+			LPASS_BE_NAME_MAX_LENGTH);
+	}
+
+	ret = copy_to_user(bytes, &be_dai_name_table,
+			   sizeof(be_dai_name_table));
+	if (ret) {
+		pr_err("%s: failed to copy be_dai_name_table\n", __func__);
+		ret = -EFAULT;
+	}
+
+done:
+	return ret;
+}
+
+static const struct snd_kcontrol_new
+	msm_routing_be_dai_name_table_mixer_controls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
+		.info = msm_routing_be_dai_name_table_info,
+		.name = "Backend DAI Name Table",
+		.tlv.c = snd_soc_bytes_tlv_callback,
+		.private_value = (unsigned long) &(struct soc_bytes_ext) {
+			.max = sizeof(be_dai_name_table),
+			.get = msm_routing_be_dai_name_table_tlv_get,
+		}
+	},
+};
+
 static const struct snd_pcm_ops msm_routing_pcm_ops = {
 	.hw_params	= msm_pcm_routing_hw_params,
 	.close          = msm_pcm_routing_close,
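The "Backend DAI Name Table" control added above publishes the BE id-to-name map to userspace in one bytes TLV read. The layout sketched here is an assumption that mirrors the kernel-side be_dai_name_table (one fixed-size name per backend id); the real struct is defined elsewhere in this driver.

#include <stdint.h>

#define LPASS_BE_NAME_MAX_LENGTH 24	/* must match the kernel header */

struct be_name_entry_sketch {
	uint32_t be_id;				/* index into msm_bedais[] */
	char be_name[LPASS_BE_NAME_MAX_LENGTH];	/* NUL-terminated BE name */
};

/* The TLV read returns MSM_BACKEND_DAI_MAX such records back to back;
 * a userspace reader walks the blob until it finds the name it needs.
 */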
@@ -11748,8 +14523,8 @@
 
 	snd_soc_dapm_new_widgets(platform->component.dapm.card);
 
-	snd_soc_add_platform_controls(platform, lsm_function,
-				      ARRAY_SIZE(lsm_function));
+	snd_soc_add_platform_controls(platform, lsm_controls,
+				      ARRAY_SIZE(lsm_controls));
 
 	snd_soc_add_platform_controls(platform, aanc_slim_0_rx_mux,
 				      ARRAY_SIZE(aanc_slim_0_rx_mux));
@@ -11760,10 +14535,16 @@
 	snd_soc_add_platform_controls(platform, app_type_cfg_controls,
 				      ARRAY_SIZE(app_type_cfg_controls));
 
+	snd_soc_add_platform_controls(platform, lsm_app_type_cfg_controls,
+				      ARRAY_SIZE(lsm_app_type_cfg_controls));
+
 	snd_soc_add_platform_controls(platform,
 				stereo_to_custom_stereo_controls,
 			ARRAY_SIZE(stereo_to_custom_stereo_controls));
 
+	snd_soc_add_platform_controls(platform, ec_ref_param_controls,
+				ARRAY_SIZE(ec_ref_param_controls));
+
 	msm_qti_pp_add_controls(platform);
 
 	msm_dts_srs_tm_add_controls(platform);
@@ -11778,12 +14559,19 @@
 				device_pp_params_mixer_controls,
 				ARRAY_SIZE(device_pp_params_mixer_controls));
 
+	snd_soc_add_platform_controls(platform,
+		msm_routing_be_dai_name_table_mixer_controls,
+		ARRAY_SIZE(msm_routing_be_dai_name_table_mixer_controls));
+
 	msm_dts_eagle_add_controls(platform);
 
 	snd_soc_add_platform_controls(platform, msm_source_tracking_controls,
 				ARRAY_SIZE(msm_source_tracking_controls));
 	snd_soc_add_platform_controls(platform, adm_channel_config_controls,
 				ARRAY_SIZE(adm_channel_config_controls));
+
+	snd_soc_add_platform_controls(platform, aptx_dec_license_controls,
+					ARRAY_SIZE(aptx_dec_license_controls));
 	return 0;
 }
 
@@ -11844,21 +14632,12 @@
 		return 0;
 	}
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
-		if (test_bit(fedai_id, &msm_bedais[i].fe_sessions))
+		if (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))
 			return msm_bedais[i].active;
 	}
 	return 0;
 }
 
-static int get_cal_path(int path_type)
-{
-	if (path_type == ADM_PATH_PLAYBACK ||
-	    path_type == ADM_PATH_COMPRESSED_RX)
-		return RX_DEVICE;
-	else
-		return TX_DEVICE;
-}
-
 static int msm_routing_set_cal(int32_t cal_type,
 					size_t data_size, void *data)
 {
@@ -11916,6 +14695,11 @@
 	if (msm_routing_init_cal_data())
 		pr_err("%s: could not init cal data!\n", __func__);
 
+	afe_set_routing_callback(
+		(routing_cb)msm_pcm_get_dev_acdb_id_by_port_id);
+
+	memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+
 	return platform_driver_register(&msm_routing_pcm_driver);
 }
 module_init(msm_soc_routing_platform_init);
@@ -11923,6 +14707,8 @@
 static void __exit msm_soc_routing_platform_exit(void)
 {
 	msm_routing_delete_cal_data();
+	memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+	mutex_destroy(&routing_lock);
 	platform_driver_unregister(&msm_routing_pcm_driver);
 }
 module_exit(msm_soc_routing_platform_exit);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index d64fd64..fcd155e 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,12 @@
 #define _MSM_PCM_ROUTING_H
 #include <sound/apr_audio-v2.h>
 
+/*
+ * These names are used by the HAL to specify the BE. If any changes
+ * are made to the string names or the max name length, corresponding
+ * changes need to be made in the HAL to ensure they still match.
+ */
+#define LPASS_BE_NAME_MAX_LENGTH 24
 #define LPASS_BE_PRI_I2S_RX "PRIMARY_I2S_RX"
 #define LPASS_BE_PRI_I2S_TX "PRIMARY_I2S_TX"
 #define LPASS_BE_SLIMBUS_0_RX "SLIMBUS_0_RX"
@@ -64,6 +70,7 @@
 #define LPASS_BE_SLIMBUS_3_TX "SLIMBUS_3_TX"
 #define LPASS_BE_SLIMBUS_4_RX "SLIMBUS_4_RX"
 #define LPASS_BE_SLIMBUS_4_TX "SLIMBUS_4_TX"
+#define LPASS_BE_SLIMBUS_TX_VI "SLIMBUS_TX_VI"
 #define LPASS_BE_SLIMBUS_5_RX "SLIMBUS_5_RX"
 #define LPASS_BE_SLIMBUS_5_TX "SLIMBUS_5_TX"
 #define LPASS_BE_SLIMBUS_6_RX "SLIMBUS_6_RX"
@@ -390,6 +397,7 @@
 #define ADM_PP_PARAM_LATENCY_ID			1
 #define ADM_PP_PARAM_LATENCY_BIT		2
 #define BE_DAI_PORT_SESSIONS_IDX_MAX		4
+#define BE_DAI_FE_SESSIONS_IDX_MAX		2
 
 struct msm_pcm_routing_evt {
 	void (*event_func)(enum msm_pcm_routing_event, void *);
@@ -399,7 +407,9 @@
 struct msm_pcm_routing_bdai_data {
 	u16 port_id; /* AFE port ID */
 	u8 active; /* track if this backend is enabled */
-	unsigned long fe_sessions; /* Front-end sessions */
+
+	/* Front-end sessions */
+	unsigned long fe_sessions[BE_DAI_FE_SESSIONS_IDX_MAX];
 	/*
 	 * Track Tx BE ports -> Rx BE ports.
 	 * port_sessions[0] used to track BE 0 to BE 63.
@@ -413,7 +423,7 @@
 	unsigned int  channel;
 	unsigned int  format;
 	unsigned int  adm_override_ch;
-	u32 compr_passthr_mode;
+	u32 passthr_mode;
 	char *name;
 };
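fe_sessions changed from a single unsigned long to an array because one word only tracks BITS_PER_LONG front-end ids and MSM_FRONTEND_DAI_MAX has grown past that. A short sketch of the sizing follows; the driver fixes the length with BE_DAI_FE_SESSIONS_IDX_MAX, while BITS_TO_LONGS() shown here is the generic way to derive it.

/* bitmap wide enough for every front-end id */
unsigned long fe_sessions_sketch[BITS_TO_LONGS(MSM_FRONTEND_DAI_MAX)];

/* set_bit()/clear_bit()/for_each_set_bit() keep working unchanged,
 * they just take the address of word 0 and the total bit count:
 *
 *	set_bit(fe_id, &fe_sessions_sketch[0]);
 *	for_each_set_bit(i, &fe_sessions_sketch[0], MSM_FRONTEND_DAI_MAX)
 *		...;
 */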
 
@@ -465,8 +475,10 @@
 void msm_pcm_routing_acquire_lock(void);
 void msm_pcm_routing_release_lock(void);
 
-void msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int app_type,
-			int acdb_dev_id, int sample_rate, int session_type);
+int msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int session_type,
+					     int be_id, int app_type,
+					     int acdb_dev_id, int sample_rate);
 int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
-			int *app_type, int *acdb_dev_id, int *sample_rate);
+					    int be_id, int *app_type,
+					    int *acdb_dev_id, int *sample_rate);
 #endif /*_MSM_PCM_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index fa71eea..e39e642 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -390,6 +390,32 @@
 	return ret;
 }
 
+static int msm_voice_sidetone_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret;
+	long sidetone_enable = ucontrol->value.integer.value[0];
+	uint32_t session_id = ALL_SESSION_VSID;
+
+	if (sidetone_enable < 0) {
+		pr_err("%s: Invalid argument sidetone enable %ld\n",
+			 __func__, sidetone_enable);
+		ret = -EINVAL;
+		return ret;
+	}
+	ret = voc_set_afe_sidetone(session_id, sidetone_enable);
+	pr_debug("%s: AFE Sidetone enable=%d session_id=0x%x ret=%d\n",
+		 __func__, sidetone_enable, session_id, ret);
+	return ret;
+}
+
+static int msm_voice_sidetone_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = voc_get_afe_sidetone();
+	return 0;
+}
+
 static int msm_voice_gain_put(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_value *ucontrol)
 {
@@ -632,6 +658,8 @@
 		.info	= msm_voice_cvd_version_info,
 		.get	= msm_voice_cvd_version_get,
 	},
+	SOC_SINGLE_MULTI_EXT("Voice Sidetone Enable", SND_SOC_NOPM, 0, 1, 0, 1,
+			     msm_voice_sidetone_get, msm_voice_sidetone_put),
 };
 
 static const struct snd_pcm_ops msm_pcm_ops = {
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index a82d1cb..02225f0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -815,10 +815,20 @@
 			if (prtd->mode == MODE_PCM) {
 				ret = copy_from_user(&buf_node->frame.voc_pkt,
 							buf, count);
+				if (ret) {
+					pr_err("%s: copy from user failed %d\n",
+					       __func__, ret);
+					return -EFAULT;
+				}
 				buf_node->frame.pktlen = count;
 			} else {
 				ret = copy_from_user(&buf_node->frame,
 							buf, count);
+				if (ret) {
+					pr_err("%s: copy from user failed %d\n",
+					       __func__, ret);
+					return -EFAULT;
+				}
 				if (buf_node->frame.pktlen >= count)
 					buf_node->frame.pktlen = count -
 					(sizeof(buf_node->frame.frm_hdr) +
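The error handling added in this hunk relies on copy_from_user() returning the number of bytes it could not copy rather than a negative errno, so any non-zero return is mapped to -EFAULT before bailing out. A small hedged sketch of that contract, not the driver's actual code:

static int pull_pkt_from_user_sketch(void *dst, const void __user *src,
				     size_t count)
{
	unsigned long not_copied = copy_from_user(dst, src, count);

	if (not_copied) {
		/* report how much was left, but return a proper errno */
		pr_err("copy from user failed, %lu bytes left\n", not_copied);
		return -EFAULT;
	}
	return 0;
}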
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index 9276231..c60b27f 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -81,6 +81,10 @@
 static const DECLARE_TLV_DB_LINEAR(hfp_rx_vol_gain, 0,
 				INT_RX_VOL_MAX_STEPS);
 
+static int msm_route_icc_vol_control;
+static const DECLARE_TLV_DB_LINEAR(icc_rx_vol_gain, 0,
+				INT_RX_VOL_MAX_STEPS);
+
 static int msm_route_pri_auxpcm_lb_vol_ctrl;
 static const DECLARE_TLV_DB_LINEAR(pri_auxpcm_lb_vol_gain, 0,
 				INT_RX_VOL_MAX_STEPS);
@@ -89,6 +93,8 @@
 static const DECLARE_TLV_DB_LINEAR(sec_auxpcm_lb_vol_gain, 0,
 				INT_RX_VOL_MAX_STEPS);
 
+static int msm_multichannel_ec_primary_mic_ch;
+
 static void msm_qti_pp_send_eq_values_(int eq_idx)
 {
 	int result;
@@ -342,7 +348,7 @@
 	uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t);
 	struct msm_pcm_routing_bdai_data msm_bedai;
 
-	param_value = kzalloc(param_length, GFP_KERNEL);
+	param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL);
 	if (!param_value)
 		return -ENOMEM;
 
@@ -399,6 +405,7 @@
 static int msm_afe_sec_mi2s_lb_vol_ctrl;
 static int msm_afe_tert_mi2s_lb_vol_ctrl;
 static int msm_afe_quat_mi2s_lb_vol_ctrl;
+static int msm_afe_slimbus_7_lb_vol_ctrl;
 static int msm_afe_slimbus_8_lb_vol_ctrl;
 static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0, INT_RX_VOL_MAX_STEPS);
 static const DECLARE_TLV_DB_LINEAR(afe_lb_vol_gain, 0, INT_RX_VOL_MAX_STEPS);
@@ -471,6 +478,29 @@
 	return 0;
 }
 
+static int msm_qti_pp_get_slimbus_7_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_afe_slimbus_7_lb_vol_ctrl;
+	return 0;
+}
+
+static int msm_qti_pp_set_slimbus_7_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = afe_loopback_gain(SLIMBUS_7_TX,
+				ucontrol->value.integer.value[0]);
+
+	if (ret)
+		pr_err("%s: failed to set LB vol for SLIMBUS_7_TX, err %d\n",
+			__func__, ret);
+	else
+		msm_afe_slimbus_7_lb_vol_ctrl =
+				ucontrol->value.integer.value[0];
+
+	return ret;
+}
+
 static int msm_qti_pp_get_slimbus_8_lb_vol_mixer(struct snd_kcontrol *kcontrol,
 				       struct snd_ctl_elem_value *ucontrol)
 {
@@ -495,6 +525,23 @@
 	return ret;
 }
 
+static int msm_qti_pp_get_icc_vol_mixer(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_icc_vol_control;
+	return 0;
+}
+
+static int msm_qti_pp_set_icc_vol_mixer(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	adm_set_mic_gain(AFE_PORT_ID_QUATERNARY_TDM_TX,
+		adm_get_default_copp_idx(AFE_PORT_ID_QUATERNARY_TDM_TX),
+		ucontrol->value.integer.value[0]);
+	msm_route_icc_vol_control = ucontrol->value.integer.value[0];
+	return 0;
+}
+
 static int msm_qti_pp_get_quat_mi2s_fm_vol_mixer(struct snd_kcontrol *kcontrol,
 				       struct snd_ctl_elem_value *ucontrol)
 {
@@ -581,7 +628,7 @@
 static int msm_qti_pp_get_channel_map_mixer(struct snd_kcontrol *kcontrol,
 					    struct snd_ctl_elem_value *ucontrol)
 {
-	char channel_map[PCM_FORMAT_MAX_NUM_CHANNEL];
+	char channel_map[PCM_FORMAT_MAX_NUM_CHANNEL] = {0};
 	int i;
 
 	adm_get_multi_ch_map(channel_map, ADM_PATH_PLAYBACK);
@@ -774,6 +821,43 @@
 	return 0;
 }
 
+static int msm_multichannel_ec_primary_mic_ch_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int copp_idx = 0;
+	int port_id = AFE_PORT_ID_QUATERNARY_TDM_TX;
+
+	msm_multichannel_ec_primary_mic_ch = ucontrol->value.integer.value[0];
+	pr_debug("%s: msm_multichannel_ec_primary_mic_ch = %u\n",
+		__func__, msm_multichannel_ec_primary_mic_ch);
+	copp_idx = adm_get_default_copp_idx(port_id);
+	if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
+		pr_err("%s : no active copp to query multichannel ec copp_idx: %u\n",
+			__func__, copp_idx);
+		return -EINVAL;
+	}
+	adm_send_set_multichannel_ec_primary_mic_ch(port_id, copp_idx,
+		msm_multichannel_ec_primary_mic_ch);
+
+	return ret;
+}
+
+static int msm_multichannel_ec_primary_mic_ch_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_multichannel_ec_primary_mic_ch;
+	pr_debug("%s: msm_multichannel_ec_primary_mic_ch = %lu\n",
+		__func__, ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static const struct snd_kcontrol_new msm_multichannel_ec_controls[] = {
+	SOC_SINGLE_EXT("Multichannel EC Primary Mic Ch", SND_SOC_NOPM, 0,
+		0xFFFFFFFF, 0, msm_multichannel_ec_primary_mic_ch_get,
+		msm_multichannel_ec_primary_mic_ch_put),
+};
+
 static const struct snd_kcontrol_new int_fm_vol_mixer_controls[] = {
 	SOC_SINGLE_EXT_TLV("Internal FM RX Volume", SND_SOC_NOPM, 0,
 	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_fm_vol_mixer,
@@ -801,6 +885,14 @@
 	msm_qti_pp_set_tert_mi2s_lb_vol_mixer, afe_lb_vol_gain),
 };
 
+static const struct snd_kcontrol_new slimbus_7_lb_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("SLIMBUS_7 LOOPBACK Volume", SND_SOC_NOPM, 0,
+				INT_RX_VOL_GAIN, 0,
+				msm_qti_pp_get_slimbus_7_lb_vol_mixer,
+				msm_qti_pp_set_slimbus_7_lb_vol_mixer,
+				afe_lb_vol_gain),
+};
+
 static const struct snd_kcontrol_new slimbus_8_lb_vol_mixer_controls[] = {
 	SOC_SINGLE_EXT_TLV("SLIMBUS_8 LOOPBACK Volume", SND_SOC_NOPM, 0,
 	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_slimbus_8_lb_vol_mixer,
@@ -813,6 +905,12 @@
 	msm_qti_pp_set_hfp_vol_mixer, hfp_rx_vol_gain),
 };
 
+static const struct snd_kcontrol_new int_icc_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("Internal ICC Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_qti_pp_get_icc_vol_mixer,
+	msm_qti_pp_set_icc_vol_mixer, icc_rx_vol_gain),
+};
+
 static const struct snd_kcontrol_new pri_auxpcm_lb_vol_mixer_controls[] = {
 	SOC_SINGLE_EXT_TLV("PRI AUXPCM LOOPBACK Volume",
 	AFE_PORT_ID_PRIMARY_PCM_TX, 0, INT_RX_VOL_GAIN, 0,
@@ -999,12 +1097,18 @@
 	snd_soc_add_platform_controls(platform, tert_mi2s_lb_vol_mixer_controls,
 			ARRAY_SIZE(tert_mi2s_lb_vol_mixer_controls));
 
+	snd_soc_add_platform_controls(platform, slimbus_7_lb_vol_mixer_controls,
+			ARRAY_SIZE(slimbus_7_lb_vol_mixer_controls));
+
 	snd_soc_add_platform_controls(platform, slimbus_8_lb_vol_mixer_controls,
 			ARRAY_SIZE(slimbus_8_lb_vol_mixer_controls));
 
 	snd_soc_add_platform_controls(platform, int_hfp_vol_mixer_controls,
 			ARRAY_SIZE(int_hfp_vol_mixer_controls));
 
+	snd_soc_add_platform_controls(platform, int_icc_vol_mixer_controls,
+			ARRAY_SIZE(int_icc_vol_mixer_controls));
+
 	snd_soc_add_platform_controls(platform,
 			pri_auxpcm_lb_vol_mixer_controls,
 			ARRAY_SIZE(pri_auxpcm_lb_vol_mixer_controls));
@@ -1031,5 +1135,8 @@
 
 	snd_soc_add_platform_controls(platform, asphere_mixer_controls,
 			ARRAY_SIZE(asphere_mixer_controls));
+
+	snd_soc_add_platform_controls(platform, msm_multichannel_ec_controls,
+			ARRAY_SIZE(msm_multichannel_ec_controls));
 }
 #endif /* CONFIG_QTI_PP */
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 26d2b80..90d640d 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -39,6 +39,16 @@
 #define ULL_SUPPORTED_BITS_PER_SAMPLE 16
 #define ULL_SUPPORTED_SAMPLE_RATE 48000
 
+#ifndef CONFIG_DOLBY_DAP
+#undef DOLBY_ADM_COPP_TOPOLOGY_ID
+#define DOLBY_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFE
+#endif
+
+#ifndef CONFIG_DOLBY_DS2
+#undef DS2_ADM_COPP_TOPOLOGY_ID
+#define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF
+#endif
+
 /* ENUM for adm_status */
 enum adm_cal_status {
 	ADM_STATUS_CALIBRATION_REQUIRED = 0,
@@ -92,6 +102,9 @@
 
 	int set_custom_topology;
 	int ec_ref_rx;
+	int num_ec_ref_rx_chans;
+	int ec_ref_rx_bit_width;
+	int ec_ref_rx_sampling_rate;
 };
 
 static struct adm_ctl			this_adm;
@@ -976,9 +989,10 @@
 		      char *params, uint32_t client_id)
 {
 	struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
-	int sz, rc = 0, i = 0;
+	int rc = 0, i = 0;
 	int port_idx, idx;
 	int *params_data = (int *)params;
+	uint64_t sz = 0;
 
 	port_id = afe_convert_virtual_to_portid(port_id);
 	port_idx = adm_validate_and_get_port_index(port_id);
@@ -987,7 +1001,16 @@
 		return -EINVAL;
 	}
 
-	sz = sizeof(struct adm_cmd_get_pp_params_v5) + params_length;
+	sz = (uint64_t)sizeof(struct adm_cmd_get_pp_params_v5) +
+				(uint64_t)params_length;
+	/*
+	 * Check if the value of "sz" (which is ultimately assigned to
+	 * "hdr.pkt_size") exceeds U16_MAX.
+	 */
+	if (sz > U16_MAX) {
+		pr_err("%s: Invalid params_length\n", __func__);
+		return -EINVAL;
+	}
 	adm_params = kzalloc(sz, GFP_KERNEL);
 	if (!adm_params) {
 		pr_err("%s: adm params memory alloc failed", __func__);
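The widened 64-bit sz plus the U16_MAX guard above protect the 16-bit APR pkt_size field from truncation when params_length is caller controlled. A standalone C sketch of the failure mode it prevents (plain userspace C; the header size is an assumed value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t header = 40;            /* assumed APR header size     */
	uint32_t params_length = 70000;  /* caller-supplied, too large  */
	uint64_t sz = (uint64_t)header + params_length;

	if (sz > UINT16_MAX) {
		/* reject instead of truncating into a 16-bit pkt_size */
		fprintf(stderr, "invalid params_length\n");
		return -1;
	}
	printf("pkt_size=%u\n", (unsigned int)(uint16_t)sz);
	return 0;
}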
@@ -1357,6 +1380,7 @@
 				 */
 			case ADM_CMD_DEVICE_OPEN_V5:
 			case ADM_CMD_DEVICE_CLOSE_V5:
+			case ADM_CMD_DEVICE_OPEN_V6:
 				pr_debug("%s: Basic callback received, wake up.\n",
 					__func__);
 				atomic_set(&this_adm.copp.stat[port_idx]
@@ -1452,7 +1476,8 @@
 		}
 
 		switch (data->opcode) {
-		case ADM_CMDRSP_DEVICE_OPEN_V5: {
+		case ADM_CMDRSP_DEVICE_OPEN_V5:
+		case ADM_CMDRSP_DEVICE_OPEN_V6: {
 			struct adm_cmd_rsp_device_open_v5 *open =
 			(struct adm_cmd_rsp_device_open_v5 *)data->payload;
 
@@ -2261,10 +2286,64 @@
 	return rc;
 }
 
+int adm_arrange_mch_ep2_map(struct adm_cmd_device_open_v6 *open_v6,
+			 int channel_mode)
+{
+	int rc = 0;
+
+	memset(open_v6->dev_channel_mapping_eid2, 0,
+	       PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (channel_mode == 1)	{
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FC;
+	} else if (channel_mode == 2) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+	} else if (channel_mode == 3)	{
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
+	} else if (channel_mode == 4) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_RS;
+	} else if (channel_mode == 5) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_RS;
+	} else if (channel_mode == 6) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
+		open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
+	} else if (channel_mode == 8) {
+		open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+		open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+		open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
+		open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
+		open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
+		open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
+		open_v6->dev_channel_mapping_eid2[6] = PCM_CHANNEL_LB;
+		open_v6->dev_channel_mapping_eid2[7] = PCM_CHANNEL_RB;
+	} else {
+		pr_err("%s: invalid num_chan %d\n", __func__,
+			channel_mode);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
 int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
 	     int perf_mode, uint16_t bit_width, int app_type, int acdb_id)
 {
 	struct adm_cmd_device_open_v5	open;
+	struct adm_cmd_device_open_v6	open_v6;
 	int ret = 0;
 	int port_idx, copp_idx, flags;
 	int tmp_port = q6audio_get_port_id(port_id);
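adm_arrange_mch_ep2_map() above fills a default speaker-position map for the EC-reference endpoint based only on its channel count. The table below restates those defaults in lookup form; the PCM_CHANNEL_* names are the driver's, while the table-driven shape is only illustrative (channel_mode 7 and anything above 8 stay unmapped and are rejected with -EINVAL, as in the if/else chain).

static const u8 ep2_default_map_sketch[9][8] = {
	[1] = { PCM_CHANNEL_FC },
	[2] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR },
	[3] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_FC },
	[4] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR,
		PCM_CHANNEL_LS, PCM_CHANNEL_RS },
	[5] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_FC,
		PCM_CHANNEL_LS, PCM_CHANNEL_RS },
	[6] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_LFE,
		PCM_CHANNEL_FC, PCM_CHANNEL_LS, PCM_CHANNEL_RS },
	[8] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_LFE,
		PCM_CHANNEL_FC, PCM_CHANNEL_LS, PCM_CHANNEL_RS,
		PCM_CHANNEL_LB, PCM_CHANNEL_RB },
};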
@@ -2399,6 +2478,7 @@
 				pr_err("%s: DTS_EAGLE mmap did not work!",
 					__func__);
 		}
+		memset(&open, 0, sizeof(struct adm_cmd_device_open_v5));
 		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
 						   APR_HDR_LEN(APR_HDR_SIZE),
 						   APR_PKT_VER);
@@ -2414,10 +2494,9 @@
 		open.flags = flags;
 		open.mode_of_operation = path;
 		open.endpoint_id_1 = tmp_port;
+		open.endpoint_id_2 = 0xFFFF;
 
-		if (this_adm.ec_ref_rx == -1) {
-			open.endpoint_id_2 = 0xFFFF;
-		} else if (this_adm.ec_ref_rx && (path != 1)) {
+		if (this_adm.ec_ref_rx && (path != 1)) {
 			open.endpoint_id_2 = this_adm.ec_ref_rx;
 			this_adm.ec_ref_rx = -1;
 		}
@@ -2441,7 +2520,49 @@
 
 		atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
 
-		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+		if ((this_adm.num_ec_ref_rx_chans != 0) && (path != 1) &&
+			(open.endpoint_id_2 != 0xFFFF)) {
+			memset(&open_v6, 0,
+				sizeof(struct adm_cmd_device_open_v6));
+			memcpy(&open_v6, &open,
+				sizeof(struct adm_cmd_device_open_v5));
+			open_v6.hdr.opcode = ADM_CMD_DEVICE_OPEN_V6;
+			open_v6.hdr.pkt_size = sizeof(open_v6);
+			open_v6.dev_num_channel_eid2 =
+				this_adm.num_ec_ref_rx_chans;
+			this_adm.num_ec_ref_rx_chans = 0;
+
+			if (this_adm.ec_ref_rx_bit_width != 0) {
+				open_v6.bit_width_eid2 =
+					this_adm.ec_ref_rx_bit_width;
+				this_adm.ec_ref_rx_bit_width = 0;
+			} else {
+				open_v6.bit_width_eid2 = bit_width;
+			}
+
+			if (this_adm.ec_ref_rx_sampling_rate != 0) {
+				open_v6.sample_rate_eid2 =
+					this_adm.ec_ref_rx_sampling_rate;
+				this_adm.ec_ref_rx_sampling_rate = 0;
+			} else {
+				open_v6.sample_rate_eid2 = rate;
+			}
+
+			pr_debug("%s: eid2_channels=%d eid2_bit_width=%d eid2_rate=%d\n",
+				__func__, open_v6.dev_num_channel_eid2,
+				open_v6.bit_width_eid2,
+				open_v6.sample_rate_eid2);
+
+			ret = adm_arrange_mch_ep2_map(&open_v6,
+				open_v6.dev_num_channel_eid2);
+
+			if (ret)
+				return ret;
+
+			ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open_v6);
+		} else {
+			ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+		}
 		if (ret < 0) {
 			pr_err("%s: port_id: 0x%x for[0x%x] failed %d\n",
 			__func__, tmp_port, port_id, ret);
@@ -2471,7 +2592,6 @@
 	return copp_idx;
 }
 
-
 void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
 {
 	struct audproc_mfc_output_media_fmt mfc_cfg;
@@ -2574,8 +2694,43 @@
 	return;
 }
 
+static void route_set_opcode_matrix_id(
+			struct adm_cmd_matrix_map_routings_v5 **route_addr,
+			int path, uint32_t passthr_mode)
+{
+	struct adm_cmd_matrix_map_routings_v5 *route = *route_addr;
 
-int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode)
+	switch (path) {
+	case ADM_PATH_PLAYBACK:
+		route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
+		break;
+	case ADM_PATH_LIVE_REC:
+		if (passthr_mode == LISTEN) {
+			route->hdr.opcode =
+				ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+			route->matrix_id = ADM_MATRIX_ID_LISTEN_TX;
+			break;
+		}
+		/* fall through to set matrix id for non-listen case */
+	case ADM_PATH_NONLIVE_REC:
+		route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
+		break;
+	case ADM_PATH_COMPRESSED_RX:
+		route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_RX;
+		break;
+	default:
+		pr_err("%s: Wrong path set[%d]\n", __func__, path);
+		break;
+	}
+	pr_debug("%s: opcode 0x%x, matrix id %d\n",
+		 __func__, route->hdr.opcode, route->matrix_id);
+}
+
+int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode,
+			uint32_t passthr_mode)
 {
 	struct adm_cmd_matrix_map_routings_v5	*route;
 	struct adm_session_map_node_v5 *node;
@@ -2608,32 +2763,9 @@
 	route->hdr.dest_domain = APR_DOMAIN_ADSP;
 	route->hdr.dest_port = 0; /* Ignored */;
 	route->hdr.token = 0;
-	if (path == ADM_PATH_COMPRESSED_RX) {
-		pr_debug("%s: ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5 0x%x\n",
-			 __func__, ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5);
-		route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
-	} else {
-		pr_debug("%s: DM_CMD_MATRIX_MAP_ROUTINGS_V5 0x%x\n",
-			 __func__, ADM_CMD_MATRIX_MAP_ROUTINGS_V5);
-		route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
-	}
 	route->num_sessions = 1;
+	route_set_opcode_matrix_id(&route, path, passthr_mode);
 
-	switch (path) {
-	case ADM_PATH_PLAYBACK:
-		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
-		break;
-	case ADM_PATH_LIVE_REC:
-	case ADM_PATH_NONLIVE_REC:
-		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
-		break;
-	case ADM_PATH_COMPRESSED_RX:
-		route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_RX;
-		break;
-	default:
-		pr_err("%s: Wrong path set[%d]\n", __func__, path);
-		break;
-	}
 	payload = ((u8 *)matrix_map +
 			sizeof(struct adm_cmd_matrix_map_routings_v5));
 	node = (struct adm_session_map_node_v5 *)payload;
@@ -2701,8 +2833,8 @@
 							[port_idx][copp_idx]),
 					    get_cal_path(path),
 					    payload_map.session_id,
-					    payload_map.app_type,
-					    payload_map.acdb_dev_id);
+					    payload_map.app_type[i],
+					    payload_map.acdb_dev_id[i]);
 
 			if (!test_bit(ADM_STATUS_CALIBRATION_REQUIRED,
 				(void *)&this_adm.copp.adm_status[port_idx]
@@ -2713,9 +2845,9 @@
 			}
 			send_adm_cal(payload_map.port_id[i], copp_idx,
 				     get_cal_path(path), perf_mode,
-				     payload_map.app_type,
-				     payload_map.acdb_dev_id,
-				     payload_map.sample_rate);
+				     payload_map.app_type[i],
+				     payload_map.acdb_dev_id[i],
+				     payload_map.sample_rate[i]);
 			/* ADM COPP calibration is already sent */
 			clear_bit(ADM_STATUS_CALIBRATION_REQUIRED,
 				(void *)&this_adm.copp.
@@ -2734,7 +2866,28 @@
 void adm_ec_ref_rx_id(int port_id)
 {
 	this_adm.ec_ref_rx = port_id;
-	pr_debug("%s: ec_ref_rx:%d", __func__, this_adm.ec_ref_rx);
+	pr_debug("%s: ec_ref_rx:%d\n", __func__, this_adm.ec_ref_rx);
+}
+
+void adm_num_ec_ref_rx_chans(int num_chans)
+{
+	this_adm.num_ec_ref_rx_chans = num_chans;
+	pr_debug("%s: num_ec_ref_rx_chans:%d\n",
+		__func__, this_adm.num_ec_ref_rx_chans);
+}
+
+void adm_ec_ref_rx_bit_width(int bit_width)
+{
+	this_adm.ec_ref_rx_bit_width = bit_width;
+	pr_debug("%s: ec_ref_rx_bit_width:%d\n",
+		__func__, this_adm.ec_ref_rx_bit_width);
+}
+
+void adm_ec_ref_rx_sampling_rate(int sampling_rate)
+{
+	this_adm.ec_ref_rx_sampling_rate = sampling_rate;
+	pr_debug("%s: ec_ref_rx_sampling_rate:%d\n",
+		__func__, this_adm.ec_ref_rx_sampling_rate);
 }
 
 int adm_close(int port_id, int perf_mode, int copp_idx)
@@ -3481,6 +3634,172 @@
 	return rc;
 }
 
+int adm_set_mic_gain(int port_id, int copp_idx, int volume)
+{
+	struct adm_set_mic_gain_params	mic_gain_params;
+	int rc = 0;
+	int sz, port_idx;
+
+	pr_debug("%s:\n", __func__);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_set_mic_gain_params);
+
+	mic_gain_params.params.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mic_gain_params.params.hdr.pkt_size = sz;
+	mic_gain_params.params.hdr.src_svc = APR_SVC_ADM;
+	mic_gain_params.params.hdr.src_domain = APR_DOMAIN_APPS;
+	mic_gain_params.params.hdr.src_port = port_id;
+	mic_gain_params.params.hdr.dest_svc = APR_SVC_ADM;
+	mic_gain_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	mic_gain_params.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	mic_gain_params.params.hdr.token = port_idx << 16 | copp_idx;
+	mic_gain_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	mic_gain_params.params.payload_addr_lsw = 0;
+	mic_gain_params.params.payload_addr_msw = 0;
+	mic_gain_params.params.mem_map_handle = 0;
+	mic_gain_params.params.payload_size =
+		sizeof(struct adm_param_data_v5) +
+		sizeof(struct admx_mic_gain);
+	mic_gain_params.data.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL;
+	mic_gain_params.data.param_id = ADM_PARAM_IDX_MIC_GAIN;
+	mic_gain_params.data.param_size =
+		sizeof(struct admx_mic_gain);
+	mic_gain_params.data.reserved = 0;
+	mic_gain_params.mic_gain_data.tx_mic_gain = volume;
+	mic_gain_params.mic_gain_data.reserved = 0;
+	pr_debug("%s: Mic Gain set to %d at port_id 0x%x\n",
+		__func__, volume, port_id);
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mic_gain_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+			__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Mic Gain Set params timed out port = %#x\n",
+			 __func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx,
+			int primary_mic_ch)
+{
+	struct adm_set_sec_primary_ch_params sec_primary_ch_params;
+	int rc = 0;
+	int sz, port_idx;
+
+	pr_debug("%s port_id 0x%x, copp_idx 0x%x, primary_mic_ch %d\n",
+			__func__, port_id,  copp_idx,  primary_mic_ch);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+		return -EINVAL;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	sz = sizeof(struct adm_set_sec_primary_ch_params);
+
+	sec_primary_ch_params.params.hdr.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	sec_primary_ch_params.params.hdr.pkt_size = sz;
+	sec_primary_ch_params.params.hdr.src_svc = APR_SVC_ADM;
+	sec_primary_ch_params.params.hdr.src_domain = APR_DOMAIN_APPS;
+	sec_primary_ch_params.params.hdr.src_port = port_id;
+	sec_primary_ch_params.params.hdr.dest_svc = APR_SVC_ADM;
+	sec_primary_ch_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	sec_primary_ch_params.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	sec_primary_ch_params.params.hdr.token = port_idx << 16 | copp_idx;
+	sec_primary_ch_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	sec_primary_ch_params.params.payload_addr_lsw = 0;
+	sec_primary_ch_params.params.payload_addr_msw = 0;
+	sec_primary_ch_params.params.mem_map_handle = 0;
+	sec_primary_ch_params.params.payload_size =
+			sizeof(struct adm_param_data_v5) +
+			sizeof(struct admx_sec_primary_mic_ch);
+	sec_primary_ch_params.data.module_id =
+			AUDPROC_MODULE_ID_VOICE_TX_SECNS;
+	sec_primary_ch_params.data.param_id =
+			AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH;
+	sec_primary_ch_params.data.param_size =
+			sizeof(struct admx_sec_primary_mic_ch);
+	sec_primary_ch_params.data.reserved = 0;
+	sec_primary_ch_params.sec_primary_mic_ch_data.version = 0;
+	sec_primary_ch_params.sec_primary_mic_ch_data.reserved = 0;
+	sec_primary_ch_params.sec_primary_mic_ch_data.sec_primary_mic_ch =
+			primary_mic_ch;
+	sec_primary_ch_params.sec_primary_mic_ch_data.reserved1 = 0;
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	rc = apr_send_pkt(this_adm.apr, (uint32_t *)&sec_primary_ch_params);
+	if (rc < 0) {
+		pr_err("%s: Set params failed port = %#x\n",
+				__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback */
+	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!rc) {
+		pr_err("%s: Mic Set params timed out port = %#x\n",
+				__func__, port_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	} else if (atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx])));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&this_adm.copp.stat
+					[port_idx][copp_idx]));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
 int adm_param_enable(int port_id, int copp_idx, int module_id,  int enable)
 {
 	struct audproc_enable_param_t adm_mod_enable;
@@ -4355,6 +4674,9 @@
 
 	this_adm.apr = NULL;
 	this_adm.ec_ref_rx = -1;
+	this_adm.num_ec_ref_rx_chans = 0;
+	this_adm.ec_ref_rx_bit_width = 0;
+	this_adm.ec_ref_rx_sampling_rate = 0;
 	atomic_set(&this_adm.matrix_map_stat, 0);
 	init_waitqueue_head(&this_adm.matrix_map_wait);
 	atomic_set(&this_adm.adm_stat, 0);
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 04dd772..176b8aa 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -36,6 +36,7 @@
 	AFE_FB_SPKR_PROT_CAL,
 	AFE_HW_DELAY_CAL,
 	AFE_SIDETONE_CAL,
+	AFE_SIDETONE_IIR_CAL,
 	AFE_TOPOLOGY_CAL,
 	AFE_CUST_TOPOLOGY_CAL,
 	AFE_FB_SPKR_PROT_TH_VI_CAL,
@@ -112,12 +113,15 @@
 	struct audio_cal_info_sp_ex_vi_ftm_cfg	ex_ftm_cfg;
 	struct afe_sp_th_vi_get_param_resp	th_vi_resp;
 	struct afe_sp_ex_vi_get_param_resp	ex_vi_resp;
+	struct afe_av_dev_drift_get_param_resp	av_dev_drift_resp;
 	int vi_tx_port;
 	int vi_rx_port;
 	uint32_t afe_sample_rates[AFE_MAX_PORTS];
 	struct aanc_data aanc_info;
 	struct mutex afe_cmd_lock;
 	int set_custom_topology;
+	int dev_acdb_id[AFE_MAX_PORTS];
+	routing_cb rt_cb;
 };
 
 static atomic_t afe_ports_mad_type[SLIMBUS_PORT_LAST - SLIMBUS_0_RX];
@@ -144,7 +148,7 @@
 	int topology;
 	int port_index = afe_get_port_index(port_id);
 
-	if ((port_index < 0) || (port_index > AFE_MAX_PORTS)) {
+	if ((port_index < 0) || (port_index >= AFE_MAX_PORTS)) {
 		pr_err("%s: Invalid port index %d\n", __func__, port_index);
 		topology = -EINVAL;
 		goto done;
@@ -187,6 +191,38 @@
 			__func__, data->opcode, data->payload_size);
 }
 
+static void av_dev_drift_afe_cb_handler(uint32_t *payload,
+					uint32_t payload_size)
+{
+	u32 param_id;
+	struct afe_av_dev_drift_get_param_resp *resp =
+		(struct afe_av_dev_drift_get_param_resp *) payload;
+
+	if (!resp) {
+		pr_err("%s: Error: resp is NULL\n", __func__);
+		return;
+	}
+
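+	/*
+	 * Cache the AV device drift (AFE_PARAM_ID_DEV_TIMING_STATS) response
+	 * so afe_get_av_dev_drift() can copy the timing stats to its caller.
+	 */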
+	param_id = resp->pdata.param_id;
+	if (param_id == AFE_PARAM_ID_DEV_TIMING_STATS) {
+		if (payload_size < sizeof(this_afe.av_dev_drift_resp)) {
+			pr_err("%s: Error: received size %d, resp size %zu\n",
+				__func__, payload_size,
+				sizeof(this_afe.av_dev_drift_resp));
+			return;
+		}
+		memcpy(&this_afe.av_dev_drift_resp, payload,
+				sizeof(this_afe.av_dev_drift_resp));
+		if (!this_afe.av_dev_drift_resp.status) {
+			atomic_set(&this_afe.state, 0);
+		} else {
+			pr_debug("%s: av_dev_drift_resp status: %d\n", __func__,
+				  this_afe.av_dev_drift_resp.status);
+			atomic_set(&this_afe.state, -1);
+		}
+	}
+}
+
 static int32_t sp_make_afe_callback(uint32_t *payload, uint32_t payload_size)
 {
 	u32 param_id;
@@ -272,6 +308,7 @@
 		mutex_lock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
 		this_afe.set_custom_topology = 1;
 		mutex_unlock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
+		rtac_clear_mapping(AFE_RTAC_CAL);
 
 		if (this_afe.apr) {
 			apr_reset(this_afe.apr);
@@ -306,10 +343,7 @@
 	}
 	afe_callback_debug_print(data);
 	if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2) {
-		u8 *payload = data->payload;
-
-		if (rtac_make_afe_callback(data->payload, data->payload_size))
-			return 0;
+		uint32_t *payload = data->payload;
 
 		if (!payload || (data->token >= AFE_MAX_PORTS)) {
 			pr_err("%s: Error: size %d payload %pK token %d\n",
@@ -317,9 +351,19 @@
 				payload, data->token);
 			return -EINVAL;
 		}
-		if (sp_make_afe_callback(data->payload, data->payload_size))
-			return -EINVAL;
 
+		if (payload[2] == AFE_PARAM_ID_DEV_TIMING_STATS) {
+			av_dev_drift_afe_cb_handler(data->payload,
+						    data->payload_size);
+		} else {
+			if (rtac_make_afe_callback(data->payload,
+						   data->payload_size))
+				return 0;
+
+			if (sp_make_afe_callback(data->payload,
+						 data->payload_size))
+				return -EINVAL;
+		}
 		wake_up(&this_afe.wait[data->token]);
 	} else if (data->payload_size) {
 		uint32_t *payload;
@@ -726,7 +770,7 @@
 	}
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		result = -EINVAL;
@@ -870,7 +914,7 @@
 		goto fail_cmd;
 	}
 	index = q6audio_get_port_index(port);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		ret = -EINVAL;
@@ -951,7 +995,7 @@
 		goto fail_cmd;
 	}
 	index = q6audio_get_port_index(src_port);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		ret = -EINVAL;
@@ -1179,6 +1223,7 @@
 
 	pr_debug("%s:\n", __func__);
 
+	memset(&delay_entry, 0, sizeof(delay_entry));
 	delay_entry.sample_rate = rate;
 	if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
 		ret = afe_get_cal_hw_delay(TX_DEVICE, &delay_entry);
@@ -1197,7 +1242,7 @@
 	}
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		ret = -EINVAL;
@@ -1246,6 +1291,10 @@
 	struct cal_block_data	*cal_block = NULL;
 	int32_t path;
 	struct audio_cal_info_afe_top *afe_top;
+	int afe_port_index = q6audio_get_port_index(port_id);
+
+	if (afe_port_index < 0)
+		goto err_exit;
 
 	list_for_each_safe(ptr, next,
 		&cal_type->cal_blocks) {
@@ -1257,12 +1306,25 @@
 		afe_top =
 		(struct audio_cal_info_afe_top *)cal_block->cal_info;
 		if (afe_top->path == path) {
-			pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
-			__func__, afe_top->topology, afe_top->acdb_id,
-			q6audio_get_port_id(port_id));
-			return cal_block;
+			if (this_afe.dev_acdb_id[afe_port_index] > 0) {
+				if (afe_top->acdb_id ==
+				    this_afe.dev_acdb_id[afe_port_index]) {
+					pr_debug("%s: top_id:%x acdb_id:%d afe_port_id:%d\n",
+						 __func__, afe_top->topology,
+						 afe_top->acdb_id,
+						 q6audio_get_port_id(port_id));
+					return cal_block;
+				}
+			} else {
+				pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
+				 __func__, afe_top->topology, afe_top->acdb_id,
+				 q6audio_get_port_id(port_id));
+				return cal_block;
+			}
 		}
 	}
+
+err_exit:
 	return NULL;
 }
 
@@ -1319,7 +1381,7 @@
 	u32 topology_id = 0;
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS - 1) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -1365,6 +1427,7 @@
 	}
 
 	this_afe.topology[index] = topology_id;
+	rtac_update_afe_topology(port_id);
 done:
 	pr_debug("%s: AFE set topology id 0x%x  enable for port 0x%x ret %d\n",
 			__func__, topology_id, port_id, ret);
@@ -1406,10 +1469,46 @@
 	return ret;
 }
 
+static struct cal_block_data *afe_find_cal(int cal_index, int port_id)
+{
+	struct list_head *ptr, *next;
+	struct cal_block_data *cal_block = NULL;
+	struct audio_cal_info_afe *afe_cal_info = NULL;
+	int afe_port_index = q6audio_get_port_index(port_id);
+
+	pr_debug("%s: cal_index %d port_id %d port_index %d\n", __func__,
+		  cal_index, port_id, afe_port_index);
+	if (afe_port_index < 0) {
+		pr_err("%s: Error getting AFE port index %d\n",
+			__func__, afe_port_index);
+		goto exit;
+	}
+
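+	/*
+	 * Pick the calibration block whose ACDB device ID and sample rate
+	 * match the ones recorded for this AFE port.
+	 */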
+	list_for_each_safe(ptr, next,
+			   &this_afe.cal_data[cal_index]->cal_blocks) {
+		cal_block = list_entry(ptr, struct cal_block_data, list);
+		afe_cal_info = cal_block->cal_info;
+		if ((afe_cal_info->acdb_id ==
+		     this_afe.dev_acdb_id[afe_port_index]) &&
+		    (afe_cal_info->sample_rate ==
+		     this_afe.afe_sample_rates[afe_port_index])) {
+			pr_debug("%s: cal block is a match, size is %zd\n",
+				 __func__, cal_block->cal_data.size);
+			goto exit;
+		}
+	}
+	pr_err("%s: no matching cal_block found\n", __func__);
+	cal_block = NULL;
+
+exit:
+	return cal_block;
+}
+
 static void send_afe_cal_type(int cal_index, int port_id)
 {
 	struct cal_block_data		*cal_block = NULL;
 	int ret;
+	int afe_port_index = q6audio_get_port_index(port_id);
 
 	pr_debug("%s:\n", __func__);
 
@@ -1419,8 +1518,22 @@
 		goto done;
 	}
 
+	if (afe_port_index < 0) {
+		pr_err("%s: Error getting AFE port index %d\n",
+			__func__, afe_port_index);
+		goto done;
+	}
+
 	mutex_lock(&this_afe.cal_data[cal_index]->lock);
-	cal_block = cal_utils_get_only_cal_block(this_afe.cal_data[cal_index]);
+
+	if (((cal_index == AFE_COMMON_RX_CAL) ||
+	     (cal_index == AFE_COMMON_TX_CAL)) &&
+	    (this_afe.dev_acdb_id[afe_port_index] > 0))
+		cal_block = afe_find_cal(cal_index, port_id);
+	else
+		cal_block = cal_utils_get_only_cal_block(
+				this_afe.cal_data[cal_index]);
+
 	if (cal_block == NULL) {
 		pr_err("%s cal_block not found!!\n", __func__);
 		goto unlock;
@@ -1677,7 +1790,7 @@
 
 	pr_debug("%s: enter, port_id =  0x%x\n", __func__, port_id);
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -1731,7 +1844,7 @@
 	}
 
 	index = q6audio_get_port_index(tx_port);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -1816,7 +1929,7 @@
 	}
 
 	index = q6audio_get_port_index(tx_port);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -1932,7 +2045,8 @@
 {
 	int i;
 
-	if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX) {
+	if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX ||
+		port_id == AFE_PORT_ID_INT3_MI2S_TX) {
 		mad_type = MAD_SW_AUDIO;
 		return 0;
 	}
@@ -1950,7 +2064,8 @@
 {
 	int i;
 
-	if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX)
+	if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX ||
+		port_id == AFE_PORT_ID_INT3_MI2S_TX)
 		return MAD_SW_AUDIO;
 
 	i = port_id - SLIMBUS_0_RX;
@@ -2039,7 +2154,7 @@
 		return ret;
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2125,7 +2240,7 @@
 		return ret;
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2199,7 +2314,7 @@
 
 	pr_debug("%s: enter\n", __func__);
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2270,7 +2385,7 @@
 	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2345,7 +2460,7 @@
 	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2424,7 +2539,7 @@
 	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2488,7 +2603,7 @@
 }
 
 int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
-		u32 rate)
+		       u32 rate, u16 num_groups)
 {
 	struct afe_audioif_config_command config;
 	int ret = 0;
@@ -2504,7 +2619,7 @@
 	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2521,9 +2636,17 @@
 		return ret;
 	}
 
-	/* Also send the topology id here: */
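+	/*
+	 * Cache the port sample rate and resolve the device ACDB ID through
+	 * the routing callback so per-device AFE calibration can be chosen.
+	 */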
+	if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[index] = rate;
+
+		if (this_afe.rt_cb)
+			this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+	}
+
+	/* Also send the topology id here, but only when more than one group is configured: */
 	port_index = afe_get_port_index(port_id);
-	if (!(this_afe.afe_cal_mode[port_index] == AFE_CAL_MODE_NONE)) {
+	if (!(this_afe.afe_cal_mode[port_index] == AFE_CAL_MODE_NONE) &&
+	    num_groups > 1) {
 		/* One time call: only for first time */
 		afe_send_custom_topology();
 		afe_send_port_topology_id(port_id);
@@ -2585,11 +2708,14 @@
 		ret = -EINVAL;
 		goto fail_cmd;
 	}
-
-	ret = afe_send_slot_mapping_cfg(&tdm_port->slot_mapping, port_id);
-	if (ret < 0) {
-		pr_err("%s: afe send failed %d\n", __func__, ret);
-		goto fail_cmd;
+	/* slot mapping is not needed if there is only one group */
+	if (num_groups > 1) {
+		ret = afe_send_slot_mapping_cfg(&tdm_port->slot_mapping,
+						port_id);
+		if (ret < 0) {
+			pr_err("%s: afe send failed %d\n", __func__, ret);
+			goto fail_cmd;
+		}
 	}
 
 	if (tdm_port->custom_tdm_header.header_type) {
@@ -2615,6 +2741,11 @@
 	this_afe.afe_cal_mode[port_index] = afe_cal_mode;
 }
 
+void afe_set_routing_callback(routing_cb cb)
+{
+	this_afe.rt_cb = cb;
+}
+
 int afe_port_send_usb_dev_param(u16 port_id, union afe_port_config *afe_config)
 {
 	struct afe_usb_audio_dev_param_command config;
@@ -2626,7 +2757,7 @@
 		goto exit;
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid! for port ID 0x%x\n",
 				__func__, index, port_id);
 		ret = -EINVAL;
@@ -2660,6 +2791,21 @@
 		ret = -EINVAL;
 		goto exit;
 	}
+
+	config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT;
+	config.pdata.param_size = sizeof(config.lpcm_fmt);
+	config.lpcm_fmt.cfg_minor_version =
+		AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
+	config.lpcm_fmt.endian = afe_config->usb_audio.endian;
+
+	ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+	if (ret) {
+		pr_err("%s: AFE device param cmd LPCM_FMT failed %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
 exit:
 	return ret;
 }
@@ -2683,6 +2829,11 @@
 	}
 	memset(&config, 0, sizeof(config));
 	index = q6audio_get_port_index(port_id);
+	if (index < 0) {
+		pr_err("%s: Invalid index number: %d\n", __func__, index);
+		return -EINVAL;
+	}
+
 	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
 				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
 	config.hdr.pkt_size = sizeof(config);
@@ -2820,7 +2971,7 @@
 	pr_debug("%s: port id: 0x%x\n", __func__, port_id);
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -2837,6 +2988,13 @@
 		return ret;
 	}
 
+	if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[index] = rate;
+
+		if (this_afe.rt_cb)
+			this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+	}
+
 	mutex_lock(&this_afe.afe_cmd_lock);
 	/* Also send the topology id here: */
 	port_index = afe_get_port_index(port_id);
@@ -2934,6 +3092,20 @@
 	case AFE_PORT_ID_QUINARY_MI2S_RX:
 	case AFE_PORT_ID_QUINARY_MI2S_TX:
 	case AFE_PORT_ID_SENARY_MI2S_TX:
+	case AFE_PORT_ID_INT0_MI2S_RX:
+	case AFE_PORT_ID_INT0_MI2S_TX:
+	case AFE_PORT_ID_INT1_MI2S_RX:
+	case AFE_PORT_ID_INT1_MI2S_TX:
+	case AFE_PORT_ID_INT2_MI2S_RX:
+	case AFE_PORT_ID_INT2_MI2S_TX:
+	case AFE_PORT_ID_INT3_MI2S_RX:
+	case AFE_PORT_ID_INT3_MI2S_TX:
+	case AFE_PORT_ID_INT4_MI2S_RX:
+	case AFE_PORT_ID_INT4_MI2S_TX:
+	case AFE_PORT_ID_INT5_MI2S_RX:
+	case AFE_PORT_ID_INT5_MI2S_TX:
+	case AFE_PORT_ID_INT6_MI2S_RX:
+	case AFE_PORT_ID_INT6_MI2S_TX:
 		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
 		break;
 	case HDMI_RX:
@@ -3026,7 +3198,6 @@
 
 	port_index = afe_get_port_index(port_id);
 	if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
-		this_afe.afe_sample_rates[port_index] = rate;
 		/*
 		 * If afe_port_start() for tx port called before
 		 * rx port, then aanc rx sample rate is zero. So,
@@ -3311,6 +3482,34 @@
 		return IDX_AFE_PORT_ID_QUATERNARY_TDM_RX_7;
 	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
 		return IDX_AFE_PORT_ID_QUATERNARY_TDM_TX_7;
+	case AFE_PORT_ID_INT0_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT0_MI2S_RX;
+	case AFE_PORT_ID_INT0_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT0_MI2S_TX;
+	case AFE_PORT_ID_INT1_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT1_MI2S_RX;
+	case AFE_PORT_ID_INT1_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT1_MI2S_TX;
+	case AFE_PORT_ID_INT2_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT2_MI2S_RX;
+	case AFE_PORT_ID_INT2_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT2_MI2S_TX;
+	case AFE_PORT_ID_INT3_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT3_MI2S_RX;
+	case AFE_PORT_ID_INT3_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT3_MI2S_TX;
+	case AFE_PORT_ID_INT4_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT4_MI2S_RX;
+	case AFE_PORT_ID_INT4_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT4_MI2S_TX;
+	case AFE_PORT_ID_INT5_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT5_MI2S_RX;
+	case AFE_PORT_ID_INT5_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT5_MI2S_TX;
+	case AFE_PORT_ID_INT6_MI2S_RX:
+		return IDX_AFE_PORT_ID_INT6_MI2S_RX;
+	case AFE_PORT_ID_INT6_MI2S_TX:
+		return IDX_AFE_PORT_ID_INT6_MI2S_TX;
 	default:
 		pr_err("%s: port 0x%x\n", __func__, port_id);
 		return -EINVAL;
@@ -3335,7 +3534,7 @@
 	pr_err("%s: port_id 0x%x rate %d\n", __func__, port_id, rate);
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -3360,6 +3559,14 @@
 		pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
 		return -EINVAL;
 	}
+
+	if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+		this_afe.afe_sample_rates[index] = rate;
+
+		if (this_afe.rt_cb)
+			this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+	}
+
 	/* Also send the topology id here: */
 	afe_send_custom_topology(); /* One time call: only for first time  */
 	afe_send_port_topology_id(port_id);
@@ -3503,7 +3710,7 @@
 	}
 
 	index = q6audio_get_port_index(rx_port);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -3569,7 +3776,7 @@
 		goto fail_cmd;
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -3672,7 +3879,7 @@
 	}
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -3715,7 +3922,7 @@
 		return -EINVAL;
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -3876,7 +4083,7 @@
 	}
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -4162,7 +4369,7 @@
 		rtac_set_afe_handle(this_afe.apr);
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -4426,7 +4633,7 @@
 	}
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -4766,7 +4973,7 @@
 		goto fail_cmd;
 	}
 	index = q6audio_get_port_index(this_afe.dtmf_gen_rx_portid);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		ret = -EINVAL;
@@ -4795,58 +5002,249 @@
 	return ret;
 }
 
-int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain)
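+/*
+ * Send the sidetone IIR enable flag and filter coefficients from the
+ * AFE_SIDETONE_IIR_CAL calibration data to the Tx AFE port.
+ */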
+static int afe_sidetone_iir(u16 tx_port_id)
 {
-	struct afe_loopback_cfg_v1 cmd_sidetone;
-	int ret = 0;
+	struct afe_loopback_iir_cfg_v2 iir_sidetone;
+	int ret;
 	int index = 0;
+	uint16_t size = 0;
+	int cal_index = AFE_SIDETONE_IIR_CAL;
+	int iir_pregain = 0;
+	int iir_num_biquad_stages = 0;
+	int iir_enable;
+	struct cal_block_data *cal_block;
+	int mid;
 
-	pr_info("%s: tx_port_id: 0x%x rx_port_id: 0x%x enable:%d gain:%d\n",
-			__func__, tx_port_id, rx_port_id, enable, gain);
-	index = q6audio_get_port_index(rx_port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
-		pr_err("%s: AFE port index[%d] invalid!\n",
-				__func__, index);
-		return -EINVAL;
+	memset(&iir_sidetone, 0, sizeof(iir_sidetone));
+	index = q6audio_get_port_index(tx_port_id);
+	iir_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				     APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	iir_sidetone.hdr.pkt_size = sizeof(iir_sidetone);
+	iir_sidetone.hdr.src_port = 0;
+	iir_sidetone.hdr.dest_port = 0;
+	iir_sidetone.hdr.token = index;
+	iir_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	iir_sidetone.param.port_id = tx_port_id;
+	iir_sidetone.param.payload_address_lsw = 0x00;
+	iir_sidetone.param.payload_address_msw = 0x00;
+	iir_sidetone.param.mem_map_handle = 0x00;
+
+	if (this_afe.cal_data[cal_index] == NULL) {
+		pr_err("%s: cal data is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
 	}
-	ret = q6audio_validate_port(rx_port_id);
-	if (ret < 0) {
-		pr_err("%s: Invalid port 0x%x %d", __func__, rx_port_id, ret);
-		return -EINVAL;
+	mutex_lock(&this_afe.cal_data[cal_index]->lock);
+	cal_block = cal_utils_get_only_cal_block(this_afe.cal_data[cal_index]);
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block not found\n", __func__);
+		mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+		ret = -EINVAL;
+		goto done;
 	}
 
+	iir_pregain = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->pregain;
+	iir_enable = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->iir_enable;
+	iir_num_biquad_stages = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->num_biquad_stages;
+	mid = ((struct audio_cal_info_sidetone_iir *)
+			cal_block->cal_info)->mid;
+
+	/*
+	 * calculate the actual size of payload based on no of stages
+	 * enabled in calibration
+	 */
+	size = (MAX_SIDETONE_IIR_DATA_SIZE / MAX_NO_IIR_FILTER_STAGE) *
+		iir_num_biquad_stages;
+	/*
+	 * For an odd number of stages, 2 bytes of padding are
+	 * required at the end of the payload.
+	 */
+	if (iir_num_biquad_stages % 2) {
+		pr_debug("%s: adding 2 to size:%d\n", __func__, size);
+		size = size + 2;
+	}
+	memcpy(&iir_sidetone.st_iir_filter_config_data.iir_config,
+		&((struct audio_cal_info_sidetone_iir *)
+		cal_block->cal_info)->iir_config,
+		sizeof(iir_sidetone.st_iir_filter_config_data.iir_config));
+	mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+
+	/*
+	 * Calculate the payload size for setparams command
+	 */
+	iir_sidetone.param.payload_size = (sizeof(iir_sidetone) -
+				sizeof(struct apr_hdr) -
+				sizeof(struct afe_port_cmd_set_param_v2) -
+				(MAX_SIDETONE_IIR_DATA_SIZE - size));
+
+	pr_debug("%s: payload size :%d\n", __func__,
+		 iir_sidetone.param.payload_size);
+
+	/*
+	 * Set IIR enable params
+	 */
+	iir_sidetone.st_iir_enable_pdata.module_id = mid;
+	iir_sidetone.st_iir_enable_pdata.param_id =
+			AFE_PARAM_ID_ENABLE;
+	iir_sidetone.st_iir_enable_pdata.param_size =
+			sizeof(iir_sidetone.st_iir_mode_enable_data);
+	iir_sidetone.st_iir_mode_enable_data.enable = iir_enable;
+
+	/*
+	 * Set IIR filter config params
+	 */
+	iir_sidetone.st_iir_filter_config_pdata.module_id = mid;
+	iir_sidetone.st_iir_filter_config_pdata.param_id =
+			AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG;
+	iir_sidetone.st_iir_filter_config_pdata.param_size =
+		sizeof(iir_sidetone.st_iir_filter_config_data.num_biquad_stages)
+		+
+		sizeof(iir_sidetone.st_iir_filter_config_data.pregain) + size;
+	iir_sidetone.st_iir_filter_config_pdata.reserved = 0;
+	iir_sidetone.st_iir_filter_config_data.num_biquad_stages =
+			iir_num_biquad_stages;
+	iir_sidetone.st_iir_filter_config_data.pregain = iir_pregain;
+	pr_debug("%s: tx(0x%x)mid(0x%x)iir_en(%d)stg(%d)gain(0x%x)size(%d)\n",
+		  __func__, tx_port_id, mid,
+		  iir_sidetone.st_iir_mode_enable_data.enable,
+		  iir_sidetone.st_iir_filter_config_data.num_biquad_stages,
+		  iir_sidetone.st_iir_filter_config_data.pregain,
+		  iir_sidetone.st_iir_filter_config_pdata.param_size);
+	ret = afe_apr_send_pkt(&iir_sidetone, &this_afe.wait[index]);
+	if (ret)
+		pr_err("%s: AFE sidetone failed for tx_port(0x%x)\n",
+			 __func__, tx_port_id);
+
+done:
+	return ret;
+
+}
+
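+/*
+ * Configure sidetone loopback on the Tx port: per-path gain towards the
+ * Rx port and the loopback routing/enable, both read from AFE_SIDETONE_CAL.
+ */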
+static int afe_sidetone(u16 tx_port_id, u16 rx_port_id, bool enable)
+{
+	struct afe_st_loopback_cfg_v1 cmd_sidetone;
+	int ret;
+	int index;
+	int cal_index = AFE_SIDETONE_CAL;
+	int sidetone_gain;
+	int sidetone_enable;
+	struct cal_block_data *cal_block;
+	int mid = 0;
+
+	memset(&cmd_sidetone, 0, sizeof(cmd_sidetone));
+	if (this_afe.cal_data[cal_index] == NULL) {
+		pr_err("%s: cal data is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	mutex_lock(&this_afe.cal_data[cal_index]->lock);
+	cal_block = cal_utils_get_only_cal_block(this_afe.cal_data[cal_index]);
+	if (cal_block == NULL) {
+		pr_err("%s: cal_block not found\n", __func__);
+		mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+		ret = -EINVAL;
+		goto done;
+	}
+	sidetone_gain = ((struct audio_cal_info_sidetone *)
+			 cal_block->cal_info)->gain;
+	sidetone_enable = ((struct audio_cal_info_sidetone *)
+			 cal_block->cal_info)->enable;
+	mid = ((struct audio_cal_info_sidetone *)
+			 cal_block->cal_info)->mid;
+	mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+
+	index = q6audio_get_port_index(tx_port_id);
 	cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
 				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
 	cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone);
 	cmd_sidetone.hdr.src_port = 0;
 	cmd_sidetone.hdr.dest_port = 0;
-	cmd_sidetone.hdr.token = 0;
+	cmd_sidetone.hdr.token = index;
 	cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
-	/* should it be rx or tx port id ?? , bharath*/
 	cmd_sidetone.param.port_id = tx_port_id;
-	/* size of data param & payload */
 	cmd_sidetone.param.payload_size = (sizeof(cmd_sidetone) -
 			sizeof(struct apr_hdr) -
 			sizeof(struct afe_port_cmd_set_param_v2));
 	cmd_sidetone.param.payload_address_lsw = 0x00;
 	cmd_sidetone.param.payload_address_msw = 0x00;
 	cmd_sidetone.param.mem_map_handle = 0x00;
-	cmd_sidetone.pdata.module_id = AFE_MODULE_LOOPBACK;
-	cmd_sidetone.pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
-	/* size of actual payload only */
-	cmd_sidetone.pdata.param_size =  cmd_sidetone.param.payload_size -
-				sizeof(struct afe_port_param_data_v2);
+	cmd_sidetone.gain_pdata.module_id = AFE_MODULE_LOOPBACK;
+	cmd_sidetone.gain_pdata.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
+	/*
+	 * size of actual payload only
+	 */
+	cmd_sidetone.gain_pdata.param_size = sizeof(
+					     struct afe_loopback_sidetone_gain);
+	cmd_sidetone.gain_data.rx_port_id = rx_port_id;
+	cmd_sidetone.gain_data.gain = sidetone_gain;
 
-	cmd_sidetone.loopback_cfg_minor_version =
+	cmd_sidetone.cfg_pdata.module_id = AFE_MODULE_LOOPBACK;
+	cmd_sidetone.cfg_pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+	/*
+	 * size of actual payload only
+	 */
+	cmd_sidetone.cfg_pdata.param_size = sizeof(struct loopback_cfg_data);
+	cmd_sidetone.cfg_data.loopback_cfg_minor_version =
 					AFE_API_VERSION_LOOPBACK_CONFIG;
-	cmd_sidetone.dst_port_id = rx_port_id;
-	cmd_sidetone.routing_mode = LB_MODE_SIDETONE;
-	cmd_sidetone.enable = enable;
+	cmd_sidetone.cfg_data.dst_port_id = rx_port_id;
+	cmd_sidetone.cfg_data.routing_mode = LB_MODE_SIDETONE;
+	cmd_sidetone.cfg_data.enable = ((enable == 1) ? sidetone_enable : 0);
+
+	pr_debug("%s rx(0x%x) tx(0x%x) enable(%d) mid(0x%x) gain(%d) sidetone_enable(%d)\n",
+		  __func__, rx_port_id, tx_port_id,
+		  enable, mid, sidetone_gain, sidetone_enable);
 
 	ret = afe_apr_send_pkt(&cmd_sidetone, &this_afe.wait[index]);
 	if (ret)
-		pr_err("%s: sidetone failed tx_port:0x%x rx_port:0x%x ret%d\n",
-		__func__, tx_port_id, rx_port_id, ret);
+		pr_err("%s: AFE sidetone send failed for tx_port:%d rx_port:%d ret:%d\n",
+			__func__, tx_port_id, rx_port_id, ret);
+done:
+	return ret;
+}
+
+int afe_sidetone_enable(u16 tx_port_id, u16 rx_port_id, bool enable)
+{
+	int ret;
+	int index;
+
+	index = q6audio_get_port_index(rx_port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (q6audio_validate_port(rx_port_id) < 0) {
+		pr_err("%s: Invalid port 0x%x\n",
+				__func__, rx_port_id);
+		ret = -EINVAL;
+		goto done;
+	}
+	index = q6audio_get_port_index(tx_port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: AFE port index[%d] invalid!\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (q6audio_validate_port(tx_port_id) < 0) {
+		pr_err("%s: Invalid port 0x%x\n",
+				__func__, tx_port_id);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (enable) {
+		ret = afe_sidetone_iir(tx_port_id);
+		if (ret)
+			goto done;
+	}
+
+	ret = afe_sidetone(tx_port_id, rx_port_id, enable);
+
+done:
 	return ret;
 }
 
@@ -4979,6 +5377,20 @@
 	case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
 	case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
 	case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+	case AFE_PORT_ID_INT0_MI2S_RX:
+	case AFE_PORT_ID_INT1_MI2S_RX:
+	case AFE_PORT_ID_INT2_MI2S_RX:
+	case AFE_PORT_ID_INT3_MI2S_RX:
+	case AFE_PORT_ID_INT4_MI2S_RX:
+	case AFE_PORT_ID_INT5_MI2S_RX:
+	case AFE_PORT_ID_INT6_MI2S_RX:
+	case AFE_PORT_ID_INT0_MI2S_TX:
+	case AFE_PORT_ID_INT1_MI2S_TX:
+	case AFE_PORT_ID_INT2_MI2S_TX:
+	case AFE_PORT_ID_INT3_MI2S_TX:
+	case AFE_PORT_ID_INT4_MI2S_TX:
+	case AFE_PORT_ID_INT5_MI2S_TX:
+	case AFE_PORT_ID_INT6_MI2S_TX:
 	{
 		ret = 0;
 		break;
@@ -5099,7 +5511,7 @@
 
 	port_id = q6audio_convert_virtual_to_portid(port_id);
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -5130,6 +5542,7 @@
 	if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
 		this_afe.afe_sample_rates[port_index] = 0;
 		this_afe.topology[port_index] = 0;
+		this_afe.dev_acdb_id[port_index] = 0;
 	} else {
 		pr_err("%s: port %d\n", __func__, port_index);
 		ret = -EINVAL;
@@ -5257,7 +5670,7 @@
 		return ret;
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -5423,7 +5836,7 @@
 	int ret = 0;
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -5456,7 +5869,7 @@
 		return ret;
 	}
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -5537,7 +5950,7 @@
 	int ret = 0;
 
 	index = q6audio_get_port_index(port_id);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		return -EINVAL;
@@ -5798,6 +6211,88 @@
 	return ret;
 }
 
+int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
+			 u16 port)
+{
+	int ret = -EINVAL;
+	int index = 0;
+	struct afe_av_dev_drift_get_param av_dev_drift;
+
+	if (!timing_stats) {
+		pr_err("%s: Invalid params\n", __func__);
+		goto exit;
+	}
+
+	ret = q6audio_validate_port(port);
+	if (ret < 0) {
+		pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	index = q6audio_get_port_index(port);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid AFE port index[%d]\n",
+				__func__, index);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	memset(&av_dev_drift, 0, sizeof(struct afe_av_dev_drift_get_param));
+
+	av_dev_drift.hdr.hdr_field =
+		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	av_dev_drift.hdr.pkt_size = sizeof(av_dev_drift);
+	av_dev_drift.hdr.src_port = 0;
+	av_dev_drift.hdr.dest_port = 0;
+	av_dev_drift.hdr.token = index;
+	av_dev_drift.hdr.opcode =  AFE_PORT_CMD_GET_PARAM_V2;
+	av_dev_drift.get_param.mem_map_handle = 0;
+	av_dev_drift.get_param.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	av_dev_drift.get_param.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
+	av_dev_drift.get_param.payload_address_lsw = 0;
+	av_dev_drift.get_param.payload_address_msw = 0;
+	av_dev_drift.get_param.payload_size = sizeof(av_dev_drift)
+		- sizeof(av_dev_drift.get_param) - sizeof(av_dev_drift.hdr);
+	av_dev_drift.get_param.port_id = q6audio_get_port_id(port);
+	av_dev_drift.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	av_dev_drift.pdata.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
+	av_dev_drift.pdata.param_size = sizeof(av_dev_drift.timing_stats);
+	atomic_set(&this_afe.status, 0);
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *)&av_dev_drift);
+	if (ret < 0) {
+		pr_err("%s: get param port 0x%x param id[0x%x] failed %d\n",
+			__func__, port, av_dev_drift.get_param.param_id, ret);
+		goto exit;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (atomic_read(&this_afe.status) > 0) {
+		pr_err("%s: config cmd failed [%s]\n",
+				__func__, adsp_err_get_err_str(
+					atomic_read(&this_afe.status)));
+		ret = adsp_err_get_lnx_err_code(
+				atomic_read(&this_afe.status));
+		goto exit;
+	}
+
+	memcpy(timing_stats, &this_afe.av_dev_drift_resp.timing_stats,
+	       sizeof(this_afe.av_dev_drift_resp.timing_stats));
+	ret = 0;
+exit:
+	return ret;
+}
+
 int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib_resp)
 {
 	int ret = -EINVAL;
@@ -5817,7 +6312,7 @@
 		goto fail_cmd;
 	}
 	index = q6audio_get_port_index(port);
-	if (index < 0 || index > AFE_MAX_PORTS) {
+	if (index < 0 || index >= AFE_MAX_PORTS) {
 		pr_err("%s: AFE port index[%d] invalid!\n",
 				__func__, index);
 		ret = -EINVAL;
@@ -5946,6 +6441,9 @@
 	case AFE_SIDETONE_CAL_TYPE:
 		ret = AFE_SIDETONE_CAL;
 		break;
+	case AFE_SIDETONE_IIR_CAL_TYPE:
+		ret = AFE_SIDETONE_IIR_CAL;
+		break;
 	case AFE_TOPOLOGY_CAL_TYPE:
 		ret = AFE_TOPOLOGY_CAL;
 		break;
@@ -6366,6 +6864,7 @@
 	}
 
 
+	mutex_lock(&this_afe.afe_cmd_lock);
 	atomic_set(&this_afe.mem_map_cal_index, cal_index);
 	ret = afe_cmd_memory_map(cal_block->cal_data.paddr,
 			cal_block->map_data.map_size);
@@ -6378,10 +6877,12 @@
 			__func__,
 			&cal_block->cal_data.paddr,
 			cal_block->map_data.map_size);
+		mutex_unlock(&this_afe.afe_cmd_lock);
 		goto done;
 	}
 	cal_block->map_data.q6map_handle = atomic_read(&this_afe.
 		mem_map_cal_handles[cal_index]);
+	mutex_unlock(&this_afe.afe_cmd_lock);
 done:
 	return ret;
 }
@@ -6473,6 +6974,11 @@
 		afe_set_cal, NULL, NULL} },
 		{NULL, NULL, cal_utils_match_buf_num} },
 
+		{{AFE_SIDETONE_IIR_CAL_TYPE,
+		{NULL, NULL, NULL,
+		afe_set_cal, NULL, NULL} },
+		{NULL, NULL, cal_utils_match_buf_num} },
+
 		{{AFE_TOPOLOGY_CAL_TYPE,
 		{NULL, NULL, NULL,
 		afe_set_cal, NULL, NULL} },
@@ -6602,6 +7108,8 @@
 	mutex_init(&this_afe.afe_cmd_lock);
 	for (i = 0; i < AFE_MAX_PORTS; i++) {
 		this_afe.afe_cal_mode[i] = AFE_CAL_MODE_DEFAULT;
+		this_afe.afe_sample_rates[i] = 0;
+		this_afe.dev_acdb_id[i] = 0;
 		init_waitqueue_head(&this_afe.wait[i]);
 	}
 	wakeup_source_init(&wl.ws, "spkr-prot");
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 779bdd5..b52c83b 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1703,6 +1703,7 @@
 		case ASM_STREAM_CMD_OPEN_PUSH_MODE_READ:
 		case ASM_STREAM_CMD_OPEN_READWRITE_V2:
 		case ASM_STREAM_CMD_OPEN_LOOPBACK_V2:
+		case ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK:
 		case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
 		case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
 		case ASM_DATA_CMD_REMOVE_INITIAL_SILENCE:
@@ -2431,13 +2432,14 @@
  * @ac: Client session handle
  * @format: encoder format
  * @bits_per_sample: bit width of capture session
+ * @ts_mode: timestamp mode
  */
 int q6asm_open_read_v4(struct audio_client *ac, uint32_t format,
-			uint16_t bits_per_sample)
+			uint16_t bits_per_sample, bool ts_mode)
 {
 	return __q6asm_open_read(ac, format, bits_per_sample,
 				 PCM_MEDIA_FORMAT_V4 /*media fmt block ver*/,
-				 true/*ts_mode*/);
+				 ts_mode);
 }
 EXPORT_SYMBOL(q6asm_open_read_v4);
 
@@ -2478,6 +2480,10 @@
 	case FORMAT_DSD:
 		open.fmt_id = ASM_MEDIA_FMT_DSD;
 		break;
+	case FORMAT_GEN_COMPR:
+		open.fmt_id = ASM_MEDIA_FMT_GENERIC_COMPRESSED;
+		break;
+
 	default:
 		pr_err("%s: Invalid format[%d]\n", __func__, format);
 		rc = -EINVAL;
@@ -2487,7 +2493,8 @@
 	 * stream is not IEC 61937 or IEC 60958 packetizied
 	 */
 	if (passthrough_flag == COMPRESSED_PASSTHROUGH ||
-		passthrough_flag == COMPRESSED_PASSTHROUGH_DSD) {
+		passthrough_flag == COMPRESSED_PASSTHROUGH_DSD ||
+		passthrough_flag == COMPRESSED_PASSTHROUGH_GEN) {
 		open.flags = 0x0;
 		pr_debug("%s: Flag 0 COMPRESSED_PASSTHROUGH\n", __func__);
 	} else if (passthrough_flag == COMPRESSED_PASSTHROUGH_CONVERT) {
@@ -2651,6 +2658,12 @@
 	case FORMAT_DSD:
 		open.dec_fmt_id = ASM_MEDIA_FMT_DSD;
 		break;
+	case FORMAT_APTX:
+		open.dec_fmt_id = ASM_MEDIA_FMT_APTX;
+		break;
+	case FORMAT_GEN_COMPR:
+		open.dec_fmt_id = ASM_MEDIA_FMT_GENERIC_COMPRESSED;
+		break;
 	default:
 		pr_err("%s: Invalid format 0x%x\n", __func__, format);
 		rc = -EINVAL;
@@ -2868,6 +2881,7 @@
 		break;
 	case FORMAT_DSD:
 		open.dec_fmt_id = ASM_MEDIA_FMT_DSD;
+		break;
 	case FORMAT_G711_ALAW_FS:
 		open.dec_fmt_id = ASM_MEDIA_FMT_G711_ALAW_FS;
 		break;
@@ -2973,7 +2987,6 @@
 int q6asm_open_loopback_v2(struct audio_client *ac, uint16_t bits_per_sample)
 {
 	int rc = 0x00;
-	struct asm_stream_cmd_open_loopback_v2 open;
 
 	if (ac == NULL) {
 		pr_err("%s: APR handle NULL\n", __func__);
@@ -2985,29 +2998,67 @@
 	}
 	pr_debug("%s: session[%d]\n", __func__, ac->session);
 
-	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
-	atomic_set(&ac->cmd_state, -1);
-	open.hdr.opcode = ASM_STREAM_CMD_OPEN_LOOPBACK_V2;
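+	/*
+	 * Low-latency sessions open a transcode loopback stream; legacy
+	 * sessions keep the original ASM_STREAM_CMD_OPEN_LOOPBACK_V2 command.
+	 */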
+	if (ac->perf_mode == LOW_LATENCY_PCM_MODE) {
+		struct asm_stream_cmd_open_transcode_loopback_t open;
 
-	open.mode_flags = 0;
-	open.src_endpointype = 0;
-	open.sink_endpointype = 0;
-	/* source endpoint : matrix */
-	open.postprocopo_id = q6asm_get_asm_topology_cal();
+		q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+		atomic_set(&ac->cmd_state, -1);
+		open.hdr.opcode = ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK;
 
-	ac->app_type = q6asm_get_asm_app_type_cal();
-	ac->topology = open.postprocopo_id;
-	open.bits_per_sample = bits_per_sample;
-	open.reserved = 0;
+		open.mode_flags = 0;
+		open.src_endpoint_type = 0;
+		open.sink_endpoint_type = 0;
+		open.src_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		open.sink_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		/* source endpoint : matrix */
+		open.audproc_topo_id = q6asm_get_asm_topology_cal();
 
-	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
-	if (rc < 0) {
-		pr_err("%s: open failed op[0x%x]rc[%d]\n", __func__,
-				open.hdr.opcode, rc);
-		rc = -EINVAL;
-		goto fail_cmd;
+		ac->app_type = q6asm_get_asm_app_type_cal();
+		if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+			open.mode_flags |= ASM_LOW_LATENCY_STREAM_SESSION;
+		else
+			open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+		ac->topology = open.audproc_topo_id;
+		open.bits_per_sample = bits_per_sample;
+		open.reserved = 0;
+		pr_debug("%s: opening a transcode_loopback with mode_flags =[%d] session[%d]\n",
+				__func__, open.mode_flags, ac->session);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+		if (rc < 0) {
+			pr_err("%s: open failed op[0x%x]rc[%d]\n",
+					__func__, open.hdr.opcode, rc);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
+	} else { /* ac->perf_mode == LEGACY_PCM_MODE */
+		struct asm_stream_cmd_open_loopback_v2 open;
+
+		q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+		atomic_set(&ac->cmd_state, -1);
+		open.hdr.opcode = ASM_STREAM_CMD_OPEN_LOOPBACK_V2;
+
+		open.mode_flags = 0;
+		open.src_endpointype = 0;
+		open.sink_endpointype = 0;
+		/* source endpoint : matrix */
+		open.postprocopo_id = q6asm_get_asm_topology_cal();
+
+		ac->app_type = q6asm_get_asm_app_type_cal();
+		ac->topology = open.postprocopo_id;
+		open.bits_per_sample = bits_per_sample;
+		open.reserved = 0;
+		pr_debug("%s: opening a loopback_v2 with mode_flags =[%d] session[%d]\n",
+				__func__, open.mode_flags, ac->session);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+		if (rc < 0) {
+			pr_err("%s: open failed op[0x%x]rc[%d]\n",
+					__func__, open.hdr.opcode, rc);
+			rc = -EINVAL;
+			goto fail_cmd;
+		}
 	}
-
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
 	if (!rc) {
@@ -5141,6 +5192,82 @@
 }
 EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v4);
 
+/*
+ * q6asm_media_format_block_gen_compr - set up generic compress format params
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @use_default_chmap: true if default channel map to be used
+ * @channel_map: input channel map
+ * @bits_per_sample: bit width of gen compress stream
+ */
+int q6asm_media_format_block_gen_compr(struct audio_client *ac,
+				uint32_t rate, uint32_t channels,
+				bool use_default_chmap, char *channel_map,
+				uint16_t bits_per_sample)
+{
+	struct asm_generic_compressed_fmt_blk_t fmt;
+	u8 *channel_mapping;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]\n",
+		 __func__, ac->session, rate,
+		 channels, bits_per_sample);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.num_channels = channels;
+	fmt.bits_per_sample = bits_per_sample;
+	fmt.sampling_rate = rate;
+
+	channel_mapping = fmt.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (use_default_chmap) {
+		if (q6asm_map_channels(channel_mapping, channels, false)) {
+			pr_err("%s: map channels failed %d\n",
+				__func__, channels);
+			return -EINVAL;
+		}
+	} else {
+		memcpy(channel_mapping, channel_map,
+		       PCM_FORMAT_MAX_NUM_CHANNEL);
+	}
+
+	atomic_set(&ac->cmd_state, -1);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Command open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_gen_compr);
+
 static int __q6asm_media_format_block_multi_aac(struct audio_client *ac,
 				struct asm_aac_cfg *cfg, int stream_id)
 {
@@ -5727,6 +5854,57 @@
 }
 EXPORT_SYMBOL(q6asm_media_format_block_dsd);
 
+int q6asm_stream_media_format_block_aptx_dec(struct audio_client *ac,
+						uint32_t srate, int stream_id)
+{
+	struct asm_aptx_dec_fmt_blk_v2 aptx_fmt;
+	int rc = 0;
+
+	if (!ac->session) {
+		pr_err("%s: ac session invalid\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s :session[%d] rate[%d] stream_id[%d]\n",
+		__func__, ac->session, srate, stream_id);
+
+	q6asm_stream_add_hdr(ac, &aptx_fmt.hdr, sizeof(aptx_fmt), TRUE,
+				stream_id);
+	atomic_set(&ac->cmd_state, -1);
+
+	aptx_fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	aptx_fmt.fmtblk.fmt_blk_size = sizeof(aptx_fmt) - sizeof(aptx_fmt.hdr) -
+						sizeof(aptx_fmt.fmtblk);
+
+	aptx_fmt.sample_rate = srate;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &aptx_fmt);
+	if (rc < 0) {
+		pr_err("%s: Command media format update failed %d\n",
+				__func__, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s :timeout. waited for FORMAT_UPDATE\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
 static int __q6asm_ds1_set_endp_params(struct audio_client *ac, int param_id,
 				int param_value, int stream_id)
 {
@@ -6746,6 +6924,69 @@
 	return __q6asm_set_volume(ac, volume, instance);
 }
 
+int q6asm_set_aptx_dec_bt_addr(struct audio_client *ac,
+				struct aptx_dec_bt_addr_cfg *cfg)
+{
+	struct aptx_dec_bt_dev_addr payload;
+	int sz = 0;
+	int rc = 0;
+
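+	/*
+	 * cfg carries the Bluetooth device address split into its standard
+	 * LAP/UAP/NAP (lower/upper/non-significant address part) fields.
+	 */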
+	pr_debug("%s: BT addr nap %d, uap %d, lap %d\n", __func__, cfg->nap,
+			cfg->uap, cfg->lap);
+
+	if (ac == NULL) {
+		pr_err("%s: AC handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct aptx_dec_bt_dev_addr);
+	q6asm_add_hdr_async(ac, &payload.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	payload.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	payload.encdec.param_id = APTX_DECODER_BT_ADDRESS;
+	payload.encdec.param_size = sz - sizeof(payload.hdr)
+					- sizeof(payload.encdec);
+	payload.bt_addr_cfg.lap = cfg->lap;
+	payload.bt_addr_cfg.uap = cfg->uap;
+	payload.bt_addr_cfg.nap = cfg->nap;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &payload);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+				__func__, payload.encdec.param_id, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+			payload.encdec.param_id);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)),
+				payload.encdec.param_id);
+		rc = adsp_err_get_lnx_err_code(
+			atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	pr_debug("%s: set BT addr successful\n", __func__);
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
 int q6asm_set_softpause(struct audio_client *ac,
 			struct asm_softpause_params *pause_param)
 {
@@ -7187,7 +7428,7 @@
 	}
 
 	q6asm_stream_add_hdr_async(
-			ac, &write.hdr, sizeof(write), FALSE, ac->stream_id);
+			ac, &write.hdr, sizeof(write), TRUE, ac->stream_id);
 	port = &ac->port[IN];
 	ab = &port->buf[port->dsp_buf];
 
@@ -7208,9 +7449,13 @@
 	else if (ac->io_mode == io_compressed ||
 			ac->io_mode == io_compressed_stream)
 		lbuf_phys_addr = (param->paddr - param->metadata_len);
-	else
-		lbuf_phys_addr = param->paddr;
-
+	else {
+		if (param->flags & SET_TIMESTAMP)
+			lbuf_phys_addr = param->paddr -
+				sizeof(struct snd_codec_metadata);
+		else
+			lbuf_phys_addr = param->paddr;
+	}
 	dev_vdbg(ac->dev, "%s: token[0x%x], buf_addr[%pK], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_phys_addr: 0x[%pK]\n",
 			__func__,
 			write.hdr.token, &param->paddr,
@@ -7348,7 +7593,7 @@
 				   0, /* Stream ID is NA */
 				   port->dsp_buf,
 				   0, /* Direction flag is NA */
-				   WAIT_CMD);
+				   NO_WAIT_CMD);
 		write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
 		write.buf_addr_lsw = lower_32_bits(ab->phys);
 		write.buf_addr_msw = msm_audio_populate_upper_32_bits(ab->phys);
@@ -7427,7 +7672,7 @@
 				   0, /* Stream ID is NA */
 				   port->dsp_buf,
 				   0, /* Direction flag is NA */
-				   WAIT_CMD);
+				   NO_WAIT_CMD);
 
 		write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
 		write.buf_addr_lsw = lower_32_bits(ab->phys);
@@ -7676,16 +7921,18 @@
 	matrix.param.data_payload_addr_lsw = 0;
 	matrix.param.data_payload_addr_msw = 0;
 	matrix.param.mem_map_handle = 0;
-	matrix.param.data_payload_size = sizeof(matrix) -
-			sizeof(matrix.hdr) - sizeof(matrix.param);
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_strtr_param_window_v2_t);
 	matrix.param.direction = 0; /* RX */
 	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
 	matrix.data.param_id = param_id;
-	matrix.data.param_size = matrix.param.data_payload_size -
-			sizeof(matrix.data);
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_strtr_param_window_v2_t);
 	matrix.data.reserved = 0;
-	matrix.window_lsw = window_param->window_lsw;
-	matrix.window_msw = window_param->window_msw;
+	memcpy(&(matrix.config.window_param),
+	       window_param,
+	       sizeof(struct asm_session_mtmx_strtr_param_window_v2_t));
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
 	if (rc < 0) {
@@ -7715,7 +7962,177 @@
 	rc = 0;
 fail_cmd:
 	return rc;
-};
+}
+
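+/*
+ * Set the AV-sync render mode on the session's matrix/stream-router; only
+ * the DEFAULT and LOCAL_STC render modes are accepted here.
+ */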
+int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac,
+		uint32_t render_mode)
+{
+	struct asm_mtmx_strtr_params matrix;
+	struct asm_session_mtmx_strtr_param_render_mode_t render_param;
+	int sz = 0;
+	int rc  = 0;
+
+	pr_debug("%s: render mode is %d\n", __func__, render_mode);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if ((render_mode != ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT) &&
+	    (render_mode != ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC)) {
+		pr_err("%s: Invalid render mode %d\n", __func__, render_mode);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	memset(&render_param, 0,
+	       sizeof(struct asm_session_mtmx_strtr_param_render_mode_t));
+	render_param.flags = render_mode;
+
+	memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+	sz = sizeof(struct asm_mtmx_strtr_params);
+	q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+	matrix.param.data_payload_addr_lsw = 0;
+	matrix.param.data_payload_addr_msw = 0;
+	matrix.param.mem_map_handle = 0;
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_strtr_param_render_mode_t);
+	matrix.param.direction = 0; /* RX */
+	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	matrix.data.param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_MODE_CMD;
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_strtr_param_render_mode_t);
+	matrix.data.reserved = 0;
+	memcpy(&(matrix.config.render_param),
+	       &render_param,
+	       sizeof(struct asm_session_mtmx_strtr_param_render_mode_t));
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+	if (rc < 0) {
+		pr_err("%s: Render mode send failed paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, Render mode send paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto exit;
+	}
+	rc = 0;
+exit:
+	return rc;
+}
+
+int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
+		uint32_t clk_rec_mode)
+{
+	struct asm_mtmx_strtr_params matrix;
+	struct asm_session_mtmx_strtr_param_clk_rec_t clk_rec_param;
+	int sz = 0;
+	int rc  = 0;
+
+	pr_debug("%s: clk rec mode is %d\n", __func__, clk_rec_mode);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if ((clk_rec_mode != ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE) &&
+	    (clk_rec_mode != ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO)) {
+		pr_err("%s: Invalid clk rec mode %d\n", __func__, clk_rec_mode);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	memset(&clk_rec_param, 0,
+	       sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t));
+	clk_rec_param.flags = clk_rec_mode;
+
+	memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+	sz = sizeof(struct asm_mtmx_strtr_params);
+	q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+	matrix.param.data_payload_addr_lsw = 0;
+	matrix.param.data_payload_addr_msw = 0;
+	matrix.param.mem_map_handle = 0;
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t);
+	matrix.param.direction = 0; /* RX */
+	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	matrix.data.param_id = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD;
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t);
+	matrix.data.reserved = 0;
+	memcpy(&(matrix.config.clk_rec_param),
+	       &clk_rec_param,
+	       sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t));
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+	if (rc < 0) {
+		pr_err("%s: clk rec mode send failed paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, clk rec mode send paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto exit;
+	}
+	rc = 0;
+exit:
+	return rc;
+}
 
 static int __q6asm_cmd(struct audio_client *ac, int cmd, uint32_t stream_id)
 {
@@ -7875,7 +8292,7 @@
 				   stream_id,
 				   0, /* Buffer index is NA */
 				   0, /* Direction flag is NA */
-				   WAIT_CMD);
+				   NO_WAIT_CMD);
 
 	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
 			__func__, hdr.token, stream_id, ac->session);
@@ -7939,7 +8356,7 @@
 		return -EINVAL;
 	}
 	pr_debug("%s: session[%d]\n", __func__, ac->session);
-	q6asm_stream_add_hdr_async(ac, &silence.hdr, sizeof(silence), FALSE,
+	q6asm_stream_add_hdr_async(ac, &silence.hdr, sizeof(silence), TRUE,
 			stream_id);
 
 	/*
@@ -7953,7 +8370,7 @@
 				   stream_id,
 				   0, /* Buffer index is NA */
 				   0, /* Direction flag is NA */
-				   WAIT_CMD);
+				   NO_WAIT_CMD);
 	pr_debug("%s: token = 0x%x, stream_id  %d, session 0x%x\n",
 			__func__, silence.hdr.token, stream_id, ac->session);
 
@@ -8169,14 +8586,17 @@
 
 int q6asm_get_asm_topology(int session_id)
 {
-	int topology;
+	int topology = -EINVAL;
 
 	if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
 		pr_err("%s: invalid session_id = %d\n", __func__, session_id);
-		topology = -EINVAL;
 		goto done;
 	}
-
+	if (session[session_id] == NULL) {
+		pr_err("%s: session not created for session id = %d\n",
+		       __func__, session_id);
+		goto done;
+	}
 	topology = session[session_id]->topology;
 done:
 	return topology;
@@ -8184,14 +8604,17 @@
 
 int q6asm_get_asm_app_type(int session_id)
 {
-	int app_type;
+	int app_type = -EINVAL;
 
 	if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
 		pr_err("%s: invalid session_id = %d\n", __func__, session_id);
-		app_type = -EINVAL;
 		goto done;
 	}
-
+	if (session[session_id] == NULL) {
+		pr_err("%s: session not created for session id = %d\n",
+		       __func__, session_id);
+		goto done;
+	}
 	app_type = session[session_id]->app_type;
 done:
 	return app_type;
@@ -8310,7 +8733,7 @@
 	q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
 		sizeof(struct asm_stream_cmd_set_pp_params_v2)), TRUE);
 
-	atomic_set(&ac->cmd_state, 1);
+	atomic_set(&ac->cmd_state, -1);
 	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	payload_params.data_payload_addr_lsw =
 			lower_32_bits(cal_block->cal_data.paddr);
@@ -8336,13 +8759,13 @@
 		goto free;
 	}
 	rc = wait_event_timeout(ac->cmd_wait,
-				(atomic_read(&ac->cmd_state) <= 0), 5 * HZ);
+				(atomic_read(&ac->cmd_state) >= 0), 5 * HZ);
 	if (!rc) {
 		pr_err("%s: timeout, audio audstrm cal send\n", __func__);
 		rc = -ETIMEDOUT;
 		goto free;
 	}
-	if (atomic_read(&ac->cmd_state) < 0) {
+	if (atomic_read(&ac->cmd_state) > 0) {
 		pr_err("%s: DSP returned error[%d] audio audstrm cal send\n",
 				__func__, atomic_read(&ac->cmd_state));
 		rc = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index f544393..d6ad97d 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -192,10 +192,34 @@
 		pr_err("%s: Unable to register CORE\n", __func__);
 }
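+/*
+ * Look up the cal block whose metainfo key matches the requested license
+ * key; returns NULL when no block with that key exists for the cal type.
+ */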
 
+struct cal_block_data *cal_utils_get_cal_block_by_key(
+		struct cal_type_data *cal_type, uint32_t key)
+{
+	struct list_head                *ptr, *next;
+	struct cal_block_data           *cal_block = NULL;
+	struct audio_cal_info_metainfo  *metainfo;
+
+	list_for_each_safe(ptr, next,
+		&cal_type->cal_blocks) {
+
+		cal_block = list_entry(ptr,
+			struct cal_block_data, list);
+		metainfo = (struct audio_cal_info_metainfo *)
+			cal_block->cal_info;
+		if (metainfo->nKey != key) {
+			pr_debug("%s: metainfo key mismatch!!! found:%x, needed:%x\n",
+				__func__, metainfo->nKey, key);
+		} else {
+			pr_debug("%s: metainfo key match found\n", __func__);
+			return cal_block;
+		}
+	}
+	return NULL;
+}
+
 int32_t core_set_license(uint32_t key, uint32_t module_id)
 {
 	struct avcs_cmd_set_license *cmd_setl = NULL;
-	struct audio_cal_info_metainfo *metainfo = NULL;
 	struct cal_block_data *cal_block = NULL;
 	int rc = 0, packet_size = 0;
 
@@ -209,8 +233,8 @@
 	}
 
 	mutex_lock(&((q6core_lcl.cal_data[META_CAL])->lock));
-	cal_block =
-		cal_utils_get_only_cal_block(q6core_lcl.cal_data[META_CAL]);
+	cal_block = cal_utils_get_cal_block_by_key(
+				q6core_lcl.cal_data[META_CAL], key);
 	if (cal_block == NULL ||
 		cal_block->cal_data.kvaddr == NULL ||
 		cal_block->cal_data.size <= 0) {
@@ -218,21 +242,6 @@
 		rc = -EINVAL;
 		goto cal_data_unlock;
 	}
-	metainfo = (struct audio_cal_info_metainfo *)cal_block->cal_info;
-	if (metainfo == NULL) {
-		pr_err("%s: No metainfo!!!", __func__);
-		rc = -EINVAL;
-		goto cal_data_unlock;
-	}
-	if (metainfo->nKey != key) {
-		pr_err("%s: metainfo key mismatch!!! found:%x, needed:%x\n",
-				__func__, metainfo->nKey, key);
-		rc = -EINVAL;
-		goto cal_data_unlock;
-	} else if (key == 0) {
-		pr_err("%s: metainfo key is %d a invalid key", __func__, key);
-		goto cal_data_unlock;
-	}
 
 	packet_size = sizeof(struct avcs_cmd_set_license) +
 					cal_block->cal_data.size;
@@ -265,9 +274,9 @@
 	memcpy((uint8_t *)cmd_setl + sizeof(struct avcs_cmd_set_license),
 		cal_block->cal_data.kvaddr,
 		cal_block->cal_data.size);
-	pr_info("%s: Set license opcode=0x%x ,key=0x%x, id =0x%x, size = %d\n",
+	pr_info("%s: Set license opcode=0x%x, id =0x%x, size = %d\n",
 			__func__, cmd_setl->hdr.opcode,
-			metainfo->nKey, cmd_setl->id, cmd_setl->size);
+			cmd_setl->id, cmd_setl->size);
 	rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)cmd_setl);
 	if (rc < 0)
 		pr_err("%s: SET_LICENSE failed op[0x%x]rc[%d]\n",
diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c
index d761cf5..08ddde4 100644
--- a/sound/soc/msm/qdsp6v2/q6lsm.c
+++ b/sound/soc/msm/qdsp6v2/q6lsm.c
@@ -38,6 +38,8 @@
 #define LSM_ALIGN_BOUNDARY 512
 #define LSM_SAMPLE_RATE 16000
 #define QLSM_PARAM_ID_MINOR_VERSION 1
+#define QLSM_PARAM_ID_MINOR_VERSION_2 2
+
 static int lsm_afe_port;
 
 enum {
@@ -706,29 +708,28 @@
 	return rc;
 }
 
-static int q6lsm_send_params(struct lsm_client *client,
+static int q6lsm_send_param_opmode(struct lsm_client *client,
 		struct lsm_module_param_ids *opmode_ids,
-		struct lsm_module_param_ids *connectport_ids,
 		u32 set_param_opcode)
 {
 	int rc;
-	struct lsm_cmd_set_opmode_connectport opmode_connectport;
+	struct lsm_cmd_set_params_opmode opmode_params;
 	struct apr_hdr  *msg_hdr;
-	struct lsm_param_connect_to_port *connect_to_port;
+
 	struct lsm_param_op_mode *op_mode;
 	u32 data_payload_size, param_size;
 
-	msg_hdr = &opmode_connectport.msg_hdr;
+	msg_hdr = &opmode_params.msg_hdr;
 	q6lsm_add_hdr(client, msg_hdr,
-		      sizeof(opmode_connectport), true);
+		      sizeof(opmode_params), true);
 	msg_hdr->opcode = set_param_opcode;
-	data_payload_size = sizeof(opmode_connectport) -
+	data_payload_size = sizeof(opmode_params) -
 			    sizeof(*msg_hdr) -
-			    sizeof(opmode_connectport.params_hdr);
-	q6lsm_set_param_hdr_info(&opmode_connectport.params_hdr,
+			    sizeof(opmode_params.params_hdr);
+	q6lsm_set_param_hdr_info(&opmode_params.params_hdr,
 				 data_payload_size, 0, 0, 0);
-	connect_to_port = &opmode_connectport.connect_to_port;
-	op_mode = &opmode_connectport.op_mode;
+	op_mode = &opmode_params.op_mode;
+
 
 	param_size = sizeof(struct lsm_param_op_mode) -
 		     sizeof(op_mode->common);
@@ -740,18 +741,8 @@
 	op_mode->reserved = 0;
 	pr_debug("%s: mode = 0x%x", __func__, op_mode->mode);
 
-	param_size = (sizeof(struct lsm_param_connect_to_port) -
-		      sizeof(connect_to_port->common));
-	q6lsm_set_param_common(&connect_to_port->common,
-			       connectport_ids, param_size,
-			       set_param_opcode);
-	connect_to_port->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
-	connect_to_port->port_id = client->connect_to_port;
-	connect_to_port->reserved = 0;
-	pr_debug("%s: port= %d", __func__, connect_to_port->port_id);
-
 	rc = q6lsm_apr_send_pkt(client, client->apr,
-				&opmode_connectport, true, NULL);
+				&opmode_params, true, NULL);
 	if (rc)
 		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
 		       __func__, msg_hdr->opcode, rc);
@@ -770,12 +761,241 @@
 	return lsm_afe_port;
 }
 
+int q6lsm_set_port_connected(struct lsm_client *client)
+{
+	int rc;
+	struct lsm_cmd_set_connectport connectport;
+	struct lsm_module_param_ids connectport_ids;
+	struct apr_hdr *msg_hdr;
+	struct lsm_param_connect_to_port *connect_to_port;
+	u32 data_payload_size, param_size, set_param_opcode;
+
+	if (client->use_topology) {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+		connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+	} else {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS;
+		connectport_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
+		connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+	}
+	client->connect_to_port = get_lsm_port();
+
+	msg_hdr = &connectport.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(connectport), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(connectport) -
+			    sizeof(*msg_hdr) -
+			    sizeof(connectport.params_hdr);
+	q6lsm_set_param_hdr_info(&connectport.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	connect_to_port = &connectport.connect_to_port;
+
+	param_size = (sizeof(struct lsm_param_connect_to_port) -
+		      sizeof(connect_to_port->common));
+	q6lsm_set_param_common(&connect_to_port->common,
+			       &connectport_ids, param_size,
+			       set_param_opcode);
+	connect_to_port->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	connect_to_port->port_id = client->connect_to_port;
+	connect_to_port->reserved = 0;
+	pr_debug("%s: port = %d\n", __func__, connect_to_port->port_id);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&connectport, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+			__func__, msg_hdr->opcode, rc);
+
+	return rc;
+}
+
+static int q6lsm_send_param_polling_enable(struct lsm_client *client,
+		bool poll_en,
+		struct lsm_module_param_ids *poll_enable_ids,
+		u32 set_param_opcode)
+{
+	int rc = 0;
+	struct lsm_cmd_poll_enable cmd;
+	struct apr_hdr  *msg_hdr;
+	struct lsm_param_poll_enable *poll_enable;
+	u32 data_payload_size, param_size;
+
+	msg_hdr = &cmd.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(struct lsm_cmd_poll_enable), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(struct lsm_cmd_poll_enable) -
+			    sizeof(struct apr_hdr) -
+			    sizeof(struct lsm_set_params_hdr);
+	q6lsm_set_param_hdr_info(&cmd.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	poll_enable = &cmd.poll_enable;
+
+	param_size = (sizeof(struct lsm_param_poll_enable) -
+		      sizeof(poll_enable->common));
+	q6lsm_set_param_common(&poll_enable->common,
+			       poll_enable_ids, param_size,
+			       set_param_opcode);
+	poll_enable->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	poll_enable->polling_enable = (poll_en) ? 1 : 0;
+	pr_debug("%s: poll enable = %d\n", __func__, poll_enable->polling_enable);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&cmd, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+
+	return rc;
+}
+
+int q6lsm_set_fwk_mode_cfg(struct lsm_client *client,
+			   uint32_t event_mode)
+{
+	int rc = 0;
+	struct lsm_cmd_set_fwk_mode_cfg cmd;
+	struct lsm_module_param_ids fwk_mode_cfg_ids;
+	struct apr_hdr  *msg_hdr;
+	struct lsm_param_fwk_mode_cfg *fwk_mode_cfg;
+	u32 data_payload_size, param_size, set_param_opcode;
+
+	if (client->use_topology) {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		fwk_mode_cfg_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+		fwk_mode_cfg_ids.param_id = LSM_PARAM_ID_FWK_MODE_CONFIG;
+	} else {
+		pr_debug("%s: Ignore sending event mode\n", __func__);
+		return rc;
+	}
+
+	msg_hdr = &cmd.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(struct lsm_cmd_set_fwk_mode_cfg), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(struct lsm_cmd_set_fwk_mode_cfg) -
+			    sizeof(struct apr_hdr) -
+			    sizeof(struct lsm_set_params_hdr);
+	q6lsm_set_param_hdr_info(&cmd.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	fwk_mode_cfg = &cmd.fwk_mode_cfg;
+
+	param_size = (sizeof(struct lsm_param_fwk_mode_cfg) -
+		      sizeof(fwk_mode_cfg->common));
+	q6lsm_set_param_common(&fwk_mode_cfg->common,
+			       &fwk_mode_cfg_ids, param_size,
+			       set_param_opcode);
+
+	fwk_mode_cfg->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+	fwk_mode_cfg->mode = event_mode;
+	pr_debug("%s: mode = %d\n", __func__, fwk_mode_cfg->mode);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&cmd, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+	return rc;
+}
+
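+/*
+ * Fill a default PCM channel map (FC, FL/FR, and LS/RS as channels grow)
+ * for 1 to 4 capture channels; other channel counts are rejected.
+ */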
+static int q6lsm_arrange_mch_map(struct lsm_param_media_fmt *media_fmt,
+			 int channel_count)
+{
+	int rc = 0;
+
+	memset(media_fmt->channel_mapping, 0, LSM_MAX_NUM_CHANNELS);
+
+	switch (channel_count) {
+	case 1:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FC;
+		break;
+	case 2:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+		media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+		break;
+	case 3:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+		media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+		media_fmt->channel_mapping[2] = PCM_CHANNEL_FC;
+		break;
+	case 4:
+		media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+		media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+		media_fmt->channel_mapping[2] = PCM_CHANNEL_LS;
+		media_fmt->channel_mapping[3] = PCM_CHANNEL_RS;
+		break;
+	default:
+		pr_err("%s: invalid num_chan %d\n", __func__, channel_count);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+int q6lsm_set_media_fmt_params(struct lsm_client *client)
+{
+	int rc = 0;
+	struct lsm_cmd_set_media_fmt cmd;
+	struct lsm_module_param_ids media_fmt_ids;
+	struct apr_hdr  *msg_hdr;
+	struct lsm_param_media_fmt *media_fmt;
+	u32 data_payload_size, param_size, set_param_opcode;
+	struct lsm_hw_params param = client->hw_params;
+
+	if (client->use_topology) {
+		set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+		media_fmt_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+		media_fmt_ids.param_id = LSM_PARAM_ID_MEDIA_FMT;
+	} else {
+		pr_debug("%s: Ignore sending media format\n", __func__);
+		goto err_ret;
+	}
+
+	msg_hdr = &cmd.msg_hdr;
+	q6lsm_add_hdr(client, msg_hdr,
+		      sizeof(struct lsm_cmd_set_media_fmt), true);
+	msg_hdr->opcode = set_param_opcode;
+	data_payload_size = sizeof(struct lsm_cmd_set_media_fmt) -
+			    sizeof(struct apr_hdr) -
+			    sizeof(struct lsm_set_params_hdr);
+	q6lsm_set_param_hdr_info(&cmd.params_hdr,
+				 data_payload_size, 0, 0, 0);
+	media_fmt = &cmd.media_fmt;
+
+	param_size = (sizeof(struct lsm_param_media_fmt) -
+		      sizeof(media_fmt->common));
+	q6lsm_set_param_common(&media_fmt->common,
+			       &media_fmt_ids, param_size,
+			       set_param_opcode);
+
+	media_fmt->minor_version = QLSM_PARAM_ID_MINOR_VERSION_2;
+	media_fmt->sample_rate = param.sample_rate;
+	media_fmt->num_channels = param.num_chs;
+	media_fmt->bit_width = param.sample_size;
+
+	rc = q6lsm_arrange_mch_map(media_fmt, media_fmt->num_channels);
+	if (rc)
+		goto err_ret;
+
+	pr_debug("%s: sample rate= %d, channels %d bit width %d\n",
+		 __func__, media_fmt->sample_rate, media_fmt->num_channels,
+		 media_fmt->bit_width);
+
+	rc = q6lsm_apr_send_pkt(client, client->apr,
+				&cmd, true, NULL);
+	if (rc)
+		pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+		       __func__, msg_hdr->opcode, rc);
+err_ret:
+	return rc;
+}
+
 int q6lsm_set_data(struct lsm_client *client,
 			   enum lsm_detection_mode mode,
 			   bool detectfailure)
 {
 	int rc = 0;
-	struct lsm_module_param_ids opmode_ids, connectport_ids;
+	struct lsm_module_param_ids opmode_ids;
 	struct lsm_module_param_ids conf_levels_ids;
 
 	if (!client->confidence_levels) {
@@ -799,16 +1019,12 @@
 		goto err_ret;
 	}
 	client->mode |= detectfailure << 2;
-	client->connect_to_port = get_lsm_port();
 
 	opmode_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
 	opmode_ids.param_id = LSM_PARAM_ID_OPERATION_MODE;
 
-	connectport_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
-	connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
-
-	rc = q6lsm_send_params(client, &opmode_ids, &connectport_ids,
-			      LSM_SESSION_CMD_SET_PARAMS);
+	rc = q6lsm_send_param_opmode(client, &opmode_ids,
+					LSM_SESSION_CMD_SET_PARAMS);
 	if (rc) {
 		pr_err("%s: Failed to set lsm config params %d\n",
 			__func__, rc);
@@ -1388,7 +1604,7 @@
 
 int q6lsm_set_one_param(struct lsm_client *client,
 	struct lsm_params_info *p_info, void *data,
-	enum LSM_PARAM_TYPE param_type)
+	uint32_t param_type)
 {
 	int rc = 0, pkt_sz;
 	struct lsm_module_param_ids ids;
@@ -1407,7 +1623,6 @@
 	case LSM_OPERATION_MODE: {
 		struct snd_lsm_detect_mode *det_mode = data;
 		struct lsm_module_param_ids opmode_ids;
-		struct lsm_module_param_ids connectport_ids;
 
 		if (det_mode->mode == LSM_MODE_KEYWORD_ONLY_DETECTION) {
 			client->mode = 0x01;
@@ -1420,16 +1635,12 @@
 		}
 
 		client->mode |= det_mode->detect_failure << 2;
-		client->connect_to_port = get_lsm_port();
 
 		opmode_ids.module_id = p_info->module_id;
 		opmode_ids.param_id = p_info->param_id;
 
-		connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
-		connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
-
-		rc = q6lsm_send_params(client, &opmode_ids, &connectport_ids,
-				       LSM_SESSION_CMD_SET_PARAMS_V2);
+		rc = q6lsm_send_param_opmode(client, &opmode_ids,
+					LSM_SESSION_CMD_SET_PARAMS_V2);
 		if (rc)
 			pr_err("%s: OPERATION_MODE failed, rc %d\n",
 				__func__, rc);
@@ -1457,6 +1668,20 @@
 			pr_err("%s: CONFIDENCE_LEVELS cmd failed, rc %d\n",
 				 __func__, rc);
 		break;
+	case LSM_POLLING_ENABLE: {
+		struct snd_lsm_poll_enable *lsm_poll_enable =
+				(struct snd_lsm_poll_enable *) data;
+		ids.module_id = p_info->module_id;
+		ids.param_id = p_info->param_id;
+		rc = q6lsm_send_param_polling_enable(client,
+				lsm_poll_enable->poll_en, &ids,
+				LSM_SESSION_CMD_SET_PARAMS_V2);
+		if (rc)
+			pr_err("%s: POLLING ENABLE cmd failed, rc %d\n",
+				 __func__, rc);
+		break;
+	}
+
 	case LSM_REG_SND_MODEL: {
 		struct lsm_cmd_set_params model_param;
 		u32 payload_size;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 1adb177..b829c65 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -78,7 +78,10 @@
 static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v);
 static int voice_send_cvp_register_vol_cal_cmd(struct voice_data *v);
 static int voice_send_cvp_deregister_vol_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v);
 static int voice_send_cvp_device_channels_cmd(struct voice_data *v);
+static int voice_send_cvp_media_format_cmd(struct voice_data *v,
+					   uint32_t param_type);
 static int voice_send_cvp_topology_commit_cmd(struct voice_data *v);
 
 static int voice_cvs_stop_playback(struct voice_data *v);
@@ -2359,7 +2362,7 @@
 		cvp_setdev_cmd.cvp_set_device_v2.vocproc_mode =
 				VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING;
 		cvp_setdev_cmd.cvp_set_device_v2.ec_ref_port_id =
-				common.ec_port_id;
+				common.ec_media_fmt_info.port_id;
 	} else {
 		cvp_setdev_cmd.cvp_set_device_v2.vocproc_mode =
 				    VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING;
@@ -2715,7 +2718,7 @@
 		cvp_session_cmd.cvp_session.vocproc_mode =
 				VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING;
 		cvp_session_cmd.cvp_session.ec_ref_port_id =
-					common.ec_port_id;
+				common.ec_media_fmt_info.port_id;
 	} else {
 		cvp_session_cmd.cvp_session.vocproc_mode =
 				 VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING;
@@ -3798,10 +3801,10 @@
 		goto fail;
 	}
 
-	ret = voice_send_cvp_device_channels_cmd(v);
+	ret = voice_send_cvp_media_fmt_info_cmd(v);
 	if (ret < 0) {
-		pr_err("%s: Set device channels failed err:%d\n",
-		       __func__, ret);
+		pr_err("%s: Set media format info failed err:%d\n", __func__,
+		       ret);
 		goto fail;
 	}
 
@@ -3955,6 +3958,160 @@
 	return ret;
 }
 
+static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v)
+{
+	int ret;
+
+	ret = voice_send_cvp_device_channels_cmd(v);
+	if (ret < 0)
+		goto done;
+
+	if (voice_get_cvd_int_version(common.cvd_version) >=
+	    CVD_INT_VERSION_2_3) {
+		ret = voice_send_cvp_media_format_cmd(v, RX_PATH);
+		if (ret < 0)
+			goto done;
+
+		ret = voice_send_cvp_media_format_cmd(v, TX_PATH);
+		if (ret < 0)
+			goto done;
+
+		if (common.ec_ref_ext)
+			ret = voice_send_cvp_media_format_cmd(v, EC_REF_PATH);
+	}
+
+done:
+	return ret;
+}
+
+static int voice_send_cvp_media_format_cmd(struct voice_data *v,
+					   uint32_t param_type)
+{
+	int ret = 0;
+	struct cvp_set_media_format_cmd cvp_set_media_format_cmd;
+	void *apr_cvp;
+	u16 cvp_handle;
+	struct vss_icommon_param_data_t *media_fmt_param_data =
+		&cvp_set_media_format_cmd.cvp_set_param_v2.param_data;
+	struct vss_param_endpoint_media_format_info_t *media_fmt_info =
+		&media_fmt_param_data->media_format_info;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_cvp = common.apr_q6_cvp;
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+	memset(&cvp_set_media_format_cmd, 0, sizeof(cvp_set_media_format_cmd));
+
+	/* Fill header data */
+	cvp_set_media_format_cmd.hdr.hdr_field =
+		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+			      APR_PKT_VER);
+	cvp_set_media_format_cmd.hdr.pkt_size =
+		APR_PKT_SIZE(APR_HDR_SIZE,
+			     sizeof(cvp_set_media_format_cmd) - APR_HDR_SIZE);
+	cvp_set_media_format_cmd.hdr.src_svc = 0;
+	cvp_set_media_format_cmd.hdr.src_domain = APR_DOMAIN_APPS;
+	cvp_set_media_format_cmd.hdr.src_port =
+		voice_get_idx_for_session(v->session_id);
+	cvp_set_media_format_cmd.hdr.dest_svc = 0;
+	cvp_set_media_format_cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+	cvp_set_media_format_cmd.hdr.dest_port = cvp_handle;
+	cvp_set_media_format_cmd.hdr.token = VOC_SET_MEDIA_FORMAT_PARAM_TOKEN;
+	cvp_set_media_format_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2;
+
+	/* Fill param data */
+	cvp_set_media_format_cmd.cvp_set_param_v2.mem_size =
+		sizeof(struct vss_icommon_param_data_t);
+	media_fmt_param_data->module_id = VSS_MODULE_CVD_GENERIC;
+	media_fmt_param_data->param_size =
+		sizeof(struct vss_param_endpoint_media_format_info_t);
+
+	/* Fill device specific data */
+	switch (param_type) {
+	case RX_PATH:
+		media_fmt_param_data->param_id =
+			VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO;
+		media_fmt_info->port_id = v->dev_rx.port_id;
+		media_fmt_info->num_channels = v->dev_rx.no_of_channels;
+		media_fmt_info->bits_per_sample = v->dev_rx.bits_per_sample;
+		media_fmt_info->sample_rate = v->dev_rx.sample_rate;
+		memcpy(&media_fmt_info->channel_mapping,
+		       &v->dev_rx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+		break;
+
+	case TX_PATH:
+		media_fmt_param_data->param_id =
+			VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO;
+		media_fmt_info->port_id = v->dev_tx.port_id;
+		media_fmt_info->num_channels = v->dev_tx.no_of_channels;
+		media_fmt_info->bits_per_sample = v->dev_tx.bits_per_sample;
+		media_fmt_info->sample_rate = v->dev_tx.sample_rate;
+		memcpy(&media_fmt_info->channel_mapping,
+		       &v->dev_tx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+		break;
+
+	case EC_REF_PATH:
+		media_fmt_param_data->param_id =
+			VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO;
+		media_fmt_info->port_id = common.ec_media_fmt_info.port_id;
+		media_fmt_info->num_channels =
+			common.ec_media_fmt_info.num_channels;
+		media_fmt_info->bits_per_sample =
+			common.ec_media_fmt_info.bits_per_sample;
+		media_fmt_info->sample_rate =
+			common.ec_media_fmt_info.sample_rate;
+		memcpy(&media_fmt_info->channel_mapping,
+		       &common.ec_media_fmt_info.channel_mapping,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		break;
+
+	default:
+		pr_err("%s: Invalid param type %d\n", __func__, param_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Send command */
+	v->cvp_state = CMD_STATUS_FAIL;
+	v->async_err = 0;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_set_media_format_cmd);
+	if (ret < 0) {
+		pr_err("%s: Fail in sending VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+		       __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (v->async_err > 0) {
+		pr_err("%s: DSP returned error[%s] handle = %d\n", __func__,
+		       adsp_err_get_err_str(v->async_err), cvp_handle);
+		ret = adsp_err_get_lnx_err_code(v->async_err);
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
 static int voice_send_cvp_topology_commit_cmd(struct voice_data *v)
 {
 	int ret = 0;
@@ -5665,6 +5822,48 @@
 	return ret;
 }
 
+int voc_set_afe_sidetone(uint32_t session_id, bool sidetone_enable)
+{
+	struct voice_data *v = NULL;
+	int ret = -EINVAL;
+	struct voice_session_itr itr;
+	u16 rx_port, tx_port;
+
+	common.sidetone_enable = sidetone_enable;
+	voice_itr_init(&itr, session_id);
+	while (voice_itr_get_next_session(&itr, &v)) {
+		if (v == NULL) {
+			pr_err("%s: invalid session_id 0x%x\n", __func__,
+				  session_id);
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&v->lock);
+		if (v->voc_state != VOC_RUN) {
+			mutex_unlock(&v->lock);
+			continue;
+		}
+		rx_port = v->dev_rx.port_id;
+		tx_port = v->dev_tx.port_id;
+		ret = afe_sidetone_enable(tx_port, rx_port,
+					  sidetone_enable);
+		if (!ret) {
+			mutex_unlock(&v->lock);
+			break;
+		}
+		mutex_unlock(&v->lock);
+	}
+	return ret;
+}
+
+bool voc_get_afe_sidetone(void)
+{
+	bool ret;
+
+	ret = common.sidetone_enable;
+	return ret;
+}
+
 int voc_get_pp_enable(uint32_t session_id, uint32_t module_id)
 {
 	struct voice_data *v = voice_get_session(session_id);
@@ -5716,7 +5915,7 @@
 }
 
 int voc_set_device_config(uint32_t session_id, uint8_t path_dir,
-			  uint8_t no_of_channels, uint32_t port_id)
+			  struct media_format_info *finfo)
 {
 	struct voice_data *v = voice_get_session(session_id);
 
@@ -5726,22 +5925,55 @@
 		return -EINVAL;
 	}
 
-	pr_debug("%s: path_dir=%d port_id=%x, channels=%d\n",
-		 __func__, path_dir, port_id, no_of_channels);
+	pr_debug("%s: path_dir=%d port_id=%x, channels=%d, sample_rate=%d, bits_per_sample=%d\n",
+		__func__, path_dir, finfo->port_id, finfo->num_channels,
+		finfo->sample_rate, finfo->bits_per_sample);
 
 	mutex_lock(&v->lock);
-	if (path_dir == RX_PATH) {
-		v->dev_rx.port_id = q6audio_get_port_id(port_id);
-		v->dev_rx.no_of_channels = no_of_channels;
-	} else {
-		v->dev_tx.port_id = q6audio_get_port_id(port_id);
-		v->dev_tx.no_of_channels = no_of_channels;
+	switch (path_dir) {
+	case RX_PATH:
+		v->dev_rx.port_id = q6audio_get_port_id(finfo->port_id);
+		v->dev_rx.no_of_channels = finfo->num_channels;
+		v->dev_rx.sample_rate = finfo->sample_rate;
+		v->dev_rx.bits_per_sample = finfo->bits_per_sample;
+		memcpy(&v->dev_rx.channel_mapping, &finfo->channel_mapping,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		break;
+	case TX_PATH:
+		v->dev_tx.port_id = q6audio_get_port_id(finfo->port_id);
+		v->dev_tx.no_of_channels = finfo->num_channels;
+		v->dev_tx.sample_rate = finfo->sample_rate;
+		v->dev_tx.bits_per_sample = finfo->bits_per_sample;
+		memcpy(&v->dev_tx.channel_mapping, &finfo->channel_mapping,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		break;
+	default:
+		pr_err("%s: Invalid path_dir %d\n", __func__, path_dir);
+		mutex_unlock(&v->lock);
+		return -EINVAL;
 	}
+
 	mutex_unlock(&v->lock);
 
 	return 0;
 }
 
+int voc_set_ext_ec_ref_media_fmt_info(struct media_format_info *finfo)
+{
+	mutex_lock(&common.common_lock);
+	if (common.ec_ref_ext) {
+		common.ec_media_fmt_info.num_channels = finfo->num_channels;
+		common.ec_media_fmt_info.bits_per_sample =
+			finfo->bits_per_sample;
+		common.ec_media_fmt_info.sample_rate = finfo->sample_rate;
+		memcpy(&common.ec_media_fmt_info.channel_mapping,
+		       &finfo->channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
+	} else {
+		pr_debug("%s: Ext EC Ref not active, returning\n", __func__);
+	}
+	mutex_unlock(&common.common_lock);
+	return 0;
+}
+
 int voc_set_route_flag(uint32_t session_id, uint8_t path_dir, uint8_t set)
 {
 	struct voice_data *v = voice_get_session(session_id);
@@ -5951,9 +6183,9 @@
 			goto done;
 		}
 
-		ret = voice_send_cvp_device_channels_cmd(v);
+		ret = voice_send_cvp_media_fmt_info_cmd(v);
 		if (ret < 0) {
-			pr_err("%s:  Set device channels failed\n", __func__);
+			pr_err("%s: Set format failed err:%d\n", __func__, ret);
 			goto done;
 		}
 
@@ -6148,7 +6380,7 @@
 	return ret;
 }
 
-int voc_set_ext_ec_ref(uint16_t port_id, bool state)
+int voc_set_ext_ec_ref_port_id(uint16_t port_id, bool state)
 {
 	int ret = 0;
 
@@ -6159,17 +6391,25 @@
 			ret = -EINVAL;
 			goto exit;
 		}
-		common.ec_port_id = port_id;
 		common.ec_ref_ext = true;
 	} else {
 		common.ec_ref_ext = false;
-		common.ec_port_id = port_id;
 	}
+	/* Cache EC Format Info in common */
+	common.ec_media_fmt_info.port_id = port_id;
 exit:
 	mutex_unlock(&common.common_lock);
 	return ret;
 }
 
+int voc_get_ext_ec_ref_port_id(void)
+{
+	if (common.ec_ref_ext)
+		return common.ec_media_fmt_info.port_id;
+	else
+		return AFE_PORT_INVALID;
+}
+
 void voc_register_mvs_cb(ul_cb_fn ul_cb,
 			   dl_cb_fn dl_cb,
 			   voip_ssr_cb ssr_cb,
@@ -6527,18 +6767,19 @@
 				v->async_err = ptr[1];
 				wake_up(&v->cvs_wait);
 				break;
-			case VOICE_CMD_SET_PARAM:
-				pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+			case VSS_ICOMMON_CMD_SET_PARAM_V2:
+				pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+					 __func__);
 				rtac_make_voice_callback(RTAC_CVS, ptr,
 							data->payload_size);
 				break;
-			case VOICE_CMD_GET_PARAM:
-				pr_debug("%s: VOICE_CMD_GET_PARAM\n",
-					__func__);
+			case VSS_ICOMMON_CMD_GET_PARAM_V2:
+				pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
+					 __func__);
 				/* Should only come here if there is an APR */
 				/* error or malformed APR packet. Otherwise */
 				/* response will be returned as */
-				/* VOICE_EVT_GET_PARAM_ACK */
+				/* VSS_ICOMMON_RSP_GET_PARAM */
 				if (ptr[1] != 0) {
 					pr_err("%s: CVP get param error = %d, resuming\n",
 						__func__, ptr[1]);
@@ -6666,12 +6907,12 @@
 		pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n");
 	} else if (data->opcode == VSS_ISTREAM_EVT_READY) {
 		pr_debug("Recd VSS_ISTREAM_EVT_READY\n");
-	} else if (data->opcode ==  VOICE_EVT_GET_PARAM_ACK) {
-		pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+	} else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+		pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
 		ptr = data->payload;
 		if (ptr[0] != 0) {
-			pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
-				__func__, ptr[0]);
+			pr_err("%s: VSS_ICOMMON_RSP_GET_PARAM returned error = 0x%x\n",
+			       __func__, ptr[0]);
 		}
 		rtac_make_voice_callback(RTAC_CVS, data->payload,
 					data->payload_size);
@@ -6808,18 +7049,35 @@
 				break;
 			case VSS_IVPCM_EVT_PUSH_BUFFER_V2:
 				break;
-			case VOICE_CMD_SET_PARAM:
-				pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
-				rtac_make_voice_callback(RTAC_CVP, ptr,
-							data->payload_size);
+			case VSS_ICOMMON_CMD_SET_PARAM_V2:
+				switch (data->token) {
+				case VOC_SET_MEDIA_FORMAT_PARAM_TOKEN:
+					pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by voice_send_cvp_media_format_cmd\n",
+						__func__);
+					v->cvp_state = CMD_STATUS_SUCCESS;
+					v->async_err = ptr[1];
+					wake_up(&v->cvp_wait);
+					break;
+				case VOC_RTAC_SET_PARAM_TOKEN:
+					pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by rtac\n",
+						__func__);
+					rtac_make_voice_callback(
+						RTAC_CVP, ptr,
+						data->payload_size);
+					break;
+				default:
+					pr_debug("%s: invalid token for command VSS_ICOMMON_CMD_SET_PARAM_V2: %d\n",
+						__func__, data->token);
+					break;
+				}
 				break;
-			case VOICE_CMD_GET_PARAM:
-				pr_debug("%s: VOICE_CMD_GET_PARAM\n",
-					__func__);
+			case VSS_ICOMMON_CMD_GET_PARAM_V2:
+				pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
+					 __func__);
 				/* Should only come here if there is an APR */
 				/* error or malformed APR packet. Otherwise */
 				/* response will be returned as */
-				/* VOICE_EVT_GET_PARAM_ACK */
+				/* VSS_ICOMMON_RSP_GET_PARAM */
 				if (ptr[1] != 0) {
 					pr_err("%s: CVP get param error = %d, resuming\n",
 						__func__, ptr[1]);
@@ -6880,12 +7138,12 @@
 				break;
 			}
 		}
-	} else if (data->opcode ==  VOICE_EVT_GET_PARAM_ACK) {
-		pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+	} else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+		pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
 		ptr = data->payload;
 		if (ptr[0] != 0) {
-			pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
-				__func__, ptr[0]);
+			pr_err("%s: VSS_ICOMMON_RSP_GET_PARAM returned error = 0x%x\n",
+			       __func__, ptr[0]);
 		}
 		rtac_make_voice_callback(RTAC_CVP, data->payload,
 			data->payload_size);
@@ -8306,7 +8564,19 @@
 	common.default_vol_step_val = 0;
 	common.default_vol_ramp_duration_ms = DEFAULT_VOLUME_RAMP_DURATION;
 	common.default_mute_ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION;
+
+	/* Initialize EC Ref media format info */
 	common.ec_ref_ext = false;
+	common.ec_media_fmt_info.port_id = AFE_PORT_INVALID;
+	common.ec_media_fmt_info.num_channels = 0;
+	common.ec_media_fmt_info.bits_per_sample = 16;
+	common.ec_media_fmt_info.sample_rate = 8000;
+	memset(&common.ec_media_fmt_info.channel_mapping, 0,
+	       VSS_CHANNEL_MAPPING_SIZE);
+
+	/* Initialize AFE Sidetone Enable */
+	common.sidetone_enable = false;
+
 	/* Initialize MVS info. */
 	common.mvs_info.network_type = VSS_NETWORK_ID_DEFAULT;
 
@@ -8344,8 +8614,16 @@
 		common.voice[i].dev_rx.port_id = 0x100A;
 		common.voice[i].dev_tx.dev_id = 0;
 		common.voice[i].dev_rx.dev_id = 0;
-		common.voice[i].dev_rx.no_of_channels = 0;
 		common.voice[i].dev_tx.no_of_channels = 0;
+		common.voice[i].dev_rx.no_of_channels = 0;
+		common.voice[i].dev_tx.sample_rate = 8000;
+		common.voice[i].dev_rx.sample_rate = 8000;
+		common.voice[i].dev_tx.bits_per_sample = 16;
+		common.voice[i].dev_rx.bits_per_sample = 16;
+		memset(&common.voice[i].dev_tx.channel_mapping, 0,
+		       VSS_CHANNEL_MAPPING_SIZE);
+		memset(&common.voice[i].dev_rx.channel_mapping, 0,
+		       VSS_CHANNEL_MAPPING_SIZE);
 		common.voice[i].sidetone_gain = 0x512;
 		common.voice[i].dtmf_rx_detect_en = 0;
 		common.voice[i].lch_mode = 0;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index aa58574..74d80be 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -21,6 +21,8 @@
 #define SESSION_NAME_LEN 20
 #define NUM_OF_MEMORY_BLOCKS 1
 #define NUM_OF_BUFFERS 2
+#define VSS_NUM_CHANNELS_MAX 8
+#define VSS_CHANNEL_MAPPING_SIZE (sizeof(uint8_t) * VSS_NUM_CHANNELS_MAX)
 /*
  * BUFFER BLOCK SIZE based on
  * the supported page size
@@ -97,7 +99,9 @@
 /* Device information payload structure */
 struct device_data {
 	uint32_t dev_mute;
-	uint32_t sample;
+	uint32_t sample_rate;
+	uint16_t bits_per_sample;
+	uint8_t  channel_mapping[VSS_NUM_CHANNELS_MAX];
 	uint32_t enabled;
 	uint32_t dev_id;
 	uint32_t port_id;
@@ -107,6 +111,25 @@
 	uint32_t no_of_channels;
 };
 
+/*
+ * Format information structure to match
+ * vss_param_endpoint_media_format_info_t
+ */
+struct media_format_info {
+	uint32_t port_id;
+	uint16_t num_channels;
+	uint16_t bits_per_sample;
+	uint32_t sample_rate;
+	uint8_t  channel_mapping[VSS_NUM_CHANNELS_MAX];
+};
+
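+/*
+ * Tokens carried in VSS_ICOMMON_CMD_SET_PARAM_V2 headers so the CVP/CVS
+ * response handlers can tell which caller (RTAC or the media format setup
+ * path) issued the command.
+ */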
+enum {
+	VOC_NO_SET_PARAM_TOKEN = 0,
+	VOC_RTAC_SET_PARAM_TOKEN,
+	VOC_SET_MEDIA_FORMAT_PARAM_TOKEN,
+	VOC_SET_PARAM_TOKEN_MAX
+};
+
 struct voice_dev_route_state {
 	u16 rx_route_flag;
 	u16 tx_route_flag;
@@ -189,6 +212,81 @@
 	struct vss_icommon_cmd_unmap_memory_t vss_unmap_mem;
 } __packed;
 
+struct vss_param_endpoint_media_format_info_t {
+	/* AFE port ID to which this media format corresponds to. */
+	uint32_t port_id;
+	/*
+	 * Number of channels of data.
+	 * Supported values: 1 to 8
+	 */
+	uint16_t num_channels;
+	/*
+	 * Bits per sample of data.
+	 * Supported values: 16 and 24
+	 */
+	uint16_t bits_per_sample;
+	/*
+	 * Sampling rate in Hz.
+	 * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+	 * 44100, 48000, 88200, 96000, 176400, and 192000
+	 */
+	uint32_t sample_rate;
+	/*
+	 * The channel[i] mapping describes channel i. Each element i
+	 * of the array describes channel i inside the data buffer. An
+	 * unused or unknown channel is set to 0.
+	 */
+	uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
+} __packed;
+
+struct vss_icommon_param_data_t {
+	/* Valid ID of the module. */
+	uint32_t module_id;
+	/* Valid ID of the parameter. */
+	uint32_t param_id;
+	/*
+	 * Data size of the structure relating to the param_id/module_id
+	 * combination in uint8_t bytes.
+	 */
+	uint16_t param_size;
+	/* This field must be set to zero. */
+	uint16_t reserved;
+	/*
+	 * Parameter data payload when inband. Should have size param_size.
+	 * Bit size of payload must be a multiple of 4.
+	 */
+	union {
+		struct vss_param_endpoint_media_format_info_t media_format_info;
+	};
+} __packed;
+
+/* Payload structure for the VSS_ICOMMON_CMD_SET_PARAM_V2 command. */
+struct vss_icommon_cmd_set_param_v2_t {
+	/*
+	 * Pointer to the unique identifier for an address (physical/virtual).
+	 *
+	 * If the parameter data payload is within the message payload
+	 * (in-band), set this field to 0. The parameter data begins at the
+	 * specified data payload address.
+	 *
+	 * If the parameter data is out-of-band, this field is the handle to
+	 * the physical address in the shared memory that holds the parameter
+	 * data.
+	 */
+	uint32_t mem_handle;
+	/*
+	 * Location of the parameter data payload.
+	 *
+	 * The payload is an array of vss_icommon_param_data_t. If the
+	 * mem_handle is 0, this field is ignored.
+	 */
+	uint64_t mem_address;
+	/* Size of the parameter data payload in bytes. */
+	uint32_t mem_size;
+	/* Parameter data payload when the data is inband. */
+	struct vss_icommon_param_data_t param_data;
+} __packed;
+
 /* TO MVM commands */
 #define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION	0x000110FF
 /**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
@@ -576,6 +674,14 @@
 #define VSS_IRECORD_MODE_TX_RX_MIXING			0x00010F7B
 /* Select mixed Tx and Rx paths. */
 
+#define VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO		0x00013253
+
+#define VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO		0x00013254
+
+#define VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO	0x00013255
+
+#define VSS_MODULE_CVD_GENERIC				0x0001316E
+
 #define VSS_ISTREAM_EVT_NOT_READY			0x000110FD
 
 #define VSS_ISTREAM_EVT_READY				0x000110FC
@@ -1377,6 +1483,11 @@
 	struct vss_ivocproc_cmd_topology_set_dev_channels_t cvp_set_channels;
 } __packed;
 
+struct cvp_set_media_format_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_param_v2_t cvp_set_param_v2;
+} __packed;
+
 struct cvp_set_vp3_data_cmd {
 	struct apr_hdr hdr;
 } __packed;
@@ -1610,7 +1721,7 @@
 	uint32_t default_vol_ramp_duration_ms;
 	uint32_t default_mute_ramp_duration_ms;
 	bool ec_ref_ext;
-	uint16_t ec_port_id;
+	struct media_format_info ec_media_fmt_info;
 
 	/* APR to MVM in the Q6 */
 	void *apr_q6_mvm;
@@ -1651,6 +1762,7 @@
 	struct vss_isoundfocus_rsp_get_sectors_t soundFocusResponse;
 	struct shared_mem_info source_tracking_sh_mem;
 	struct vss_isourcetrack_activity_data_t sourceTrackingResponse;
+	bool sidetone_enable;
 };
 
 struct voice_session_itr {
@@ -1681,9 +1793,9 @@
 enum {
 	RX_PATH = 0,
 	TX_PATH,
+	EC_REF_PATH,
 };
 
-
 #define VOC_PATH_PASSIVE 0
 #define VOC_PATH_FULL 1
 #define VOC_PATH_VOLTE_PASSIVE 2
@@ -1769,16 +1881,20 @@
 int voc_start_playback(uint32_t set, uint16_t port_id);
 int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id);
 int voice_get_idx_for_session(u32 session_id);
-int voc_set_ext_ec_ref(uint16_t port_id, bool state);
+int voc_set_ext_ec_ref_port_id(uint16_t port_id, bool state);
+int voc_get_ext_ec_ref_port_id(void);
+int voc_set_ext_ec_ref_media_fmt_info(struct media_format_info *finfo);
 int voc_update_amr_vocoder_rate(uint32_t session_id);
 int voc_disable_device(uint32_t session_id);
 int voc_enable_device(uint32_t session_id);
 void voc_set_destroy_cvd_flag(bool is_destroy_cvd);
 int voc_disable_topology(uint32_t session_id, uint32_t disable);
 int voc_set_device_config(uint32_t session_id, uint8_t path_dir,
-			  uint8_t no_of_channels, uint32_t dev_port_id);
+			  struct media_format_info *finfo);
 uint32_t voice_get_topology(uint32_t topology_idx);
 int voc_set_sound_focus(struct sound_focus_param sound_focus_param);
 int voc_get_sound_focus(struct sound_focus_param *soundFocusData);
 int voc_get_source_tracking(struct source_tracking_param *sourceTrackingData);
+int voc_set_afe_sidetone(uint32_t session_id, bool sidetone_enable);
+bool voc_get_afe_sidetone(void);
 #endif
diff --git a/sound/soc/msm/qdsp6v2/rtac.c b/sound/soc/msm/qdsp6v2/rtac.c
index 923908f..cd02501 100644
--- a/sound/soc/msm/qdsp6v2/rtac.c
+++ b/sound/soc/msm/qdsp6v2/rtac.c
@@ -400,6 +400,24 @@
 	return;
 }
 
+void rtac_update_afe_topology(u32 port_id)
+{
+	u32 i = 0;
+
+	mutex_lock(&rtac_adm_mutex);
+	for (i = 0; i < rtac_adm_data.num_of_dev; i++) {
+		if (rtac_adm_data.device[i].afe_port == port_id) {
+			rtac_adm_data.device[i].afe_topology =
+						afe_get_topology(port_id);
+			pr_debug("%s: port_id = 0x%x topology_id = 0x%x copp_id = %d\n",
+				 __func__, port_id,
+				 rtac_adm_data.device[i].afe_topology,
+				 rtac_adm_data.device[i].copp);
+		}
+	}
+	mutex_unlock(&rtac_adm_mutex);
+}
+
 void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id,
 			 u32 app_type, u32 acdb_id)
 {
@@ -1496,7 +1514,7 @@
 		goto err;
 	}
 
-	if (opcode == VOICE_CMD_SET_PARAM) {
+	if (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) {
 		/* set payload size to in-band payload */
 		/* set data size to actual out of band payload size */
 		data_size = payload_size - 4 * sizeof(u32);
@@ -1549,7 +1567,9 @@
 	voice_params.dest_svc = 0;
 	voice_params.dest_domain = APR_DOMAIN_MODEM;
 	voice_params.dest_port = (u16)dest_port;
-	voice_params.token = 0;
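+	/*
+	 * Tag RTAC-originated SET_PARAM commands so the voice callbacks can
+	 * route the response back to rtac rather than the media-format wait
+	 * path.
+	 */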
+	voice_params.token = (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) ?
+				     VOC_RTAC_SET_PARAM_TOKEN :
+				     0;
 	voice_params.opcode = opcode;
 
 	/* fill for out-of-band */
@@ -1594,7 +1614,7 @@
 		goto err;
 	}
 
-	if (opcode == VOICE_CMD_GET_PARAM) {
+	if (opcode == VSS_ICOMMON_CMD_GET_PARAM_V2) {
 		bytes_returned = ((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.
 			kvaddr)[2] + 3 * sizeof(u32);
 
@@ -1686,20 +1706,20 @@
 			ASM_STREAM_CMD_SET_PP_PARAMS_V2);
 		break;
 	case AUDIO_GET_RTAC_CVS_CAL:
-		result = send_voice_apr(RTAC_CVS, (void *)arg,
-			VOICE_CMD_GET_PARAM);
+		result = send_voice_apr(RTAC_CVS, (void *) arg,
+					VSS_ICOMMON_CMD_GET_PARAM_V2);
 		break;
 	case AUDIO_SET_RTAC_CVS_CAL:
-		result = send_voice_apr(RTAC_CVS, (void *)arg,
-			VOICE_CMD_SET_PARAM);
+		result = send_voice_apr(RTAC_CVS, (void *) arg,
+					VSS_ICOMMON_CMD_SET_PARAM_V2);
 		break;
 	case AUDIO_GET_RTAC_CVP_CAL:
-		result = send_voice_apr(RTAC_CVP, (void *)arg,
-			VOICE_CMD_GET_PARAM);
+		result = send_voice_apr(RTAC_CVP, (void *) arg,
+					VSS_ICOMMON_CMD_GET_PARAM_V2);
 		break;
 	case AUDIO_SET_RTAC_CVP_CAL:
-		result = send_voice_apr(RTAC_CVP, (void *)arg,
-			VOICE_CMD_SET_PARAM);
+		result = send_voice_apr(RTAC_CVP, (void *) arg,
+					VSS_ICOMMON_CMD_SET_PARAM_V2);
 		break;
 	case AUDIO_GET_RTAC_AFE_CAL:
 		result = send_rtac_afe_apr((void *)arg,
diff --git a/sound/soc/msm/msmfalcon-common.c b/sound/soc/msm/sdm660-common.c
similarity index 83%
rename from sound/soc/msm/msmfalcon-common.c
rename to sound/soc/msm/sdm660-common.c
index fba9c28..f1fbce3 100644
--- a/sound/soc/msm/msmfalcon-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -16,14 +16,16 @@
 #include <sound/pcm_params.h>
 #include <sound/q6afe-v2.h>
 #include "qdsp6v2/msm-pcm-routing-v2.h"
-#include "msm-audio-pinctrl.h"
-#include "msmfalcon-common.h"
-#include "msmfalcon-internal.h"
-#include "msmfalcon-external.h"
-#include "../codecs/msm8x16/msm8x16-wcd.h"
+#include "sdm660-common.h"
+#include "sdm660-internal.h"
+#include "sdm660-external.h"
+#include "../codecs/sdm660_cdc/msm-analog-cdc.h"
 #include "../codecs/wsa881x.h"
 
-#define DRV_NAME "msmfalcon-asoc-snd"
+#define DRV_NAME "sdm660-asoc-snd"
+
+#define MSM_INT_DIGITAL_CODEC "msm-dig-codec"
+#define PMIC_INT_ANALOG_CODEC "analog-codec"
 
 #define DEV_NAME_STR_LEN  32
 #define DEFAULT_MCLK_RATE 9600000
@@ -34,6 +36,11 @@
 	u32 channels;
 };
 
+enum {
+	DP_RX_IDX,
+	EXT_DISP_RX_IDX_MAX,
+};
+
 /* TDM default config */
 static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
 	{ /* PRI TDM */
@@ -122,6 +129,10 @@
 	}
 };
 
+/* Default configuration of external display BE */
+static struct dev_config ext_disp_rx_cfg[] = {
+	[DP_RX_IDX] =   {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
 static struct dev_config usb_rx_cfg = {
 	.sample_rate = SAMPLING_RATE_48KHZ,
 	.bit_format = SNDRV_PCM_FORMAT_S16_LE,
@@ -158,20 +169,18 @@
 	PCM_I2S_SEL_MAX,
 };
 
-struct mi2s_aux_pcm_common_conf {
-	struct mutex lock;
-	void *pcm_i2s_sel_vt_addr;
-};
-
 struct mi2s_conf {
 	struct mutex lock;
 	u32 ref_cnt;
 	u32 msm_is_mi2s_master;
+	u32 msm_is_ext_mclk;
 };
 
-struct auxpcm_conf {
-	struct mutex lock;
-	u32 ref_cnt;
+static u32 mi2s_ebit_clk[MI2S_MAX] = {
+	Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT
 };
 
 struct msm_wsa881x_dev_info {
@@ -189,7 +198,7 @@
 	.detect_extn_cable = true,
 	.mono_stero_detection = false,
 	.swap_gnd_mic = NULL,
-	.hs_ext_micbias = false,
+	.hs_ext_micbias = true,
 	.key_code[0] = KEY_MEDIA,
 	.key_code[1] = KEY_VOICECOMMAND,
 	.key_code[2] = KEY_VOLUMEUP,
@@ -251,6 +260,8 @@
 					   "Eight"};
 static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
 					  "S32_LE"};
+static char const *mi2s_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
+					  "S32_LE"};
 static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
 				    "Five", "Six", "Seven", "Eight"};
 static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -264,7 +275,11 @@
 					"KHZ_16", "KHZ_22P05",
 					"KHZ_32", "KHZ_44P1", "KHZ_48",
 					"KHZ_96", "KHZ_192", "KHZ_384"};
+static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"};
+static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+						  "KHZ_192"};
 
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_chs, ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(proxy_rx_chs, ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text);
@@ -282,6 +297,14 @@
 static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_sample_rate, mi2s_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_sample_rate, mi2s_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_format, mi2s_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_chs, mi2s_ch_text);
@@ -294,8 +317,11 @@
 static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_chs, usb_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_format, ext_disp_bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate,
+				ext_disp_sample_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text);
@@ -338,9 +364,42 @@
 	}
 };
 
-static struct mi2s_aux_pcm_common_conf mi2s_auxpcm_conf[PCM_I2S_SEL_MAX];
+static struct afe_clk_set mi2s_mclk[MI2S_MAX] = {
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_3,
+		Q6AFE_LPASS_OSR_CLK_9_P600_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_4,
+		Q6AFE_LPASS_OSR_CLK_9_P600_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_1,
+		Q6AFE_LPASS_OSR_CLK_9_P600_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	},
+	{
+		AFE_API_VERSION_I2S_CONFIG,
+		Q6AFE_LPASS_CLK_ID_MCLK_2,
+		Q6AFE_LPASS_OSR_CLK_9_P600_MHZ,
+		Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
+		Q6AFE_LPASS_CLK_ROOT_DEFAULT,
+		0,
+	}
+};
+
 static struct mi2s_conf mi2s_intf_conf[MI2S_MAX];
-static struct auxpcm_conf auxpcm_intf_conf[AUX_PCM_MAX];
 
 static int proxy_rx_ch_get(struct snd_kcontrol *kcontrol,
 			       struct snd_ctl_elem_value *ucontrol)
@@ -634,6 +693,54 @@
 	return value;
 }
 
+static int mi2s_get_format(int value)
+{
+	int format = 0;
+
+	switch (value) {
+	case 0:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	case 1:
+		format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 2:
+		format = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 3:
+		format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	default:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+	int value = 0;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		value = 0;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		value = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		value = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		value = 3;
+		break;
+	default:
+		value = 0;
+		break;
+	}
+	return value;
+}
+
 static int tdm_rx_format_get(struct snd_kcontrol *kcontrol,
 			     struct snd_ctl_elem_value *ucontrol)
 {
@@ -1099,6 +1206,78 @@
 	return 0;
 }
 
+static int mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_tx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d] _tx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_tx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		idx, mi2s_tx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_rx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d] _rx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_rx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		idx, mi2s_rx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
 static int msm_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_value *ucontrol)
 {
@@ -1481,6 +1660,162 @@
 	return rc;
 }
 
+static int ext_disp_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+	int idx;
+
+	if (strnstr(kcontrol->id.name, "Display Port RX",
+			 sizeof("Display Port RX")))
+		idx = DP_RX_IDX;
+	else {
+		pr_err("%s: unsupported BE: %s\n",
+			__func__, kcontrol->id.name);
+		idx = -EINVAL;
+	}
+
+	return idx;
+}
+
+static int ext_disp_rx_format_get(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ext_disp_rx_cfg[idx].bit_format) {
+	case SNDRV_PCM_FORMAT_S24_LE:
+		ucontrol->value.integer.value[0] = 1;
+		break;
+
+	case SNDRV_PCM_FORMAT_S16_LE:
+	default:
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	}
+
+	pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+		 __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+		 ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int ext_disp_rx_format_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 1:
+		ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 0:
+	default:
+		ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+		 __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+		 ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int ext_disp_rx_ch_get(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.integer.value[0] =
+			ext_disp_rx_cfg[idx].channels - 2;
+
+	pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+		 idx, ext_disp_rx_cfg[idx].channels);
+
+	return 0;
+}
+
+static int ext_disp_rx_ch_put(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ext_disp_rx_cfg[idx].channels =
+			ucontrol->value.integer.value[0] + 2;
+
+	pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+		 idx, ext_disp_rx_cfg[idx].channels);
+	return 1;
+}
+
+static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	int sample_rate_val;
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ext_disp_rx_cfg[idx].sample_rate) {
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 2;
+		break;
+
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 1;
+		break;
+
+	case SAMPLING_RATE_48KHZ:
+	default:
+		sample_rate_val = 0;
+		break;
+	}
+
+	ucontrol->value.integer.value[0] = sample_rate_val;
+	pr_debug("%s: ext_disp_rx[%d].sample_rate = %d\n", __func__,
+		 idx, ext_disp_rx_cfg[idx].sample_rate);
+
+	return 0;
+}
+
+static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = ext_disp_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 2:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	case 1:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 0:
+	default:
+		ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+
+	pr_debug("%s: control value = %ld, ext_disp_rx[%d].sample_rate = %d\n",
+		 __func__, ucontrol->value.integer.value[0], idx,
+		 ext_disp_rx_cfg[idx].sample_rate);
+	return 0;
+}
+
 const struct snd_kcontrol_new msm_common_snd_controls[] = {
 	SOC_ENUM_EXT("PROXY_RX Channels", proxy_rx_chs,
 			proxy_rx_ch_get, proxy_rx_ch_put),
@@ -1532,6 +1867,30 @@
 	SOC_ENUM_EXT("QUAT_MI2S_TX SampleRate", quat_mi2s_tx_sample_rate,
 			mi2s_tx_sample_rate_get,
 			mi2s_tx_sample_rate_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX Format", prim_mi2s_rx_format,
+			mi2s_rx_format_get,
+			mi2s_rx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_RX Format", sec_mi2s_rx_format,
+			mi2s_rx_format_get,
+			mi2s_rx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_RX Format", tert_mi2s_rx_format,
+			mi2s_rx_format_get,
+			mi2s_rx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_RX Format", quat_mi2s_rx_format,
+			mi2s_rx_format_get,
+			mi2s_rx_format_put),
+	SOC_ENUM_EXT("PRIM_MI2S_TX Format", prim_mi2s_tx_format,
+			mi2s_tx_format_get,
+			mi2s_tx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_TX Format", sec_mi2s_tx_format,
+			mi2s_tx_format_get,
+			mi2s_tx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_TX Format", tert_mi2s_tx_format,
+			mi2s_tx_format_get,
+			mi2s_tx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_TX Format", quat_mi2s_tx_format,
+			mi2s_tx_format_get,
+			mi2s_tx_format_put),
 	SOC_ENUM_EXT("PRIM_MI2S_RX Channels", prim_mi2s_rx_chs,
 			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
 	SOC_ENUM_EXT("PRIM_MI2S_TX Channels", prim_mi2s_tx_chs,
@@ -1552,16 +1911,23 @@
 			usb_audio_rx_ch_get, usb_audio_rx_ch_put),
 	SOC_ENUM_EXT("USB_AUDIO_TX Channels", usb_tx_chs,
 			usb_audio_tx_ch_get, usb_audio_tx_ch_put),
+	SOC_ENUM_EXT("Display Port RX Channels", ext_disp_rx_chs,
+			ext_disp_rx_ch_get, ext_disp_rx_ch_put),
 	SOC_ENUM_EXT("USB_AUDIO_RX Format", usb_rx_format,
 			usb_audio_rx_format_get, usb_audio_rx_format_put),
 	SOC_ENUM_EXT("USB_AUDIO_TX Format", usb_tx_format,
 			usb_audio_tx_format_get, usb_audio_tx_format_put),
+	SOC_ENUM_EXT("Display Port RX Bit Format", ext_disp_rx_format,
+			ext_disp_rx_format_get, ext_disp_rx_format_put),
 	SOC_ENUM_EXT("USB_AUDIO_RX SampleRate", usb_rx_sample_rate,
 			usb_audio_rx_sample_rate_get,
 			usb_audio_rx_sample_rate_put),
 	SOC_ENUM_EXT("USB_AUDIO_TX SampleRate", usb_tx_sample_rate,
 			usb_audio_tx_sample_rate_get,
 			usb_audio_tx_sample_rate_put),
+	SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
+			ext_disp_rx_sample_rate_get,
+			ext_disp_rx_sample_rate_put),
 	SOC_ENUM_EXT("PRI_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
 			tdm_rx_sample_rate_get,
 			tdm_rx_sample_rate_put),
@@ -1636,6 +2002,17 @@
 			tdm_tx_ch_put),
 };
 
+/**
+ * msm_common_snd_controls_size - return the number of common snd controls
+ *
+ * Return: size of the common controls array
+ */
+int msm_common_snd_controls_size(void)
+{
+	return ARRAY_SIZE(msm_common_snd_controls);
+}
+EXPORT_SYMBOL(msm_common_snd_controls_size);
+
 static inline int param_is_mask(int p)
 {
 	return (p >= SNDRV_PCM_HW_PARAM_FIRST_MASK) &&
@@ -1661,6 +2038,23 @@
 	}
 }
 
+static int msm_ext_disp_get_idx_from_beid(int32_t be_id)
+{
+	int idx;
+
+	switch (be_id) {
+	case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+		idx = DP_RX_IDX;
+		break;
+	default:
+		pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id);
+		idx = -EINVAL;
+		break;
+	}
+
+	return idx;
+}
+
 /**
  * msm_common_be_hw_params_fixup - updates settings of ALSA BE hw params.
  *
@@ -1678,6 +2072,7 @@
 	struct snd_interval *channels = hw_param_interval(params,
 					SNDRV_PCM_HW_PARAM_CHANNELS);
 	int rc = 0;
+	int idx;
 
 	pr_debug("%s: format = %d, rate = %d\n",
 		  __func__, params_format(params), params_rate(params));
@@ -1697,6 +2092,21 @@
 		channels->min = channels->max = usb_tx_cfg.channels;
 		break;
 
+	case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+		idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id);
+		if (IS_ERR_VALUE(idx)) {
+			pr_err("%s: Incorrect ext disp idx %d\n",
+			       __func__, idx);
+			rc = idx;
+			break;
+		}
+
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+				ext_disp_rx_cfg[idx].bit_format);
+		rate->min = rate->max = ext_disp_rx_cfg[idx].sample_rate;
+		channels->min = channels->max = ext_disp_rx_cfg[idx].channels;
+		break;
+
 	case MSM_BACKEND_DAI_AFE_PCM_RX:
 		channels->min = channels->max = proxy_rx_cfg.channels;
 		rate->min = rate->max = SAMPLING_RATE_48KHZ;
@@ -1826,48 +2236,64 @@
 		rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[PRIM_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_rx_cfg[PRIM_MI2S].bit_format);
 		break;
 
 	case MSM_BACKEND_DAI_PRI_MI2S_TX:
 		rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[PRIM_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_tx_cfg[PRIM_MI2S].bit_format);
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
 		rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[SEC_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_rx_cfg[SEC_MI2S].bit_format);
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
 		rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[SEC_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_tx_cfg[SEC_MI2S].bit_format);
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
 		rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[TERT_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_rx_cfg[TERT_MI2S].bit_format);
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
 		rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[TERT_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_tx_cfg[TERT_MI2S].bit_format);
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
 		rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[QUAT_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_rx_cfg[QUAT_MI2S].bit_format);
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
 		rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[QUAT_MI2S].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       mi2s_tx_cfg[QUAT_MI2S].bit_format);
 		break;
 
 	default:
@@ -1887,46 +2313,14 @@
  */
 int msm_aux_pcm_snd_startup(struct snd_pcm_substream *substream)
 {
-	int ret = 0;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	int index = cpu_dai->id - 1;
-	return ret = 0;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
 		__func__, substream->name, substream->stream,
-		cpu_dai->name, cpu_dai->id);
+		rtd->cpu_dai->name, rtd->cpu_dai->id);
 
-	if (index < PRIM_AUX_PCM || index > QUAT_AUX_PCM) {
-		ret = -EINVAL;
-		dev_err(rtd->card->dev,
-			"%s: CPU DAI id (%d) out of range\n",
-			__func__, cpu_dai->id);
-		goto done;
-	}
-
-	mutex_lock(&auxpcm_intf_conf[index].lock);
-	if (++auxpcm_intf_conf[index].ref_cnt == 1) {
-		if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
-			mutex_lock(&mi2s_auxpcm_conf[index].lock);
-			iowrite32(1,
-				mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
-			mutex_unlock(&mi2s_auxpcm_conf[index].lock);
-		} else {
-			dev_err(rtd->card->dev,
-				"%s lpaif_tert_muxsel_virt_addr is NULL\n",
-				__func__);
-			ret = -EINVAL;
-		}
-	}
-	if (ret < 0)
-		auxpcm_intf_conf[index].ref_cnt--;
-
-	mutex_unlock(&auxpcm_intf_conf[index].lock);
-
-done:
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(msm_aux_pcm_snd_startup);
 
@@ -1938,36 +2332,12 @@
 void msm_aux_pcm_snd_shutdown(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	int index = rtd->cpu_dai->id - 1;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
 		__func__,
 		substream->name, substream->stream,
 		rtd->cpu_dai->name, rtd->cpu_dai->id);
-
-	if (index < PRIM_AUX_PCM || index > QUAT_AUX_PCM) {
-		dev_err(rtd->card->dev,
-			"%s: CPU DAI id (%d) out of range\n",
-			__func__, rtd->cpu_dai->id);
-		return;
-	}
-
-	mutex_lock(&auxpcm_intf_conf[index].lock);
-	if (--auxpcm_intf_conf[index].ref_cnt == 0) {
-		if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
-			mutex_lock(&mi2s_auxpcm_conf[index].lock);
-			iowrite32(0,
-				mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
-			mutex_unlock(&mi2s_auxpcm_conf[index].lock);
-		} else {
-			dev_err(rtd->card->dev,
-				"%s lpaif_tert_muxsel_virt_addr is NULL\n",
-				__func__);
-			auxpcm_intf_conf[index].ref_cnt++;
-		}
-	}
-	mutex_unlock(&auxpcm_intf_conf[index].lock);
 }
 EXPORT_SYMBOL(msm_aux_pcm_snd_shutdown);
 
@@ -2013,6 +2383,7 @@
 	u32 bit_per_sample;
 
 	switch (bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
 	case SNDRV_PCM_FORMAT_S24_3LE:
 	case SNDRV_PCM_FORMAT_S24_LE:
 		bit_per_sample = 32;
@@ -2093,6 +2464,7 @@
 	int ret = 0;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int port_id = msm_get_port_id(rtd->dai_link->be_id);
 	int index = cpu_dai->id;
 	unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
 
@@ -2115,6 +2487,11 @@
 	 */
 	mutex_lock(&mi2s_intf_conf[index].lock);
 	if (++mi2s_intf_conf[index].ref_cnt == 1) {
+		/* Check if msm needs to provide the clock to the interface */
+		if (!mi2s_intf_conf[index].msm_is_mi2s_master) {
+			mi2s_clk[index].clk_id = mi2s_ebit_clk[index];
+			fmt = SND_SOC_DAIFMT_CBM_CFM;
+		}
 		ret = msm_mi2s_set_sclk(substream, true);
 		if (ret < 0) {
 			dev_err(rtd->card->dev,
@@ -2122,21 +2499,6 @@
 				__func__, ret);
 			goto clean_up;
 		}
-		if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
-			mutex_lock(&mi2s_auxpcm_conf[index].lock);
-			iowrite32(0,
-				mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
-			mutex_unlock(&mi2s_auxpcm_conf[index].lock);
-		} else {
-			dev_err(rtd->card->dev,
-				"%s lpaif_muxsel_virt_addr is NULL for dai %d\n",
-				__func__, index);
-			ret = -EINVAL;
-			goto clk_off;
-		}
-		/* Check if msm needs to provide the clock to the interface */
-		if (!mi2s_intf_conf[index].msm_is_mi2s_master)
-			fmt = SND_SOC_DAIFMT_CBM_CFM;
 		ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
 		if (ret < 0) {
 			dev_err(rtd->card->dev,
@@ -2144,7 +2506,21 @@
 				__func__, index, ret);
 			goto clk_off;
 		}
+		if (mi2s_intf_conf[index].msm_is_ext_mclk) {
+			mi2s_mclk[index].enable = 1;
+			pr_debug("%s: Enabling mclk, clk_freq_in_hz = %u\n",
+				__func__, mi2s_mclk[index].clk_freq_in_hz);
+			ret = afe_set_lpass_clock_v2(port_id,
+						     &mi2s_mclk[index]);
+			if (ret < 0) {
+				pr_err("%s: afe lpass mclk failed, err:%d\n",
+					__func__, ret);
+				goto clk_off;
+			}
+		}
 	}
+	mutex_unlock(&mi2s_intf_conf[index].lock);
+	return 0;
 clk_off:
 	if (ret < 0)
 		msm_mi2s_set_sclk(substream, false);
@@ -2166,6 +2542,7 @@
 {
 	int ret;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int port_id = msm_get_port_id(rtd->dai_link->be_id);
 	int index = rtd->cpu_dai->id;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
@@ -2183,6 +2560,17 @@
 				__func__, index, ret);
 			mi2s_intf_conf[index].ref_cnt++;
 		}
+		if (mi2s_intf_conf[index].msm_is_ext_mclk) {
+			mi2s_mclk[index].enable = 0;
+			pr_debug("%s: Disabling mclk, clk_freq_in_hz = %u\n",
+				 __func__, mi2s_mclk[index].clk_freq_in_hz);
+			ret = afe_set_lpass_clock_v2(port_id,
+						     &mi2s_mclk[index]);
+			if (ret < 0) {
+				pr_err("%s: mclk disable failed for MCLK (%d); ret=%d\n",
+					__func__, index, ret);
+			}
+		}
 	}
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 }
@@ -2233,6 +2621,7 @@
 }
 
 static int msm_populate_dai_link_component_of_node(
+		struct msm_asoc_mach_data *pdata,
 		struct snd_soc_card *card)
 {
 	int i, index, ret = 0;
@@ -2312,6 +2701,31 @@
 			dai_link[i].codec_of_node = phandle;
 			dai_link[i].codec_name = NULL;
 		}
+		if (pdata->snd_card_val == INT_SND_CARD) {
+			if ((dai_link[i].be_id ==
+					MSM_BACKEND_DAI_INT0_MI2S_RX) ||
+			    (dai_link[i].be_id ==
+					MSM_BACKEND_DAI_INT1_MI2S_RX) ||
+			    (dai_link[i].be_id ==
+					MSM_BACKEND_DAI_INT2_MI2S_TX) ||
+			    (dai_link[i].be_id ==
+					MSM_BACKEND_DAI_INT3_MI2S_TX)) {
+				index = of_property_match_string(cdev->of_node,
+							"asoc-codec-names",
+							MSM_INT_DIGITAL_CODEC);
+				phandle = of_parse_phandle(cdev->of_node,
+							   "asoc-codec",
+							   index);
+				dai_link[i].codecs[DIG_CDC].of_node = phandle;
+				index = of_property_match_string(cdev->of_node,
+							"asoc-codec-names",
+							PMIC_INT_ANALOG_CODEC);
+				phandle = of_parse_phandle(cdev->of_node,
+							   "asoc-codec",
+							   index);
+				dai_link[i].codecs[ANA_CDC].of_node = phandle;
+			}
+		}
 	}
 err:
 	return ret;
@@ -2570,44 +2984,19 @@
 
 static void i2s_auxpcm_init(struct platform_device *pdev)
 {
-	struct resource *muxsel;
 	int count;
 	u32 mi2s_master_slave[MI2S_MAX];
+	u32 mi2s_ext_mclk[MI2S_MAX];
 	int ret;
-	char *str[PCM_I2S_SEL_MAX] = {
-		"lpaif_pri_mode_muxsel",
-		"lpaif_sec_mode_muxsel",
-		"lpaif_tert_mode_muxsel",
-		"lpaif_quat_mode_muxsel"
-	};
 
 	for (count = 0; count < MI2S_MAX; count++) {
 		mutex_init(&mi2s_intf_conf[count].lock);
 		mi2s_intf_conf[count].ref_cnt = 0;
 	}
 
-	for (count = 0; count < AUX_PCM_MAX; count++) {
-		mutex_init(&auxpcm_intf_conf[count].lock);
-		auxpcm_intf_conf[count].ref_cnt = 0;
-	}
-
-	for (count = 0; count < PCM_I2S_SEL_MAX; count++) {
-		mutex_init(&mi2s_auxpcm_conf[count].lock);
-		mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr = NULL;
-	}
-
-	for (count = 0; count < PCM_I2S_SEL_MAX; count++) {
-		muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						      str[count]);
-		if (muxsel) {
-			mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr
-				= ioremap(muxsel->start, resource_size(muxsel));
-		}
-	}
-
 	ret = of_property_read_u32_array(pdev->dev.of_node,
-			"qcom,msm-mi2s-master",
-			mi2s_master_slave, MI2S_MAX);
+					 "qcom,msm-mi2s-master",
+					 mi2s_master_slave, MI2S_MAX);
 	if (ret) {
 		dev_dbg(&pdev->dev, "%s: no qcom,msm-mi2s-master in DT node\n",
 			__func__);
@@ -2617,25 +3006,26 @@
 				mi2s_master_slave[count];
 		}
 	}
+
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+					 "qcom,msm-mi2s-ext-mclk",
+					 mi2s_ext_mclk, MI2S_MAX);
+	if (ret) {
+		dev_dbg(&pdev->dev, "%s: no qcom,msm-mi2s-ext-mclk in DT node\n",
+			__func__);
+	} else {
+		for (count = 0; count < MI2S_MAX; count++)
+			mi2s_intf_conf[count].msm_is_ext_mclk =
+				mi2s_ext_mclk[count];
+	}
 }
 
-static void i2s_auxpcm_deinit(void)
-{
-	int count;
-
-	for (count = 0; count < PCM_I2S_SEL_MAX; count++)
-		if (mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr !=
-			NULL)
-			iounmap(
-			mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr);
-}
-
-static const struct of_device_id msmfalcon_asoc_machine_of_match[]  = {
-	{ .compatible = "qcom,msmfalcon-asoc-snd",
+static const struct of_device_id sdm660_asoc_machine_of_match[]  = {
+	{ .compatible = "qcom,sdm660-asoc-snd",
 	  .data = "internal_codec"},
-	{ .compatible = "qcom,msmfalcon-asoc-snd-tasha",
+	{ .compatible = "qcom,sdm660-asoc-snd-tasha",
 	  .data = "tasha_codec"},
-	{ .compatible = "qcom,msmfalcon-asoc-snd-tavil",
+	{ .compatible = "qcom,sdm660-asoc-snd-tavil",
 	  .data = "tavil_codec"},
 	{},
 };
@@ -2654,7 +3044,7 @@
 	if (!pdata)
 		return -ENOMEM;
 
-	match = of_match_node(msmfalcon_asoc_machine_of_match,
+	match = of_match_node(sdm660_asoc_machine_of_match,
 			      pdev->dev.of_node);
 	if (!match)
 		goto err;
@@ -2691,13 +3081,14 @@
 
 	if (pdata->snd_card_val == INT_SND_CARD) {
 		/*reading the gpio configurations from dtsi file*/
-		ret = msm_gpioset_initialize(CLIENT_WCD, &pdev->dev);
-		if (ret < 0) {
-			dev_err(&pdev->dev,
-				"%s: error reading dtsi files%d\n",
-				__func__, ret);
-			goto err;
-		}
+		pdata->pdm_gpio_p = of_parse_phandle(pdev->dev.of_node,
+					"qcom,cdc-pdm-gpios", 0);
+		pdata->comp_gpio_p = of_parse_phandle(pdev->dev.of_node,
+					"qcom,cdc-comp-gpios", 0);
+		pdata->dmic_gpio_p = of_parse_phandle(pdev->dev.of_node,
+					"qcom,cdc-dmic-gpios", 0);
+		pdata->ext_spk_gpio_p = of_parse_phandle(pdev->dev.of_node,
+					"qcom,cdc-ext-spk-gpios", 0);
 	}
 
 	/*
@@ -2730,24 +3121,37 @@
 	if (ret)
 		goto err;
 
-	ret = msm_populate_dai_link_component_of_node(card);
+	ret = msm_populate_dai_link_component_of_node(pdata, card);
 	if (ret) {
 		ret = -EPROBE_DEFER;
 		goto err;
 	}
-	ret = msm_init_wsa_dev(pdev, card);
-	if (ret)
-		goto err;
 
+	if (!of_property_read_bool(pdev->dev.of_node, "qcom,wsa-disable")) {
+		ret = msm_init_wsa_dev(pdev, card);
+		if (ret)
+			goto err;
+	}
 
 	ret = devm_snd_soc_register_card(&pdev->dev, card);
-	if (ret) {
+	if (ret == -EPROBE_DEFER) {
+		if (codec_reg_done) {
+			/*
+			 * return failure as EINVAL since other codec
+			 * registered sound card successfully.
+			 * This avoids any further probe calls.
+			 */
+			ret = -EINVAL;
+		}
+		goto err;
+	} else if (ret) {
 		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
 			ret);
 		goto err;
 	}
 	if (pdata->snd_card_val != INT_SND_CARD)
-		msm_ext_register_audio_notifier();
+		msm_ext_register_audio_notifier(pdev);
+
 	return 0;
 err:
 	if (pdata->us_euro_gpio > 0) {
@@ -2767,6 +3171,8 @@
 		gpio_free(pdata->hph_en0_gpio);
 		pdata->hph_en0_gpio = 0;
 	}
+	if (pdata->snd_card_val != INT_SND_CARD)
+		msm_ext_cdc_deinit(pdata);
 	devm_kfree(&pdev->dev, pdata);
 	return ret;
 }
@@ -2778,29 +3184,30 @@
 
 	if (pdata->snd_card_val == INT_SND_CARD)
 		mutex_destroy(&pdata->cdc_int_mclk0_mutex);
+	else
+		msm_ext_cdc_deinit(pdata);
 	msm_free_auxdev_mem(pdev);
 
 	gpio_free(pdata->us_euro_gpio);
 	gpio_free(pdata->hph_en1_gpio);
 	gpio_free(pdata->hph_en0_gpio);
-	i2s_auxpcm_deinit();
 	snd_soc_unregister_card(card);
 	return 0;
 }
 
-static struct platform_driver msmfalcon_asoc_machine_driver = {
+static struct platform_driver sdm660_asoc_machine_driver = {
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
 		.pm = &snd_soc_pm_ops,
-		.of_match_table = msmfalcon_asoc_machine_of_match,
+		.of_match_table = sdm660_asoc_machine_of_match,
 	},
 	.probe = msm_asoc_machine_probe,
 	.remove = msm_asoc_machine_remove,
 };
-module_platform_driver(msmfalcon_asoc_machine_driver);
+module_platform_driver(sdm660_asoc_machine_driver);
 
 MODULE_DESCRIPTION("ALSA SoC msm");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, msmfalcon_asoc_machine_of_match);
+MODULE_DEVICE_TABLE(of, sdm660_asoc_machine_of_match);
diff --git a/sound/soc/msm/msmfalcon-common.h b/sound/soc/msm/sdm660-common.h
similarity index 78%
rename from sound/soc/msm/msmfalcon-common.h
rename to sound/soc/msm/sdm660-common.h
index 5f6b859..bca8cd7 100644
--- a/sound/soc/msm/msmfalcon-common.h
+++ b/sound/soc/msm/sdm660-common.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -59,8 +59,15 @@
 	u32 channel;
 };
 
+enum {
+	DIG_CDC,
+	ANA_CDC,
+	CODECS_MAX,
+};
+
 extern const struct snd_kcontrol_new msm_common_snd_controls[];
-struct msmfalcon_codec {
+extern bool codec_reg_done;
+struct sdm660_codec {
 	void* (*get_afe_config_fn)(struct snd_soc_codec *codec,
 				   enum afe_config_type config_type);
 };
@@ -71,6 +78,14 @@
 	EXT_SND_CARD_TAVIL,
 };
 
+struct msm_snd_interrupt {
+	void __iomem *mpm_wakeup;
+	void __iomem *intr1_cfg_apps;
+	void __iomem *lpi_gpio_intr_cfg;
+	void __iomem *lpi_gpio_cfg;
+	void __iomem *lpi_gpio_inout;
+};
+
 struct msm_asoc_mach_data {
 	int us_euro_gpio; /* used by gpio driver API */
 	int hph_en1_gpio;
@@ -78,11 +93,16 @@
 	struct device_node *us_euro_gpio_p; /* used by pinctrl API */
 	struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
 	struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
+	struct device_node *pdm_gpio_p; /* used by pinctrl API */
+	struct device_node *comp_gpio_p; /* used by pinctrl API */
+	struct device_node *dmic_gpio_p; /* used by pinctrl API */
+	struct device_node *ext_spk_gpio_p; /* used by pinctrl API */
 	struct snd_soc_codec *codec;
-	struct msmfalcon_codec msmfalcon_codec_fn;
+	struct sdm660_codec sdm660_codec_fn;
 	struct snd_info_entry *codec_root;
 	int spk_ext_pa_gpio;
 	int mclk_freq;
+	bool native_clk_set;
 	int lb_mode;
 	int snd_card_val;
 	u8 micbias1_cap_mode;
@@ -92,6 +112,7 @@
 	struct mutex cdc_int_mclk0_mutex;
 	struct delayed_work disable_int_mclk0_work;
 	struct afe_clk_set digital_cdc_core_clk;
+	struct msm_snd_interrupt msm_snd_intr_lpi;
 };
 
 int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -100,4 +121,5 @@
 void msm_aux_pcm_snd_shutdown(struct snd_pcm_substream *substream);
 int msm_mi2s_snd_startup(struct snd_pcm_substream *substream);
 void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream);
+int msm_common_snd_controls_size(void);
 #endif
diff --git a/sound/soc/msm/msmfalcon-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
similarity index 95%
rename from sound/soc/msm/msmfalcon-ext-dai-links.c
rename to sound/soc/msm/sdm660-ext-dai-links.c
index 6f066c5..f64074d 100644
--- a/sound/soc/msm/msmfalcon-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,11 +19,11 @@
 #include <sound/pcm_params.h>
 #include "qdsp6v2/msm-pcm-routing-v2.h"
 #include "../codecs/wcd9335.h"
-#include "msmfalcon-common.h"
-#include "msmfalcon-external.h"
+#include "sdm660-common.h"
+#include "sdm660-external.h"
 
 #define DEV_NAME_STR_LEN            32
-#define __CHIPSET__ "MSMFALCON "
+#define __CHIPSET__ "SDM660 "
 #define MSM_DAILINK_NAME(name) (__CHIPSET__#name)
 
 #define WCN_CDC_SLIM_RX_CH_MAX 2
@@ -861,6 +861,7 @@
 		.stream_name = "Compress1",
 		.cpu_dai_name	= "MultiMedia4",
 		.platform_name  = "msm-compress-dsp",
+		.async_ops = ASYNC_DPCM_SND_SOC_HW_PARAMS,
 		.dynamic = 1,
 		.dpcm_capture = 1,
 		.dpcm_playback = 1,
@@ -1301,6 +1302,39 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
+	{/* hw:x,35 */
+		.name = "SLIMBUS7 Hostless",
+		.stream_name = "SLIMBUS7 Hostless",
+		.cpu_dai_name = "SLIMBUS7_HOSTLESS",
+		.platform_name  = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	{/* hw:x,36 */
+		.name = "SDM660 HFP TX",
+		.stream_name = "MultiMedia6",
+		.cpu_dai_name = "MultiMedia6",
+		.platform_name  = "msm-pcm-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+	},
 };
 
 static struct snd_soc_dai_link msm_ext_common_be_dai[] = {
@@ -1827,6 +1861,24 @@
 	},
 };
 
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+	/* DISP PORT BACK END DAI Link */
+	{
+		.name = LPASS_BE_DISPLAY_PORT,
+		.stream_name = "Display Port Playback",
+		.cpu_dai_name = "msm-dai-q6-dp.24608",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-ext-disp-audio-codec-rx",
+		.codec_dai_name = "msm_dp_audio_codec_rx_dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+		.be_hw_params_fixup = msm_common_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+};
+
 static struct snd_soc_dai_link msm_ext_tasha_dai_links[
 ARRAY_SIZE(msm_ext_common_fe_dai) +
 ARRAY_SIZE(msm_ext_tasha_fe_dai) +
@@ -1834,7 +1886,8 @@
 ARRAY_SIZE(msm_ext_tasha_be_dai) +
 ARRAY_SIZE(msm_mi2s_be_dai_links) +
 ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
 
 static struct snd_soc_dai_link msm_ext_tavil_dai_links[
 ARRAY_SIZE(msm_ext_common_fe_dai) +
@@ -1843,7 +1896,8 @@
 ARRAY_SIZE(msm_ext_tavil_be_dai) +
 ARRAY_SIZE(msm_mi2s_be_dai_links) +
 ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
 
 /**
  * populate_snd_card_dailinks - prepares dailink array and initializes card.
@@ -1881,7 +1935,7 @@
 	if (strnstr(card->name, "tasha", strlen(card->name))) {
 		codec_ver = tasha_codec_ver();
 		if (codec_ver == WCD9326)
-			card->name = "msmfalcon-tashalite-snd-card";
+			card->name = "sdm660-tashalite-snd-card";
 
 		len1 = ARRAY_SIZE(msm_ext_common_fe_dai);
 		len2 = len1 + ARRAY_SIZE(msm_ext_tasha_fe_dai);
@@ -1917,6 +1971,15 @@
 				   sizeof(msm_wcn_be_dai_links));
 			len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
 		}
+		if (of_property_read_bool(dev->of_node,
+					  "qcom,ext-disp-audio-rx")) {
+			dev_dbg(dev, "%s(): ext disp audio support present\n",
+					__func__);
+			memcpy(msm_ext_tasha_dai_links + len4,
+				ext_disp_be_dai_link,
+				sizeof(ext_disp_be_dai_link));
+			len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+		}
 		msm_ext_dai_links = msm_ext_tasha_dai_links;
 	} else if (strnstr(card->name, "tavil", strlen(card->name))) {
 		len1 = ARRAY_SIZE(msm_ext_common_fe_dai);
@@ -1953,6 +2016,15 @@
 				   sizeof(msm_wcn_be_dai_links));
 			len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
 		}
+		if (of_property_read_bool(dev->of_node,
+					  "qcom,ext-disp-audio-rx")) {
+			dev_dbg(dev, "%s(): ext disp audio support present\n",
+					__func__);
+			memcpy(msm_ext_tavil_dai_links + len4,
+				ext_disp_be_dai_link,
+				sizeof(ext_disp_be_dai_link));
+			len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+		}
 		msm_ext_dai_links = msm_ext_tavil_dai_links;
 	} else {
 		dev_err(dev, "%s: failing as no matching card name\n",
diff --git a/sound/soc/msm/msmfalcon-external.c b/sound/soc/msm/sdm660-external.c
similarity index 91%
rename from sound/soc/msm/msmfalcon-external.c
rename to sound/soc/msm/sdm660-external.c
index d7b002e..b603b8a 100644
--- a/sound/soc/msm/msmfalcon-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -21,25 +21,42 @@
 #include <linux/qdsp6v2/audio_notifier.h>
 #include "qdsp6v2/msm-pcm-routing-v2.h"
 #include "msm-audio-pinctrl.h"
-#include "msmfalcon-common.h"
-#include "msmfalcon-external.h"
+#include "sdm660-common.h"
+#include "sdm660-external.h"
 #include "../codecs/wcd9335.h"
 #include "../codecs/wcd934x/wcd934x.h"
 #include "../codecs/wcd934x/wcd934x-mbhc.h"
 
-#define MSMFALCON_SPK_ON     1
-#define MSMFALCON_SPK_OFF    0
+#define SDM660_SPK_ON     1
+#define SDM660_SPK_OFF    0
 
 #define WCD9XXX_MBHC_DEF_BUTTONS    8
 #define WCD9XXX_MBHC_DEF_RLOADS     5
 #define CODEC_EXT_CLK_RATE          9600000
 #define ADSP_STATE_READY_TIMEOUT_MS 3000
 
+#define TLMM_CENTER_MPM_WAKEUP_INT_EN_0 0x03596000
+#define LPI_GPIO_22_WAKEUP_VAL 0x00000002
+
+#define TLMM_LPI_DIR_CONN_INTR1_CFG_APPS 0x0359D004
+#define LPI_GPIO_22_INTR1_CFG_VAL 0x01
+#define LPI_GPIO_22_INTR1_CFG_MASK 0x03
+
+#define TLMM_LPI_GPIO_INTR_CFG1  0x0359B004
+#define LPI_GPIO_INTR_CFG1_VAL 0x00000113
+
+#define TLMM_LPI_GPIO22_CFG  0x15078040
+#define LPI_GPIO22_CFG_VAL 0x0000009
+
+#define TLMM_LPI_GPIO22_INOUT  0x179D1318
+#define LPI_GPIO22_INOUT_VAL 0x0020000
+
 #define WSA8810_NAME_1 "wsa881x.20170211"
 #define WSA8810_NAME_2 "wsa881x.20170212"
 
 static int msm_ext_spk_control = 1;
 static struct wcd_mbhc_config *wcd_mbhc_cfg_ptr;
+bool codec_reg_done;
 
 struct msm_asoc_wcd93xx_codec {
 	void* (*get_afe_config_fn)(struct snd_soc_codec *codec,
@@ -637,7 +654,7 @@
 			snd_soc_codec_get_dapm(codec);
 
 	pr_debug("%s: msm_ext_spk_control = %d", __func__, msm_ext_spk_control);
-	if (msm_ext_spk_control == MSMFALCON_SPK_ON) {
+	if (msm_ext_spk_control == SDM660_SPK_ON) {
 		snd_soc_dapm_enable_pin(dapm, "Lineout_1 amp");
 		snd_soc_dapm_enable_pin(dapm, "Lineout_3 amp");
 	} else {
@@ -659,7 +676,7 @@
 static int msm_ext_set_spk(struct snd_kcontrol *kcontrol,
 			   struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 
 	pr_debug("%s()\n", __func__);
 	if (msm_ext_spk_control == ucontrol->value.integer.value[0])
@@ -1193,12 +1210,37 @@
 	afe_clear_config(AFE_SLIMBUS_SLAVE_CONFIG);
 }
 
+static void msm_snd_interrupt_config(struct msm_asoc_mach_data *pdata)
+{
+	int val;
+
+	val = ioread32(pdata->msm_snd_intr_lpi.mpm_wakeup);
+	val |= LPI_GPIO_22_WAKEUP_VAL;
+	iowrite32(val, pdata->msm_snd_intr_lpi.mpm_wakeup);
+
+	val = ioread32(pdata->msm_snd_intr_lpi.intr1_cfg_apps);
+	val &= ~(LPI_GPIO_22_INTR1_CFG_MASK);
+	val |= LPI_GPIO_22_INTR1_CFG_VAL;
+	iowrite32(val, pdata->msm_snd_intr_lpi.intr1_cfg_apps);
+
+	iowrite32(LPI_GPIO_INTR_CFG1_VAL,
+			pdata->msm_snd_intr_lpi.lpi_gpio_intr_cfg);
+	iowrite32(LPI_GPIO22_CFG_VAL,
+			pdata->msm_snd_intr_lpi.lpi_gpio_cfg);
+	val = ioread32(pdata->msm_snd_intr_lpi.lpi_gpio_inout);
+	val |= LPI_GPIO22_INOUT_VAL;
+	iowrite32(val, pdata->msm_snd_intr_lpi.lpi_gpio_inout);
+}
+
 static int msm_adsp_power_up_config(struct snd_soc_codec *codec)
 {
 	int ret = 0;
 	unsigned long timeout;
 	int adsp_ready = 0;
+	struct snd_soc_card *card = codec->component.card;
+	struct msm_asoc_mach_data *pdata;
 
+	pdata = snd_soc_card_get_drvdata(card);
 	timeout = jiffies +
 		msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
 
@@ -1221,6 +1263,7 @@
 		ret = -ETIMEDOUT;
 		goto err_fail;
 	}
+	msm_snd_interrupt_config(pdata);
 
 	ret = msm_afe_set_config(codec);
 	if (ret)
@@ -1233,7 +1276,7 @@
 	return ret;
 }
 
-static int msmfalcon_notifier_service_cb(struct notifier_block *this,
+static int sdm660_notifier_service_cb(struct notifier_block *this,
 					 unsigned long opcode, void *ptr)
 {
 	int ret;
@@ -1291,7 +1334,7 @@
 }
 
 static struct notifier_block service_nb = {
-	.notifier_call  = msmfalcon_notifier_service_cb,
+	.notifier_call  = sdm660_notifier_service_cb,
 	.priority = -INT_MAX,
 };
 
@@ -1469,6 +1512,17 @@
 					     134, 135, 136, 137, 138, 139,
 					     140, 141, 142, 143};
 
+	/* Tavil Codec SLIMBUS configuration
+	 * RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8
+	 * TX1, TX2, TX3, TX4, TX5, TX6, TX7, TX8, TX9, TX10, TX11, TX12, TX13
+	 * TX14, TX15, TX16
+	 */
+	unsigned int rx_ch_tavil[WCD934X_RX_MAX] = {144, 145, 146, 147, 148,
+					    149, 150, 151};
+	unsigned int tx_ch_tavil[WCD934X_TX_MAX] = {128, 129, 130, 131, 132,
+					    133, 134, 135, 136, 137, 138,
+					    139, 140, 141, 142, 143};
+
 	pr_debug("%s: dev_name%s\n", __func__, dev_name(cpu_dai->dev));
 
 	rtd->pmdown_time = 0;
@@ -1481,6 +1535,14 @@
 		return ret;
 	}
 
+	ret = snd_soc_add_codec_controls(codec, msm_common_snd_controls,
+					 msm_common_snd_controls_size());
+	if (ret < 0) {
+		pr_err("%s: add_common_snd_controls failed: %d\n",
+			__func__, ret);
+		return ret;
+	}
+
 	snd_soc_dapm_new_controls(dapm, msm_dapm_widgets,
 			ARRAY_SIZE(msm_dapm_widgets));
 
@@ -1521,14 +1583,11 @@
 	snd_soc_dapm_ignore_suspend(dapm, "EAR");
 	snd_soc_dapm_ignore_suspend(dapm, "LINEOUT1");
 	snd_soc_dapm_ignore_suspend(dapm, "LINEOUT2");
-	snd_soc_dapm_ignore_suspend(dapm, "LINEOUT3");
-	snd_soc_dapm_ignore_suspend(dapm, "LINEOUT4");
 	snd_soc_dapm_ignore_suspend(dapm, "AMIC1");
 	snd_soc_dapm_ignore_suspend(dapm, "AMIC2");
 	snd_soc_dapm_ignore_suspend(dapm, "AMIC3");
 	snd_soc_dapm_ignore_suspend(dapm, "AMIC4");
 	snd_soc_dapm_ignore_suspend(dapm, "AMIC5");
-	snd_soc_dapm_ignore_suspend(dapm, "AMIC6");
 	snd_soc_dapm_ignore_suspend(dapm, "DMIC0");
 	snd_soc_dapm_ignore_suspend(dapm, "DMIC1");
 	snd_soc_dapm_ignore_suspend(dapm, "DMIC2");
@@ -1536,21 +1595,33 @@
 	snd_soc_dapm_ignore_suspend(dapm, "DMIC4");
 	snd_soc_dapm_ignore_suspend(dapm, "DMIC5");
 	snd_soc_dapm_ignore_suspend(dapm, "ANC EAR");
-	snd_soc_dapm_ignore_suspend(dapm, "ANC HEADPHONE");
 	snd_soc_dapm_ignore_suspend(dapm, "SPK1 OUT");
 	snd_soc_dapm_ignore_suspend(dapm, "SPK2 OUT");
 	snd_soc_dapm_ignore_suspend(dapm, "HPHL");
 	snd_soc_dapm_ignore_suspend(dapm, "HPHR");
-	snd_soc_dapm_ignore_suspend(dapm, "ANC HPHL");
-	snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
-	snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
-	snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
 	snd_soc_dapm_ignore_suspend(dapm, "AIF4 VI");
 	snd_soc_dapm_ignore_suspend(dapm, "VIINPUT");
 
+	if (!strcmp(dev_name(codec_dai->dev), "tasha_codec")) {
+		snd_soc_dapm_ignore_suspend(dapm, "LINEOUT3");
+		snd_soc_dapm_ignore_suspend(dapm, "LINEOUT4");
+		snd_soc_dapm_ignore_suspend(dapm, "ANC HPHL");
+		snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
+		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
+		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
+	}
+
 	snd_soc_dapm_sync(dapm);
-	snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
-				    tx_ch, ARRAY_SIZE(rx_ch), rx_ch);
+
+	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
+		snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch_tavil),
+					tx_ch_tavil, ARRAY_SIZE(rx_ch_tavil),
+					rx_ch_tavil);
+	} else {
+		snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
+					tx_ch, ARRAY_SIZE(rx_ch),
+					rx_ch);
+	}
 
 	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
 		msm_codec_fn.get_afe_config_fn = tavil_get_afe_config;
@@ -1674,6 +1745,7 @@
 		}
 
 	}
+	codec_reg_done = true;
 done:
 	return 0;
 
@@ -1687,11 +1759,13 @@
 /**
  * msm_ext_register_audio_notifier - register SSR notifier.
  */
-void msm_ext_register_audio_notifier(void)
+void msm_ext_register_audio_notifier(struct platform_device *pdev)
 {
 	int ret;
 
-	ret = audio_notifier_register("msmfalcon", AUDIO_NOTIFIER_ADSP_DOMAIN,
+	is_initial_boot = true;
+	spdev = pdev;
+	ret = audio_notifier_register("sdm660", AUDIO_NOTIFIER_ADSP_DOMAIN,
 				      &service_nb);
 	if (ret < 0)
 		pr_err("%s: Audio notifier register failed ret = %d\n",
@@ -1729,10 +1803,8 @@
 		ret = -EPROBE_DEFER;
 		goto err;
 	}
-	spdev = pdev;
 	platform_set_drvdata(pdev, *card);
 	snd_soc_card_set_drvdata(*card, pdata);
-	is_initial_boot = true;
 	pdata->hph_en1_gpio = of_get_named_gpio(pdev->dev.of_node,
 						"qcom,hph-en1-gpio", 0);
 	if (!gpio_is_valid(pdata->hph_en1_gpio))
@@ -1759,7 +1831,36 @@
 			ret);
 		ret = 0;
 	}
+	pdata->msm_snd_intr_lpi.mpm_wakeup =
+			ioremap(TLMM_CENTER_MPM_WAKEUP_INT_EN_0, 4);
+	pdata->msm_snd_intr_lpi.intr1_cfg_apps =
+			ioremap(TLMM_LPI_DIR_CONN_INTR1_CFG_APPS, 4);
+	pdata->msm_snd_intr_lpi.lpi_gpio_intr_cfg =
+			ioremap(TLMM_LPI_GPIO_INTR_CFG1, 4);
+	pdata->msm_snd_intr_lpi.lpi_gpio_cfg =
+			ioremap(TLMM_LPI_GPIO22_CFG, 4);
+	pdata->msm_snd_intr_lpi.lpi_gpio_inout =
+			ioremap(TLMM_LPI_GPIO22_INOUT, 4);
 err:
 	return ret;
 }
 EXPORT_SYMBOL(msm_ext_cdc_init);
+
+/**
+ * msm_ext_cdc_deinit - external codec machine specific deinit.
+ */
+void msm_ext_cdc_deinit(struct msm_asoc_mach_data *pdata)
+{
+	if (pdata->msm_snd_intr_lpi.mpm_wakeup)
+		iounmap(pdata->msm_snd_intr_lpi.mpm_wakeup);
+	if (pdata->msm_snd_intr_lpi.intr1_cfg_apps)
+		iounmap(pdata->msm_snd_intr_lpi.intr1_cfg_apps);
+	if (pdata->msm_snd_intr_lpi.lpi_gpio_intr_cfg)
+		iounmap(pdata->msm_snd_intr_lpi.lpi_gpio_intr_cfg);
+	if (pdata->msm_snd_intr_lpi.lpi_gpio_cfg)
+		iounmap(pdata->msm_snd_intr_lpi.lpi_gpio_cfg);
+	if (pdata->msm_snd_intr_lpi.lpi_gpio_inout)
+		iounmap(pdata->msm_snd_intr_lpi.lpi_gpio_inout);
+
+}
+EXPORT_SYMBOL(msm_ext_cdc_deinit);
diff --git a/sound/soc/msm/msmfalcon-external.h b/sound/soc/msm/sdm660-external.h
similarity index 83%
rename from sound/soc/msm/msmfalcon-external.h
rename to sound/soc/msm/sdm660-external.h
index 654cb70..acf5735 100644
--- a/sound/soc/msm/msmfalcon-external.h
+++ b/sound/soc/msm/sdm660-external.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,8 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef __MSMFALCON_EXTERNAL
-#define __MSMFALCON_EXTERNAL
+#ifndef __SDM660_EXTERNAL
+#define __SDM660_EXTERNAL
 
 int msm_snd_hw_params(struct snd_pcm_substream *substream,
 		      struct snd_pcm_hw_params *params);
@@ -33,7 +33,8 @@
 #ifdef CONFIG_SND_SOC_EXT_CODEC
 int msm_ext_cdc_init(struct platform_device *, struct msm_asoc_mach_data *,
 		     struct snd_soc_card **, struct wcd_mbhc_config *);
-void msm_ext_register_audio_notifier(void);
+void msm_ext_register_audio_notifier(struct platform_device *pdev);
+void msm_ext_cdc_deinit(struct msm_asoc_mach_data *pdata);
 #else
 inline int msm_ext_cdc_init(struct platform_device *pdev,
 			    struct msm_asoc_mach_data *pdata,
@@ -43,7 +44,10 @@
 	return 0;
 }
 
-inline void msm_ext_register_audio_notifier(void)
+inline void msm_ext_register_audio_notifier(struct platform_device *pdev)
+{
+}
+inline void msm_ext_cdc_deinit(struct msm_asoc_mach_data *pdata)
 {
 }
 #endif
diff --git a/sound/soc/msm/msmfalcon-internal.c b/sound/soc/msm/sdm660-internal.c
similarity index 90%
rename from sound/soc/msm/msmfalcon-internal.c
rename to sound/soc/msm/sdm660-internal.c
index 50efd69..b924cad 100644
--- a/sound/soc/msm/msmfalcon-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -13,13 +13,15 @@
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
 #include <sound/pcm_params.h>
 #include "qdsp6v2/msm-pcm-routing-v2.h"
-#include "msm-audio-pinctrl.h"
-#include "msmfalcon-common.h"
-#include "../codecs/msm8x16/msm8x16-wcd.h"
+#include "sdm660-common.h"
+#include "../codecs/sdm660_cdc/msm-digital-cdc.h"
+#include "../codecs/sdm660_cdc/msm-analog-cdc.h"
+#include "../codecs/msm_sdw/msm_sdw.h"
 
-#define __CHIPSET__ "MSMFALCON "
+#define __CHIPSET__ "SDM660 "
 #define MSM_DAILINK_NAME(name) (__CHIPSET__#name)
 
 #define DEFAULT_MCLK_RATE 9600000
@@ -30,6 +32,9 @@
 #define WCN_CDC_SLIM_RX_CH_MAX 2
 #define WCN_CDC_SLIM_TX_CH_MAX 3
 
+#define WSA8810_NAME_1 "wsa881x.20170211"
+#define WSA8810_NAME_2 "wsa881x.20170212"
+
 enum {
 	INT0_MI2S = 0,
 	INT1_MI2S,
@@ -131,7 +136,7 @@
 	[INT2_MI2S]  = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
 	[INT3_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
 	[INT4_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
-	[INT5_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+	[INT5_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
 	[INT6_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
 };
 
@@ -176,6 +181,7 @@
 static void msm_int_mi2s_snd_shutdown(struct snd_pcm_substream *substream);
 
 static struct wcd_mbhc_config *mbhc_cfg_ptr;
+static struct snd_info_entry *codec_root;
 
 static int int_mi2s_get_bit_format_val(int bit_format)
 {
@@ -443,22 +449,25 @@
 	SND_SOC_DAPM_MIC("Digital Mic4", msm_dmic_event),
 };
 
-static int msm_config_hph_compander_gpio(bool enable)
+static int msm_config_hph_compander_gpio(bool enable,
+					 struct snd_soc_codec *codec)
 {
+	struct snd_soc_card *card = codec->component.card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	int ret = 0;
 
 	pr_debug("%s: %s HPH Compander\n", __func__,
 		enable ? "Enable" : "Disable");
 
 	if (enable) {
-		ret = msm_gpioset_activate(CLIENT_WCD, "comp_gpio");
+		ret = msm_cdc_pinctrl_select_active_state(pdata->comp_gpio_p);
 		if (ret) {
 			pr_err("%s: gpio set cannot be activated %s\n",
 				__func__, "comp_gpio");
 			goto done;
 		}
 	} else {
-		ret = msm_gpioset_suspend(CLIENT_WCD, "comp_gpio");
+		ret = msm_cdc_pinctrl_select_sleep_state(pdata->comp_gpio_p);
 		if (ret) {
 			pr_err("%s: gpio set cannot be de-activated %s\n",
 				__func__, "comp_gpio");
@@ -509,7 +518,8 @@
 		enable ? "Enable" : "Disable");
 
 	if (enable) {
-		ret = msm_gpioset_activate(CLIENT_WCD, "ext_spk_gpio");
+		ret = msm_cdc_pinctrl_select_active_state(
+						pdata->ext_spk_gpio_p);
 		if (ret) {
 			pr_err("%s: gpio set cannot be de-activated %s\n",
 					__func__, "ext_spk_gpio");
@@ -518,7 +528,8 @@
 		gpio_set_value_cansleep(pdata->spk_ext_pa_gpio, enable);
 	} else {
 		gpio_set_value_cansleep(pdata->spk_ext_pa_gpio, enable);
-		ret = msm_gpioset_suspend(CLIENT_WCD, "ext_spk_gpio");
+		ret = msm_cdc_pinctrl_select_sleep_state(
+						pdata->ext_spk_gpio_p);
 		if (ret) {
 			pr_err("%s: gpio set cannot be de-activated %s\n",
 					__func__, "ext_spk_gpio");
@@ -672,10 +683,13 @@
 		   atomic_read(&pdata->int_mclk0_rsc_ref));
 	if (enable) {
 		if (int_mi2s_cfg[INT0_MI2S].sample_rate ==
-				SAMPLING_RATE_44P1KHZ)
+				SAMPLING_RATE_44P1KHZ) {
 			clk_freq_in_hz = NATIVE_MCLK_RATE;
-		else
+			pdata->native_clk_set = true;
+		} else {
 			clk_freq_in_hz = pdata->mclk_freq;
+			pdata->native_clk_set = false;
+		}
 
 		if (pdata->digital_cdc_core_clk.clk_freq_in_hz
 				!= clk_freq_in_hz)
@@ -687,6 +701,12 @@
 			mutex_lock(&pdata->cdc_int_mclk0_mutex);
 			if (atomic_read(&pdata->int_mclk0_enabled) == false ||
 				int_mclk0_freq_chg) {
+				if (atomic_read(&pdata->int_mclk0_enabled)) {
+					pdata->digital_cdc_core_clk.enable = 0;
+					afe_set_lpass_clock_v2(
+						AFE_PORT_ID_INT0_MI2S_RX,
+						&pdata->digital_cdc_core_clk);
+				}
 				pdata->digital_cdc_core_clk.clk_freq_in_hz =
 							clk_freq_in_hz;
 				pdata->digital_cdc_core_clk.enable = 1;
@@ -744,7 +764,7 @@
 			ucontrol->value.integer.value[0]);
 	switch (ucontrol->value.integer.value[0]) {
 	case 1:
-		ret = msm_gpioset_activate(CLIENT_WCD, "int_pdm");
+		ret = msm_cdc_pinctrl_select_active_state(pdata->pdm_gpio_p);
 		if (ret) {
 			pr_err("%s: failed to enable the pri gpios: %d\n",
 					__func__, ret);
@@ -761,8 +781,8 @@
 				pr_err("%s: failed to enable the MCLK: %d\n",
 						__func__, ret);
 				mutex_unlock(&pdata->cdc_int_mclk0_mutex);
-				ret = msm_gpioset_suspend(CLIENT_WCD,
-								"int_pdm");
+				ret = msm_cdc_pinctrl_select_sleep_state(
+							pdata->pdm_gpio_p);
 				if (ret)
 					pr_err("%s: failed to disable the pri gpios: %d\n",
 							__func__, ret);
@@ -772,12 +792,12 @@
 		}
 		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
 		atomic_inc(&pdata->int_mclk0_rsc_ref);
-		msm8x16_wcd_mclk_enable(codec, 1, true);
+		msm_anlg_cdc_mclk_enable(codec, 1, true);
 		break;
 	case 0:
 		if (atomic_read(&pdata->int_mclk0_rsc_ref) <= 0)
 			break;
-		msm8x16_wcd_mclk_enable(codec, 0, true);
+		msm_anlg_cdc_mclk_enable(codec, 0, true);
 		mutex_lock(&pdata->cdc_int_mclk0_mutex);
 		if ((!atomic_dec_return(&pdata->int_mclk0_rsc_ref)) &&
 				(atomic_read(&pdata->int_mclk0_enabled))) {
@@ -794,7 +814,7 @@
 			atomic_set(&pdata->int_mclk0_enabled, false);
 		}
 		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
-		ret = msm_gpioset_suspend(CLIENT_WCD, "int_pdm");
+		ret = msm_cdc_pinctrl_select_sleep_state(pdata->pdm_gpio_p);
 		if (ret)
 			pr_err("%s: failed to disable the pri gpios: %d\n",
 					__func__, ret);
@@ -871,15 +891,6 @@
 	SOC_ENUM_EXT("INT3_MI2S_TX SampleRate", int3_mi2s_tx_sample_rate,
 			int_mi2s_sample_rate_get,
 			int_mi2s_sample_rate_put),
-	SOC_ENUM_EXT("INT0_MI2S_RX SampleRate", int0_mi2s_rx_sample_rate,
-			int_mi2s_sample_rate_get,
-			int_mi2s_sample_rate_put),
-	SOC_ENUM_EXT("INT2_MI2S_TX SampleRate", int2_mi2s_tx_sample_rate,
-			int_mi2s_sample_rate_get,
-			int_mi2s_sample_rate_put),
-	SOC_ENUM_EXT("INT3_MI2S_TX SampleRate", int3_mi2s_tx_sample_rate,
-			int_mi2s_sample_rate_get,
-			int_mi2s_sample_rate_put),
 	SOC_ENUM_EXT("INT0_MI2S_RX Channels", int0_mi2s_rx_chs,
 			int_mi2s_ch_get, int_mi2s_ch_put),
 	SOC_ENUM_EXT("INT2_MI2S_TX Channels", int2_mi2s_tx_chs,
@@ -893,15 +904,12 @@
 			msm_bt_sample_rate_put),
 };
 
-static const struct snd_kcontrol_new msm_swr_controls[] = {
+static const struct snd_kcontrol_new msm_sdw_controls[] = {
 	SOC_ENUM_EXT("INT4_MI2S_RX Format", int4_mi2s_rx_format,
 		     int_mi2s_bit_format_get, int_mi2s_bit_format_put),
 	SOC_ENUM_EXT("INT4_MI2S_RX SampleRate", int4_mi2s_rx_sample_rate,
 			int_mi2s_sample_rate_get,
 			int_mi2s_sample_rate_put),
-	SOC_ENUM_EXT("INT4_MI2S_RX SampleRate", int4_mi2s_rx_sample_rate,
-			int_mi2s_sample_rate_get,
-			int_mi2s_sample_rate_put),
 	SOC_ENUM_EXT("INT4_MI2S_RX Channels", int4_mi2s_rx_chs,
 			int_mi2s_ch_get, int_mi2s_ch_put),
 	SOC_ENUM_EXT("VI_FEED_TX Channels", int5_mi2s_tx_chs,
@@ -919,7 +927,7 @@
 	pr_debug("%s: event = %d\n", __func__, event);
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
-		ret = msm_gpioset_activate(CLIENT_WCD, "dmic_gpio");
+		ret = msm_cdc_pinctrl_select_active_state(pdata->dmic_gpio_p);
 		if (ret < 0) {
 			pr_err("%s: gpio set cannot be activated %sd",
 					__func__, "dmic_gpio");
@@ -927,7 +935,7 @@
 		}
 		break;
 	case SND_SOC_DAPM_POST_PMD:
-		ret = msm_gpioset_suspend(CLIENT_WCD, "dmic_gpio");
+		ret = msm_cdc_pinctrl_select_sleep_state(pdata->dmic_gpio_p);
 		if (ret < 0) {
 			pr_err("%s: gpio set cannot be de-activated %sd",
 					__func__, "dmic_gpio");
@@ -954,7 +962,7 @@
 	case SND_SOC_DAPM_POST_PMD:
 		pr_debug("%s: mclk_res_ref = %d\n",
 			__func__, atomic_read(&pdata->int_mclk0_rsc_ref));
-		ret = msm_gpioset_suspend(CLIENT_WCD, "int_pdm");
+		ret = msm_cdc_pinctrl_select_sleep_state(pdata->pdm_gpio_p);
 		if (ret < 0) {
 			pr_err("%s: gpio set cannot be de-activated %sd",
 					__func__, "int_pdm");
@@ -963,7 +971,7 @@
 		if (atomic_read(&pdata->int_mclk0_rsc_ref) == 0) {
 			pr_debug("%s: disabling MCLK\n", __func__);
 			/* disable the codec mclk config*/
-			msm8x16_wcd_mclk_enable(codec, 0, true);
+			msm_anlg_cdc_mclk_enable(codec, 0, true);
 			msm_int_enable_dig_cdc_clk(codec, 0, true);
 		}
 		break;
@@ -1055,8 +1063,7 @@
 	bit_per_sample =
 	    get_int_mi2s_bits_per_sample(int_mi2s_cfg[idx].bit_format);
 	int_mi2s_clk[idx].clk_freq_in_hz =
-	    (int_mi2s_cfg[idx].sample_rate * int_mi2s_cfg[idx].channels
-					* bit_per_sample);
+	    (int_mi2s_cfg[idx].sample_rate * 2 * bit_per_sample);
 }
 
 static int int_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
@@ -1098,7 +1105,7 @@
 	return ret;
 }
 
-static int msm_swr_mi2s_snd_startup(struct snd_pcm_substream *substream)
+static int msm_sdw_mi2s_snd_startup(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
@@ -1113,13 +1120,6 @@
 				__func__, ret);
 		return ret;
 	}
-	/* Enable the codec mclk config */
-	ret = msm_gpioset_activate(CLIENT_WCD, "swr_pin");
-	if (ret < 0) {
-		pr_err("%s: gpio set cannot be activated %sd",
-				__func__, "swr_pin");
-		return ret;
-	}
 	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
 	if (ret < 0)
 		pr_err("%s: set fmt cpu dai failed; ret=%d\n", __func__, ret);
@@ -1127,7 +1127,7 @@
 	return ret;
 }
 
-static void msm_swr_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
+static void msm_sdw_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
 {
 	int ret;
 
@@ -1144,9 +1144,11 @@
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_codec *codec = rtd->codec_dais[ANA_CDC]->codec;
 	int ret = 0;
+	struct msm_asoc_mach_data *pdata = NULL;
 
+	pdata = snd_soc_card_get_drvdata(codec->component.card);
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
 
@@ -1162,13 +1164,13 @@
 		return ret;
 	}
 	/* Enable the codec mclk config */
-	ret = msm_gpioset_activate(CLIENT_WCD, "int_pdm");
+	ret = msm_cdc_pinctrl_select_active_state(pdata->pdm_gpio_p);
 	if (ret < 0) {
 		pr_err("%s: gpio set cannot be activated %s\n",
 				__func__, "int_pdm");
 		return ret;
 	}
-	msm8x16_wcd_mclk_enable(codec, 1, true);
+	msm_anlg_cdc_mclk_enable(codec, 1, true);
 	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
 	if (ret < 0)
 		pr_err("%s: set fmt cpu dai failed; ret=%d\n", __func__, ret);
@@ -1249,22 +1251,33 @@
 
 static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct snd_soc_codec *codec = rtd->codec;
-	struct snd_soc_dapm_context *dapm =
-			snd_soc_codec_get_dapm(codec);
+	struct snd_soc_codec *dig_cdc = rtd->codec_dais[DIG_CDC]->codec;
+	struct snd_soc_codec *ana_cdc = rtd->codec_dais[ANA_CDC]->codec;
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(ana_cdc);
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+	struct snd_card *card;
 	int ret = -ENOMEM;
 
 	pr_debug("%s(),dev_name%s\n", __func__, dev_name(cpu_dai->dev));
 
-	snd_soc_add_codec_controls(codec, msm_snd_controls,
-			ARRAY_SIZE(msm_snd_controls));
-
-	snd_soc_add_codec_controls(codec, msm_common_snd_controls,
-			ARRAY_SIZE(msm_snd_controls));
+	ret = snd_soc_add_codec_controls(ana_cdc, msm_snd_controls,
+				   ARRAY_SIZE(msm_snd_controls));
+	if (ret < 0) {
+		pr_err("%s: add_codec_controls failed: %d\n",
+			__func__, ret);
+		return ret;
+	}
+	ret = snd_soc_add_codec_controls(ana_cdc, msm_common_snd_controls,
+				   msm_common_snd_controls_size());
+	if (ret < 0) {
+		pr_err("%s: add common snd controls failed: %d\n",
+			__func__, ret);
+		return ret;
+	}
 
 	snd_soc_dapm_new_controls(dapm, msm_int_dapm_widgets,
-			ARRAY_SIZE(msm_int_dapm_widgets));
+				  ARRAY_SIZE(msm_int_dapm_widgets));
 
 	snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
 	snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
@@ -1285,39 +1298,78 @@
 
 	snd_soc_dapm_sync(dapm);
 
-	msm8x16_wcd_spk_ext_pa_cb(enable_spk_ext_pa, codec);
-	msm8x16_wcd_hph_comp_cb(msm_config_hph_compander_gpio, codec);
+	msm_anlg_cdc_spk_ext_pa_cb(enable_spk_ext_pa, ana_cdc);
+	msm_dig_cdc_hph_comp_cb(msm_config_hph_compander_gpio, dig_cdc);
 
 	mbhc_cfg_ptr->calibration = def_msm_int_wcd_mbhc_cal();
 	if (mbhc_cfg_ptr->calibration) {
-		ret = msm8x16_wcd_hs_detect(codec, mbhc_cfg_ptr);
+		ret = msm_anlg_cdc_hs_detect(ana_cdc, mbhc_cfg_ptr);
 		if (ret) {
-			pr_err("%s: msm8x16_wcd_hs_detect failed\n", __func__);
+			pr_err("%s: msm_anlg_cdc_hs_detect failed\n", __func__);
 			kfree(mbhc_cfg_ptr->calibration);
 			return ret;
 		}
 	}
+	card = rtd->card->snd_card;
+	if (!codec_root)
+		codec_root = snd_register_module_info(card->module, "codecs",
+						      card->proc_root);
+	if (!codec_root) {
+		pr_debug("%s: Cannot create codecs module entry\n",
+			 __func__);
+		goto done;
+	}
+	pdata->codec_root = codec_root;
+	msm_dig_codec_info_create_codec_entry(codec_root, dig_cdc);
+	msm_anlg_codec_info_create_codec_entry(codec_root, ana_cdc);
+done:
 	return 0;
 }
 
-static int msm_swr_audrx_init(struct snd_soc_pcm_runtime *rtd)
+static int msm_sdw_audrx_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dapm_context *dapm =
 			snd_soc_codec_get_dapm(codec);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+	struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux;
+	struct snd_card *card;
 
-	snd_soc_add_codec_controls(codec, msm_swr_controls,
-			ARRAY_SIZE(msm_swr_controls));
+	snd_soc_add_codec_controls(codec, msm_sdw_controls,
+			ARRAY_SIZE(msm_sdw_controls));
 
-	snd_soc_dapm_ignore_suspend(dapm, "AIF1_SWR Playback");
-	snd_soc_dapm_ignore_suspend(dapm, "VIfeed_SWR");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1_SDW Playback");
+	snd_soc_dapm_ignore_suspend(dapm, "VIfeed_SDW");
 	snd_soc_dapm_ignore_suspend(dapm, "SPK1 OUT");
 	snd_soc_dapm_ignore_suspend(dapm, "SPK2 OUT");
-	snd_soc_dapm_ignore_suspend(dapm, "AIF1_SWR VI");
-	snd_soc_dapm_ignore_suspend(dapm, "VIINPUT_SWR");
+	snd_soc_dapm_ignore_suspend(dapm, "AIF1_SDW VI");
+	snd_soc_dapm_ignore_suspend(dapm, "VIINPUT_SDW");
 
 	snd_soc_dapm_sync(dapm);
 
+	/*
+	 * Send speaker configuration only for WSA8810.
+	 * Default configuration is for WSA8815.
+	 */
+	if (rtd_aux && rtd_aux->component)
+		if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
+		    !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+			msm_sdw_set_spkr_mode(rtd->codec, SPKR_MODE_1);
+			msm_sdw_set_spkr_gain_offset(rtd->codec,
+						   RX_GAIN_OFFSET_M1P5_DB);
+		}
+	card = rtd->card->snd_card;
+	if (!codec_root)
+		codec_root = snd_register_module_info(card->module, "codecs",
+						      card->proc_root);
+	if (!codec_root) {
+		pr_debug("%s: Cannot create codecs module entry\n",
+			 __func__);
+		goto done;
+	}
+	pdata->codec_root = codec_root;
+	msm_sdw_codec_info_create_codec_entry(codec_root, codec);
+done:
 	return 0;
 }
 
@@ -1540,9 +1592,42 @@
 	.shutdown = msm_int_mi2s_snd_shutdown,
 };
 
-static struct snd_soc_ops msm_swr_mi2s_be_ops = {
-	.startup = msm_swr_mi2s_snd_startup,
-	.shutdown = msm_swr_mi2s_snd_shutdown,
+static struct snd_soc_ops msm_sdw_mi2s_be_ops = {
+	.startup = msm_sdw_mi2s_snd_startup,
+	.shutdown = msm_sdw_mi2s_snd_shutdown,
+};
+
+struct snd_soc_dai_link_component dlc_rx1[] = {
+	{
+		.of_node = NULL,
+		.dai_name = "msm_dig_cdc_dai_rx1",
+	},
+	{
+		.of_node = NULL,
+		.dai_name  = "msm_anlg_cdc_i2s_rx1",
+	},
+};
+
+struct snd_soc_dai_link_component dlc_tx1[] = {
+	{
+		.of_node = NULL,
+		.dai_name = "msm_dig_cdc_dai_tx1",
+	},
+	{
+		.of_node = NULL,
+		.dai_name  = "msm_anlg_cdc_i2s_tx1",
+	},
+};
+
+struct snd_soc_dai_link_component dlc_tx2[] = {
+	{
+		.of_node = NULL,
+		.dai_name = "msm_dig_cdc_dai_tx2",
+	},
+	{
+		.of_node = NULL,
+		.dai_name  = "msm_anlg_cdc_i2s_tx2",
+	},
 };
 
 /* Digital audio interface glue - connects codec <---> CPU */
@@ -1675,6 +1760,7 @@
 		.stream_name = "Compress1",
 		.cpu_dai_name	= "MultiMedia4",
 		.platform_name  = "msm-compress-dsp",
+		.async_ops = ASYNC_DPCM_SND_SOC_HW_PARAMS,
 		.dynamic = 1,
 		.dpcm_capture = 1,
 		.dpcm_playback = 1,
@@ -1721,25 +1807,24 @@
 		.codec_name = "snd-soc-dummy",
 	},
 	{/* hw:x,11 */
-		.name = "SLIMBUS_3 Hostless",
-		.stream_name = "SLIMBUS_3 Hostless",
-		.cpu_dai_name = "SLIMBUS3_HOSTLESS",
+		.name = "INT3 MI2S_TX Hostless",
+		.stream_name = "INT3 MI2S_TX Hostless",
+		.cpu_dai_name = "INT3_MI2S_TX_HOSTLESS",
 		.platform_name = "msm-pcm-hostless",
 		.dynamic = 1,
 		.dpcm_capture = 1,
-		.dpcm_playback = 1,
 		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
 			    SND_SOC_DPCM_TRIGGER_POST},
 		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
 		.ignore_suspend = 1,
-		.ignore_pmdown_time = 1, /* dai link has playback support */
+		.ignore_pmdown_time = 1,
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
 	{/* hw:x,12 */
-		.name = "SLIMBUS_4 Hostless",
-		.stream_name = "SLIMBUS_4 Hostless",
-		.cpu_dai_name = "SLIMBUS4_HOSTLESS",
+		.name = "SLIMBUS_7 Hostless",
+		.stream_name = "SLIMBUS_7 Hostless",
+		.cpu_dai_name = "SLIMBUS7_HOSTLESS",
 		.platform_name = "msm-pcm-hostless",
 		.dynamic = 1,
 		.dpcm_capture = 1,
@@ -2114,21 +2199,6 @@
 		.codec_name = "snd-soc-dummy",
 	},
 	{/* hw:x,35 */
-		.name = LPASS_BE_INT5_MI2S_TX,
-		.stream_name = "INT5_mi2s Capture",
-		.cpu_dai_name = "msm-dai-q6-mi2s.12",
-		.platform_name = "msm-pcm-hostless",
-		.codec_name = "msm_swr_codec",
-		.codec_dai_name = "msm_swr_vifeedback",
-		.be_id = MSM_BACKEND_DAI_INT5_MI2S_TX,
-		.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
-		.ops = &msm_swr_mi2s_be_ops,
-		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
-		.ignore_suspend = 1,
-		.dpcm_capture = 1,
-		.ignore_pmdown_time = 1,
-	},
-	{/* hw:x,36 */
 		.name = "Primary MI2S_RX Hostless",
 		.stream_name = "Primary MI2S_RX Hostless",
 		.cpu_dai_name = "PRI_MI2S_RX_HOSTLESS",
@@ -2145,7 +2215,7 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
-	{/* hw:x,37 */
+	{/* hw:x,36 */
 		.name = "Secondary MI2S_RX Hostless",
 		.stream_name = "Secondary MI2S_RX Hostless",
 		.cpu_dai_name = "SEC_MI2S_RX_HOSTLESS",
@@ -2162,7 +2232,7 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
-	{/* hw:x,38 */
+	{/* hw:x,37 */
 		.name = "Tertiary MI2S_RX Hostless",
 		.stream_name = "Tertiary MI2S_RX Hostless",
 		.cpu_dai_name = "TERT_MI2S_RX_HOSTLESS",
@@ -2179,7 +2249,7 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
-	{/* hw:x,39 */
+	{/* hw:x,38 */
 		.name = "INT0 MI2S_RX Hostless",
 		.stream_name = "INT0 MI2S_RX Hostless",
 		.cpu_dai_name = "INT0_MI2S_RX_HOSTLESS",
@@ -2196,14 +2266,53 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
+	{/* hw:x,39 */
+		.name = "SDM660 HFP TX",
+		.stream_name = "MultiMedia6",
+		.cpu_dai_name = "MultiMedia6",
+		.platform_name  = "msm-pcm-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+	},
+};
+
+
+static struct snd_soc_dai_link msm_int_wsa_dai[] = {
+	{/* hw:x,40 */
+		.name = LPASS_BE_INT5_MI2S_TX,
+		.stream_name = "INT5_mi2s Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.12",
+		.platform_name = "msm-pcm-hostless",
+		.codec_name = "msm_sdw_codec",
+		.codec_dai_name = "msm_sdw_vifeedback",
+		.be_id = MSM_BACKEND_DAI_INT5_MI2S_TX,
+		.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
+		.ops = &msm_sdw_mi2s_be_ops,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.dpcm_capture = 1,
+		.ignore_pmdown_time = 1,
+	},
+};
+
+static struct snd_soc_dai_link msm_int_be_dai[] = {
 	/* Backend I2S DAI Links */
 	{
 		.name = LPASS_BE_INT0_MI2S_RX,
 		.stream_name = "INT0 MI2S Playback",
 		.cpu_dai_name = "msm-dai-q6-mi2s.7",
 		.platform_name = "msm-pcm-routing",
-		.codec_name     = "cajon_codec",
-		.codec_dai_name = "msm8x16_wcd_i2s_rx1",
+		.codecs = dlc_rx1,
+		.num_codecs = CODECS_MAX,
 		.no_pcm = 1,
 		.dpcm_playback = 1,
 		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
@@ -2215,18 +2324,19 @@
 		.ignore_suspend = 1,
 	},
 	{
-		.name = LPASS_BE_INT4_MI2S_RX,
-		.stream_name = "INT4 MI2S Playback",
-		.cpu_dai_name = "msm-dai-q6-mi2s.11",
+		.name = LPASS_BE_INT3_MI2S_TX,
+		.stream_name = "INT3 MI2S Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.10",
 		.platform_name = "msm-pcm-routing",
-		.codec_name = "msm_swr_codec",
-		.codec_dai_name = "msm_swr_i2s_rx1",
+		.codecs = dlc_tx1,
+		.num_codecs = CODECS_MAX,
 		.no_pcm = 1,
-		.dpcm_playback = 1,
-		.be_id = MSM_BACKEND_DAI_INT4_MI2S_RX,
-		.init = &msm_swr_audrx_init,
+		.dpcm_capture = 1,
+		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
+			ASYNC_DPCM_SND_SOC_HW_PARAMS,
+		.be_id = MSM_BACKEND_DAI_INT3_MI2S_TX,
 		.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
-		.ops = &msm_swr_mi2s_be_ops,
+		.ops = &msm_int_mi2s_be_ops,
 		.ignore_suspend = 1,
 	},
 	{
@@ -2234,8 +2344,8 @@
 		.stream_name = "INT2 MI2S Capture",
 		.cpu_dai_name = "msm-dai-q6-mi2s.9",
 		.platform_name = "msm-pcm-routing",
-		.codec_name     = "cajon_codec",
-		.codec_dai_name = "msm8x16_wcd_i2s_tx2",
+		.codecs = dlc_tx2,
+		.num_codecs = CODECS_MAX,
 		.no_pcm = 1,
 		.dpcm_capture = 1,
 		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
@@ -2246,22 +2356,6 @@
 		.ignore_suspend = 1,
 	},
 	{
-		.name = LPASS_BE_INT3_MI2S_TX,
-		.stream_name = "INT3 MI2S Capture",
-		.cpu_dai_name = "msm-dai-q6-mi2s.10",
-		.platform_name = "msm-pcm-routing",
-		.codec_name     = "cajon_codec",
-		.codec_dai_name = "msm8x16_wcd_i2s_tx1",
-		.no_pcm = 1,
-		.dpcm_capture = 1,
-		.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
-			ASYNC_DPCM_SND_SOC_HW_PARAMS,
-		.be_id = MSM_BACKEND_DAI_INT3_MI2S_TX,
-		.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
-		.ops = &msm_int_mi2s_be_ops,
-		.ignore_suspend = 1,
-	},
-	{
 		.name = LPASS_BE_AFE_PCM_RX,
 		.stream_name = "AFE Playback",
 		.cpu_dai_name = "msm-dai-q6-dev.224",
@@ -2785,15 +2879,55 @@
 	},
 };
 
+static struct snd_soc_dai_link msm_wsa_be_dai_links[] = {
+	{
+		.name = LPASS_BE_INT4_MI2S_RX,
+		.stream_name = "INT4 MI2S Playback",
+		.cpu_dai_name = "msm-dai-q6-mi2s.11",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm_sdw_codec",
+		.codec_dai_name = "msm_sdw_i2s_rx1",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_INT4_MI2S_RX,
+		.init = &msm_sdw_audrx_init,
+		.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
+		.ops = &msm_sdw_mi2s_be_ops,
+		.ignore_suspend = 1,
+	},
+};
+
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+	/* DISP PORT BACK END DAI Link */
+	{
+		.name = LPASS_BE_DISPLAY_PORT,
+		.stream_name = "Display Port Playback",
+		.cpu_dai_name = "msm-dai-q6-dp.24608",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-ext-disp-audio-codec-rx",
+		.codec_dai_name = "msm_dp_audio_codec_rx_dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+		.be_hw_params_fixup = msm_common_be_hw_params_fixup,
+		.ignore_pmdown_time = 1,
+		.ignore_suspend = 1,
+	},
+};
+
 static struct snd_soc_dai_link msm_int_dai_links[
 ARRAY_SIZE(msm_int_dai) +
+ARRAY_SIZE(msm_int_wsa_dai) +
+ARRAY_SIZE(msm_int_be_dai) +
 ARRAY_SIZE(msm_mi2s_be_dai_links) +
 ARRAY_SIZE(msm_auxpcm_be_dai_links)+
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(msm_wsa_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
 
-static struct snd_soc_card msmfalcon_card = {
-	/* snd_soc_card_msmfalcon */
-	.name		= "msmfalcon-snd-card",
+static struct snd_soc_card sdm660_card = {
+	/* snd_soc_card_sdm660 */
+	.name		= "sdm660-snd-card",
 	.dai_link	= msm_int_dai,
 	.num_links	= ARRAY_SIZE(msm_int_dai),
 };
@@ -2844,7 +2978,7 @@
 static struct snd_soc_card *msm_int_populate_sndcard_dailinks(
 						struct device *dev)
 {
-	struct snd_soc_card *card = &msmfalcon_card;
+	struct snd_soc_card *card = &sdm660_card;
 	struct snd_soc_dai_link *dailink;
 	int len1;
 
@@ -2852,6 +2986,16 @@
 	len1 = ARRAY_SIZE(msm_int_dai);
 	memcpy(msm_int_dai_links, msm_int_dai, sizeof(msm_int_dai));
 	dailink = msm_int_dai_links;
+	if (!of_property_read_bool(dev->of_node,
+				  "qcom,wsa-disable")) {
+		memcpy(dailink + len1,
+		       msm_int_wsa_dai,
+		       sizeof(msm_int_wsa_dai));
+		len1 += ARRAY_SIZE(msm_int_wsa_dai);
+	}
+	memcpy(dailink + len1, msm_int_be_dai, sizeof(msm_int_be_dai));
+	len1 += ARRAY_SIZE(msm_int_be_dai);
+
 	if (of_property_read_bool(dev->of_node,
 				  "qcom,mi2s-audio-intf")) {
 		memcpy(dailink + len1,
@@ -2874,6 +3018,20 @@
 		       sizeof(msm_wcn_be_dai_links));
 		len1 += ARRAY_SIZE(msm_wcn_be_dai_links);
 	}
+	if (!of_property_read_bool(dev->of_node, "qcom,wsa-disable")) {
+		memcpy(dailink + len1,
+		       msm_wsa_be_dai_links,
+		       sizeof(msm_wsa_be_dai_links));
+		len1 += ARRAY_SIZE(msm_wsa_be_dai_links);
+	}
+	if (of_property_read_bool(dev->of_node, "qcom,ext-disp-audio-rx")) {
+		dev_dbg(dev, "%s(): ext disp audio support present\n",
+				__func__);
+		memcpy(dailink + len1,
+			ext_disp_be_dai_link,
+			sizeof(ext_disp_be_dai_link));
+		len1 += ARRAY_SIZE(ext_disp_be_dai_link);
+	}
 	card->dai_link = dailink;
 	card->num_links = len1;
 	return card;
@@ -2913,8 +3071,7 @@
 			AFE_API_VERSION_I2S_CONFIG;
 	pdata->digital_cdc_core_clk.clk_id =
 			Q6AFE_LPASS_CLK_ID_INT_MCLK_0;
-	pdata->digital_cdc_core_clk.clk_freq_in_hz =
-			pdata->mclk_freq;
+	pdata->digital_cdc_core_clk.clk_freq_in_hz = pdata->mclk_freq;
 	pdata->digital_cdc_core_clk.clk_attri =
 			Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO;
 	pdata->digital_cdc_core_clk.clk_root =
diff --git a/sound/soc/msm/msmfalcon-internal.h b/sound/soc/msm/sdm660-internal.h
similarity index 94%
rename from sound/soc/msm/msmfalcon-internal.h
rename to sound/soc/msm/sdm660-internal.h
index e5e3e7c..ccc62b8 100644
--- a/sound/soc/msm/msmfalcon-internal.h
+++ b/sound/soc/msm/sdm660-internal.h
@@ -10,8 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef __MSMFALCON_INTERNAL
-#define __MSMFALCON_INTERNAL
+#ifndef __SDM660_INTERNAL
+#define __SDM660_INTERNAL
 
 #include <sound/soc.h>
 
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index e80017f..ad3cc68 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -528,6 +528,11 @@
 				cstream, &async_domain);
 			} else {
 				be_list[j++] = be;
+				if (j == DPCM_MAX_BE_USERS) {
+					dev_dbg(fe->dev,
+						"ASoC: MAX backend users!\n");
+					break;
+				}
 			}
 		}
 		for (i = 0; i < j; i++) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7d505e2..94ea909 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -78,8 +78,7 @@
 	[snd_soc_dapm_dai_link] = 2,
 	[snd_soc_dapm_dai_in] = 4,
 	[snd_soc_dapm_dai_out] = 4,
-	[snd_soc_dapm_aif_in] = 4,
-	[snd_soc_dapm_aif_out] = 4,
+	[snd_soc_dapm_adc] = 4,
 	[snd_soc_dapm_mic] = 5,
 	[snd_soc_dapm_mux] = 6,
 	[snd_soc_dapm_demux] = 6,
@@ -88,7 +87,8 @@
 	[snd_soc_dapm_mixer] = 8,
 	[snd_soc_dapm_mixer_named_ctl] = 8,
 	[snd_soc_dapm_pga] = 9,
-	[snd_soc_dapm_adc] = 10,
+	[snd_soc_dapm_aif_in] = 9,
+	[snd_soc_dapm_aif_out] = 9,
 	[snd_soc_dapm_out_drv] = 11,
 	[snd_soc_dapm_hp] = 11,
 	[snd_soc_dapm_spk] = 11,
@@ -100,7 +100,9 @@
 static int dapm_down_seq[] = {
 	[snd_soc_dapm_pre] = 0,
 	[snd_soc_dapm_kcontrol] = 1,
-	[snd_soc_dapm_adc] = 2,
+	[snd_soc_dapm_aif_in] = 2,
+	[snd_soc_dapm_aif_out] = 2,
+	[snd_soc_dapm_adc] = 5,
 	[snd_soc_dapm_hp] = 3,
 	[snd_soc_dapm_spk] = 3,
 	[snd_soc_dapm_line] = 3,
@@ -114,8 +116,6 @@
 	[snd_soc_dapm_micbias] = 8,
 	[snd_soc_dapm_mux] = 9,
 	[snd_soc_dapm_demux] = 9,
-	[snd_soc_dapm_aif_in] = 10,
-	[snd_soc_dapm_aif_out] = 10,
 	[snd_soc_dapm_dai_in] = 10,
 	[snd_soc_dapm_dai_out] = 10,
 	[snd_soc_dapm_dai_link] = 11,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 4f4d230..be6290d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1931,14 +1931,14 @@
 
 	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
-	/* shutdown the BEs */
-	dpcm_be_dai_shutdown(fe, substream->stream);
-
 	dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
 
 	/* now shutdown the frontend */
 	soc_pcm_close(substream);
 
+	/* shutdown the BEs */
+	dpcm_be_dai_shutdown(fe, substream->stream);
+
 	/* run the stream event for each BE */
 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
 
@@ -2521,6 +2521,10 @@
 							    dpcm, domain);
 		} else {
 			dpcm_async[i++] = dpcm;
+			if (i == DPCM_MAX_BE_USERS) {
+				dev_dbg(fe->dev, "ASoC: MAX backend users!\n");
+				break;
+			}
 		}
 	}
 
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index a452ad7..f32cfa4 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -162,5 +162,13 @@
 
 source "sound/usb/line6/Kconfig"
 
+config SND_USB_AUDIO_QMI
+	tristate "USB Audio QMI Service driver"
+	depends on MSM_QMI_INTERFACE
+	help
+	  Starts a USB Audio QMI server that communicates with a remote
+	  entity to perform operations such as enabling or disabling a
+	  particular audio stream on a connected USB device.
+
 endif	# SND_USB
 
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 2d2d122..d2ac038 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -26,3 +26,4 @@
 
 obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/
 obj-$(CONFIG_SND_USB_LINE6)	+= line6/
+obj-$(CONFIG_SND_USB_AUDIO_QMI) += usb_audio_qmi_v01.o usb_audio_qmi_svc.o
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 90a4e68..ccf06de 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -113,6 +113,71 @@
 static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
 static struct usb_driver usb_audio_driver;
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip))
+{
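+	/*
+	 * Legacy card numbers are assigned dynamically, so walk usb_chip[]
+	 * by chip->card->number, validate the PCM device index and stream
+	 * direction, and return the substream along with its owning chip.
+	 * The caller's disconnect_cb is cached on the chip so it can be
+	 * notified when the device is removed.
+	 */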
+	int idx;
+	struct snd_usb_stream *as;
+	struct snd_usb_substream *subs = NULL;
+	struct snd_usb_audio *chip = NULL;
+
+	mutex_lock(&register_mutex);
+	/*
+	 * legacy audio snd card number assignment is dynamic. Hence
+	 * search using chip->card->number
+	 */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!usb_chip[idx])
+			continue;
+		if (usb_chip[idx]->card->number == card_num) {
+			chip = usb_chip[idx];
+			break;
+		}
+	}
+
+	if (!chip || atomic_read(&chip->shutdown)) {
+		pr_debug("%s: instance of usb card # %d does not exist\n",
+			__func__, card_num);
+		goto err;
+	}
+
+	if (pcm_idx >= chip->pcm_devs) {
+		pr_err("%s: invalid pcm dev number %u > %d\n", __func__,
+			pcm_idx, chip->pcm_devs);
+		goto err;
+	}
+
+	if (direction > SNDRV_PCM_STREAM_CAPTURE) {
+		pr_err("%s: invalid direction %u\n", __func__, direction);
+		goto err;
+	}
+
+	list_for_each_entry(as, &chip->pcm_list, list) {
+		if (as->pcm_index == pcm_idx) {
+			subs = &as->substream[direction];
+			if (subs->interface < 0 && !subs->data_endpoint &&
+				!subs->sync_endpoint) {
+				pr_debug("%s: stream disconnected, bail out\n",
+					__func__);
+				subs = NULL;
+				goto err;
+			}
+			goto done;
+		}
+	}
+
+done:
+	chip->card_num = card_num;
+	chip->disconnect_cb = disconnect_cb;
+err:
+	*uchip = chip;
+	if (!subs)
+		pr_debug("%s: substream instance not found\n", __func__);
+	mutex_unlock(&register_mutex);
+	return subs;
+}
+
 /*
  * disconnect streams
  * called from usb_audio_disconnect()
@@ -325,6 +390,7 @@
 	list_for_each_entry_safe(ep, n, &chip->ep_list, list)
 		snd_usb_endpoint_free(ep);
 
+	mutex_destroy(&chip->dev_lock);
 	mutex_destroy(&chip->mutex);
 	if (!atomic_read(&chip->shutdown))
 		dev_set_drvdata(&chip->dev->dev, NULL);
@@ -383,6 +449,7 @@
 	}
 
 	mutex_init(&chip->mutex);
+	mutex_init(&chip->dev_lock);
 	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
@@ -630,6 +697,8 @@
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
 	usb_set_intfdata(intf, chip);
+	intf->needs_remote_wakeup = 1;
+	usb_enable_autosuspend(chip->dev);
 	atomic_dec(&chip->active);
 	mutex_unlock(&register_mutex);
 	return 0;
@@ -659,6 +728,9 @@
 
 	card = chip->card;
 
+	if (chip->disconnect_cb)
+		chip->disconnect_cb(chip);
+
 	mutex_lock(&register_mutex);
 	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 111b0f0..25cddcc 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -167,4 +167,8 @@
 	struct list_head list;
 };
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip));
+
 #endif /* __USBAUDIO_CARD_H */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c5251aa..70e1477 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -357,7 +357,7 @@
 		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
 		if (err < 0)
 			usb_audio_err(ep->chip,
-				"Unable to submit urb #%d: %d (urb %p)\n",
+				"Unable to submit urb #%d: %d (urb %pK)\n",
 				ctx->index, err, ctx->urb);
 		else
 			set_bit(ctx->index, &ep->active_mask);
@@ -459,7 +459,7 @@
 		    ep->iface == alts->desc.bInterfaceNumber &&
 		    ep->altsetting == alts->desc.bAlternateSetting) {
 			usb_audio_dbg(ep->chip,
-				      "Re-using EP %x in iface %d,%d @%p\n",
+				      "Re-using EP %x in iface %d,%d @%pK\n",
 					ep_num, ep->iface, ep->altsetting, ep);
 			goto __exit_unlock;
 		}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 48afae0..db85d92 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -228,7 +228,7 @@
 	if (!test_and_set_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) {
 		struct snd_usb_endpoint *ep = subs->data_endpoint;
 
-		dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting data EP @%pK\n", ep);
 
 		ep->data_subs = subs;
 		err = snd_usb_endpoint_start(ep);
@@ -257,7 +257,7 @@
 			}
 		}
 
-		dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting sync EP @%pK\n", ep);
 
 		ep->sync_slave = subs->data_endpoint;
 		err = snd_usb_endpoint_start(ep);
@@ -554,6 +554,70 @@
 	return 0;
 }
 
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable)
+{
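+	/*
+	 * Called from the USB audio QMI service rather than via ALSA ops:
+	 * on disable, return the interface to altsetting 0 and let the
+	 * device autosuspend; on enable, autoresume it, pick the
+	 * audioformat matching the cached pcm_format/cur_rate/channels and
+	 * program the interface and sample rate accordingly.
+	 */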
+	struct audioformat *fmt;
+	struct usb_host_interface *alts;
+	struct usb_interface *iface;
+	int ret;
+
+	if (!enable) {
+		if (subs->interface >= 0) {
+			usb_set_interface(subs->dev, subs->interface, 0);
+			subs->altset_idx = 0;
+			subs->interface = -1;
+			subs->cur_audiofmt = NULL;
+		}
+
+		snd_usb_autosuspend(subs->stream->chip);
+		return 0;
+	}
+
+	snd_usb_autoresume(subs->stream->chip);
+	fmt = find_format(subs);
+	if (!fmt) {
+		dev_err(&subs->dev->dev,
+		"cannot set format: format = %#x, rate = %d, channels = %d\n",
+			   subs->pcm_format, subs->cur_rate, subs->channels);
+		return -EINVAL;
+	}
+
+	subs->altset_idx = 0;
+	subs->interface = -1;
+	if (atomic_read(&subs->stream->chip->shutdown)) {
+		ret = -ENODEV;
+	} else {
+		ret = set_format(subs, fmt);
+		if (ret < 0)
+			return ret;
+
+		iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
+		if (!iface) {
+			dev_err(&subs->dev->dev, "Could not get iface %d\n",
+				subs->cur_audiofmt->iface);
+			return -ENODEV;
+		}
+
+		alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
+		ret = snd_usb_init_sample_rate(subs->stream->chip,
+					       subs->cur_audiofmt->iface,
+					       alts,
+					       subs->cur_audiofmt,
+					       subs->cur_rate);
+		if (ret < 0) {
+			dev_err(&subs->dev->dev, "failed to set rate %d\n",
+				subs->cur_rate);
+			return ret;
+		}
+	}
+
+	subs->interface = fmt->iface;
+	subs->altset_idx = fmt->altset_idx;
+
+	return 0;
+}
+
 /*
  * Return the score of matching two audioformats.
  * Veto the audioformat if:
@@ -571,13 +635,13 @@
 
 	if (fp->channels < 1) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no channels\n", __func__, fp);
+			"%s: (fmt @%pK) no channels\n", __func__, fp);
 		return 0;
 	}
 
 	if (!(fp->formats & pcm_format_to_bits(pcm_format))) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for format %d\n", __func__,
+			"%s: (fmt @%pK) no match for format %d\n", __func__,
 			fp, pcm_format);
 		return 0;
 	}
@@ -590,7 +654,7 @@
 	}
 	if (!score) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for rate %d\n", __func__,
+			"%s: (fmt @%pK) no match for rate %d\n", __func__,
 			fp, rate);
 		return 0;
 	}
@@ -599,7 +663,7 @@
 		score++;
 
 	dev_dbg(&subs->dev->dev,
-		"%s: (fmt @%p) score %d\n", __func__, fp, score);
+		"%s: (fmt @%pK) score %d\n", __func__, fp, score);
 
 	return score;
 }
diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
index df7a003..d581f94 100644
--- a/sound/usb/pcm.h
+++ b/sound/usb/pcm.h
@@ -9,6 +9,7 @@
 int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 		       struct usb_host_interface *alts,
 		       struct audioformat *fmt);
-
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable);
 
 #endif /* __USBAUDIO_PCM_H */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 8e9548bc..7437cd5 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -69,9 +69,14 @@
 static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
 {
 	struct snd_usb_stream *stream = pcm->private_data;
+	struct snd_usb_audio *chip;
+
 	if (stream) {
+		mutex_lock(&stream->chip->dev_lock);
+		chip = stream->chip;
 		stream->pcm = NULL;
 		snd_usb_audio_stream_free(stream);
+		mutex_unlock(&chip->dev_lock);
 	}
 }
 
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
new file mode 100644
index 0000000..5a1974e
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -0,0 +1,1325 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/uaccess.h>
+#include <sound/pcm.h>
+#include <sound/core.h>
+#include <sound/asound.h>
+#include <linux/usb.h>
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "helper.h"
+#include "pcm.h"
+#include "usb_audio_qmi_v01.h"
+
+#define SND_PCM_CARD_NUM_MASK 0xffff0000
+#define SND_PCM_DEV_NUM_MASK 0xff00
+#define SND_PCM_STREAM_DIRECTION 0xff
+
+#define PREPEND_SID_TO_IOVA(iova, sid) (u64)(((u64)(iova)) | \
+					(((u64)sid) << 32))
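+
+/*
+ * Illustrative example: usb_token 0x00030200 decodes to PCM card 3
+ * (bits 31:16), PCM device 2 (bits 15:8) and direction 0, i.e. playback
+ * (bits 7:0); PREPEND_SID_TO_IOVA(0x1000, 0x2) yields 0x0000000200001000.
+ */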
+
+/*  event ring iova base address */
+#define IOVA_BASE 0x1000
+
+#define IOVA_DCBA_BASE 0x2000
+#define IOVA_XFER_RING_BASE (IOVA_DCBA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
+#define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
+#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
+#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
+
+#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE)
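+
+/*
+ * Resulting IOVA layout (one page granularity): the secondary event ring
+ * at IOVA_BASE, one DCBA page per sound card from IOVA_DCBA_BASE,
+ * transfer rings from IOVA_XFER_RING_BASE and transfer buffers from
+ * IOVA_XFER_BUF_BASE up to IOVA_XFER_BUF_MAX.
+ */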
+
+struct iova_info {
+	struct list_head list;
+	unsigned long start_iova;
+	size_t size;
+	bool in_use;
+};
+
+struct intf_info {
+	unsigned long data_xfer_ring_va;
+	size_t data_xfer_ring_size;
+	unsigned long sync_xfer_ring_va;
+	size_t sync_xfer_ring_size;
+	unsigned long xfer_buf_va;
+	size_t xfer_buf_size;
+	phys_addr_t xfer_buf_pa;
+	u8 *xfer_buf;
+	u8 intf_num;
+	u8 pcm_card_num;
+	u8 pcm_dev_num;
+	u8 direction;
+	bool in_use;
+};
+
+struct uaudio_dev {
+	struct usb_device *udev;
+	/* audio control interface */
+	struct usb_host_interface *ctrl_intf;
+	unsigned int card_num;
+	atomic_t in_use;
+	struct kref kref;
+	unsigned long dcba_iova;
+	size_t dcba_size;
+	wait_queue_head_t disconnect_wq;
+
+	/* interface specific */
+	int num_intf;
+	struct intf_info *info;
+};
+
+static struct uaudio_dev uadev[SNDRV_CARDS];
+
+struct uaudio_qmi_dev {
+	struct device *dev;
+	u32 sid;
+	u32 intr_num;
+	struct iommu_domain *domain;
+
+	/* list to keep track of available iova */
+	struct list_head dcba_list;
+	size_t dcba_iova_size;
+	unsigned long curr_dcba_iova;
+	struct list_head xfer_ring_list;
+	size_t xfer_ring_iova_size;
+	unsigned long curr_xfer_ring_iova;
+	struct list_head xfer_buf_list;
+	size_t xfer_buf_iova_size;
+	unsigned long curr_xfer_buf_iova;
+	/* bit fields representing pcm card enabled */
+	unsigned long card_slot;
+	/* cache event ring phys addr */
+	u64 er_phys_addr;
+};
+
+static struct uaudio_qmi_dev *uaudio_qdev;
+
+struct uaudio_qmi_svc {
+	struct qmi_handle *uaudio_svc_hdl;
+	void *curr_conn;
+	struct work_struct recv_msg_work;
+	struct work_struct qmi_disconnect_work;
+	struct workqueue_struct *uaudio_wq;
+	ktime_t t_request_recvd;
+	ktime_t t_resp_sent;
+};
+
+static struct uaudio_qmi_svc *uaudio_svc;
+
+static struct msg_desc uaudio_stream_req_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_REQ_V01,
+	.ei_array = qmi_uaudio_stream_req_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_resp_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_RESP_V01,
+	.ei_array = qmi_uaudio_stream_resp_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_ind_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UADUIO_STREAM_IND_V01,
+	.ei_array = qmi_uaudio_stream_ind_msg_v01_ei,
+};
+
+enum mem_type {
+	MEM_EVENT_RING,
+	MEM_DCBA,
+	MEM_XFER_RING,
+	MEM_XFER_BUF,
+};
+
+enum usb_qmi_audio_format {
+	USB_QMI_PCM_FORMAT_S8 = 0,
+	USB_QMI_PCM_FORMAT_U8,
+	USB_QMI_PCM_FORMAT_S16_LE,
+	USB_QMI_PCM_FORMAT_S16_BE,
+	USB_QMI_PCM_FORMAT_U16_LE,
+	USB_QMI_PCM_FORMAT_U16_BE,
+	USB_QMI_PCM_FORMAT_S24_LE,
+	USB_QMI_PCM_FORMAT_S24_BE,
+	USB_QMI_PCM_FORMAT_U24_LE,
+	USB_QMI_PCM_FORMAT_U24_BE,
+	USB_QMI_PCM_FORMAT_S24_3LE,
+	USB_QMI_PCM_FORMAT_S24_3BE,
+	USB_QMI_PCM_FORMAT_U24_3LE,
+	USB_QMI_PCM_FORMAT_U24_3BE,
+	USB_QMI_PCM_FORMAT_S32_LE,
+	USB_QMI_PCM_FORMAT_S32_BE,
+	USB_QMI_PCM_FORMAT_U32_LE,
+	USB_QMI_PCM_FORMAT_U32_BE,
+};
+
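+/*
+ * First-fit IOVA allocator over a per-memory-type list: reuse a free
+ * entry of the exact size, otherwise merge a run of adjacent free
+ * entries that covers the request, and as a last resort carve a new
+ * region from the running cursor (*curr_iova).
+ */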
+static unsigned long uaudio_get_iova(unsigned long *curr_iova,
+	size_t *curr_iova_size, struct list_head *head, size_t size)
+{
+	struct iova_info *info, *new_info = NULL;
+	struct list_head *curr_head;
+	unsigned long va = 0;
+	size_t tmp_size = size;
+	bool found = false;
+
+	if (size % PAGE_SIZE) {
+		pr_err("%s: size %zu is not page size multiple\n", __func__,
+			size);
+		goto done;
+	}
+
+	if (size > *curr_iova_size) {
+		pr_err("%s: size %zu > curr size %zu\n", __func__, size,
+			*curr_iova_size);
+		goto done;
+	}
+	if (*curr_iova_size == 0) {
+		pr_err("%s: iova mapping is full\n", __func__);
+		goto done;
+	}
+
+	list_for_each_entry(info, head, list) {
+		/* exact size iova_info */
+		if (!info->in_use && info->size == size) {
+			info->in_use = true;
+			va = info->start_iova;
+			*curr_iova_size -= size;
+			found = true;
+			pr_debug("%s: exact size :%zu found\n", __func__, size);
+			goto done;
+		} else if (!info->in_use && tmp_size >= info->size) {
+			if (!new_info)
+				new_info = info;
+			pr_debug("%s: partial size: %zu found\n", __func__,
+				info->size);
+			tmp_size -= info->size;
+			if (tmp_size)
+				continue;
+
+			va = new_info->start_iova;
+			for (curr_head = &new_info->list; curr_head !=
+			&info->list; curr_head = curr_head->next) {
+				new_info = list_entry(curr_head, struct
+						iova_info, list);
+				new_info->in_use = true;
+			}
+			info->in_use = true;
+			*curr_iova_size -= size;
+			found = true;
+			goto done;
+		} else {
+			/* iova region in use */
+			new_info = NULL;
+			tmp_size = size;
+		}
+	}
+
+	info = kzalloc(sizeof(struct iova_info), GFP_KERNEL);
+	if (!info) {
+		va = 0;
+		goto done;
+	}
+
+	va = info->start_iova = *curr_iova;
+	info->size = size;
+	info->in_use = true;
+	*curr_iova += size;
+	*curr_iova_size -= size;
+	found = true;
+	list_add_tail(&info->list, head);
+
+done:
+	if (!found)
+		pr_err("%s: unable to find %zu size iova\n", __func__, size);
+	else
+		pr_debug("%s: va:%lu curr_iova:%lu curr_iova_size:%zu\n",
+		__func__, va, *curr_iova, *curr_iova_size);
+
+	return va;
+}
+
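+/*
+ * Map a physical region into the audio peripheral's IOMMU domain at an
+ * IOVA chosen per memory type; the secondary event ring always lives at
+ * IOVA_BASE and is mapped only once (tracked via er_phys_addr).
+ */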
+static unsigned long uaudio_iommu_map(enum mem_type mtype, phys_addr_t pa,
+		size_t size)
+{
+	unsigned long va = 0;
+	bool map = true;
+	int ret;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		va = IOVA_BASE;
+		/* er already mapped */
+		if (uaudio_qdev->er_phys_addr == pa)
+			map = false;
+		break;
+	case MEM_DCBA:
+		va = uaudio_get_iova(&uaudio_qdev->curr_dcba_iova,
+		&uaudio_qdev->dcba_iova_size, &uaudio_qdev->dcba_list, size);
+		break;
+	case MEM_XFER_RING:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
+		&uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list,
+		size);
+		break;
+	case MEM_XFER_BUF:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_buf_iova,
+		&uaudio_qdev->xfer_buf_iova_size, &uaudio_qdev->xfer_buf_list,
+		size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+	}
+
+	if (!va)
+		map = false;
+
+	if (!map)
+		goto done;
+
+	pr_debug("%s: map pa %pa to iova %lu for memtype %d\n", __func__, &pa,
+		va, mtype);
+	ret = iommu_map(uaudio_qdev->domain, va, pa, size,
+		IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+	if (ret)
+		pr_err("%s:failed to map pa:%pa iova:%lu memtype:%d ret:%d\n",
+			__func__, &pa, va, mtype, ret);
+done:
+	return va;
+}
+
+static void uaudio_put_iova(unsigned long va, size_t size, struct list_head
+	*head, size_t *curr_iova_size)
+{
+	struct iova_info *info;
+	size_t tmp_size = size;
+	bool found = false;
+
+	list_for_each_entry(info, head, list) {
+		if (info->start_iova == va) {
+			if (!info->in_use) {
+				pr_err("%s: va %lu is not in use\n", __func__,
+					va);
+				return;
+			}
+			found = true;
+			info->in_use = false;
+			if (info->size == size)
+				goto done;
+		}
+
+		if (found && tmp_size >= info->size) {
+			info->in_use = false;
+			tmp_size -= info->size;
+			if (!tmp_size)
+				goto done;
+		}
+	}
+
+	if (!found) {
+		pr_err("%s: unable to find the va %lu\n", __func__, va);
+		return;
+	}
+done:
+	*curr_iova_size += size;
+	pr_debug("%s: curr_iova_size %zu\n", __func__, *curr_iova_size);
+}
+
+static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long va,
+	size_t size)
+{
+	size_t umap_size;
+	bool unmap = true;
+
+	if (!va || !size)
+		return;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		if (uaudio_qdev->er_phys_addr)
+			uaudio_qdev->er_phys_addr = 0;
+		else
+			unmap = false;
+		break;
+	case MEM_DCBA:
+		uaudio_put_iova(va, size, &uaudio_qdev->dcba_list,
+		&uaudio_qdev->dcba_iova_size);
+		break;
+	case MEM_XFER_RING:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_ring_list,
+		&uaudio_qdev->xfer_ring_iova_size);
+		break;
+	case MEM_XFER_BUF:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_buf_list,
+		&uaudio_qdev->xfer_buf_iova_size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+		unmap = false;
+	}
+
+	if (!unmap)
+		return;
+
+	pr_debug("%s: unmap iova %lu for memtype %d\n", __func__, va, mtype);
+
+	umap_size = iommu_unmap(uaudio_qdev->domain, va, size);
+	if (umap_size != size)
+		pr_err("%s: unmapped size %zu for iova %lu\n", __func__,
+		umap_size, va);
+}
+
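+/*
+ * Build the QMI response for an "enable stream" request: copy the
+ * interface and endpoint descriptors, map the xHCI secondary event ring,
+ * DCBA, transfer ring(s) and a newly allocated transfer buffer into the
+ * IOMMU domain, and cache the per-interface state so it can be unmapped
+ * and freed again on disable or disconnect.
+ */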
+static int prepare_qmi_response(struct snd_usb_substream *subs,
+		struct qmi_uaudio_stream_req_msg_v01 *req_msg,
+		struct qmi_uaudio_stream_resp_msg_v01 *resp, int info_idx)
+{
+	struct usb_interface *iface;
+	struct usb_host_interface *alts;
+	struct usb_interface_descriptor *altsd;
+	struct usb_host_endpoint *ep;
+	struct uac_format_type_i_continuous_descriptor *fmt;
+	struct uac_format_type_i_discrete_descriptor *fmt_v1;
+	struct uac_format_type_i_ext_descriptor *fmt_v2;
+	struct uac1_as_header_descriptor *as;
+	int ret = -ENODEV;
+	int protocol, card_num, pcm_dev_num;
+	void *hdr_ptr;
+	u8 *xfer_buf;
+	u32 len, mult, remainder, xfer_buf_len;
+	unsigned long va, tr_data_va = 0, tr_sync_va = 0, dcba_va = 0,
+	xfer_buf_va = 0;
+	phys_addr_t xhci_pa, xfer_buf_pa;
+
+	iface = usb_ifnum_to_if(subs->dev, subs->interface);
+	if (!iface) {
+		pr_err("%s: interface # %d does not exist\n", __func__,
+			subs->interface);
+		goto err;
+	}
+
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+	xfer_buf_len = req_msg->xfer_buff_size;
+
+	alts = &iface->altsetting[subs->altset_idx];
+	altsd = get_iface_desc(alts);
+	protocol = altsd->bInterfaceProtocol;
+
+	/* get format type */
+	fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+			UAC_FORMAT_TYPE);
+	if (!fmt) {
+		pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__,
+			subs->interface, subs->altset_idx);
+		goto err;
+	}
+
+	if (!uadev[card_num].ctrl_intf) {
+		pr_err("%s: audio ctrl intf info not cached\n", __func__);
+		goto err;
+	}
+
+	hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
+					uadev[card_num].ctrl_intf->extralen,
+					NULL, UAC_HEADER);
+	if (!hdr_ptr) {
+		pr_err("%s: no UAC_HEADER desc\n", __func__);
+		goto err;
+	}
+
+	if (protocol == UAC_VERSION_1) {
+		as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+			UAC_AS_GENERAL);
+		if (!as) {
+			pr_err("%s: %u:%d : no UAC_AS_GENERAL desc\n", __func__,
+				subs->interface, subs->altset_idx);
+			goto err;
+		}
+		resp->data_path_delay = as->bDelay;
+		resp->data_path_delay_valid = 1;
+		fmt_v1 = (struct uac_format_type_i_discrete_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v1->bSubframeSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac1_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else if (protocol == UAC_VERSION_2) {
+		fmt_v2 = (struct uac_format_type_i_ext_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v2->bSubslotSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else {
+		pr_err("%s: unknown protocol version %x\n", __func__, protocol);
+		goto err;
+	}
+
+	resp->slot_id = subs->dev->slot_id;
+	resp->slot_id_valid = 1;
+
+	memcpy(&resp->std_as_opr_intf_desc, &alts->desc, sizeof(alts->desc));
+	resp->std_as_opr_intf_desc_valid = 1;
+
+	ep = usb_pipe_endpoint(subs->dev, subs->data_endpoint->pipe);
+	if (!ep) {
+		pr_err("%s: data ep # %d context is null\n", __func__,
+			subs->data_endpoint->ep_num);
+		goto err;
+	}
+	memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
+	resp->std_as_data_ep_desc_valid = 1;
+
+	xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+	if (!xhci_pa) {
+		pr_err("%s:failed to get data ep ring dma address\n", __func__);
+		goto err;
+	}
+
+	resp->xhci_mem_info.tr_data.pa = xhci_pa;
+
+	if (subs->sync_endpoint) {
+		ep = usb_pipe_endpoint(subs->dev, subs->sync_endpoint->pipe);
+		if (!ep) {
+			pr_debug("%s: implicit fb on data ep\n", __func__);
+			goto skip_sync_ep;
+		}
+		memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
+		resp->std_as_sync_ep_desc_valid = 1;
+
+		xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+		if (!xhci_pa) {
+			pr_err("%s:failed to get sync ep ring dma address\n",
+				__func__);
+			goto err;
+		}
+		resp->xhci_mem_info.tr_sync.pa = xhci_pa;
+	}
+
+skip_sync_ep:
+	resp->interrupter_num = uaudio_qdev->intr_num;
+	resp->interrupter_num_valid = 1;
+
+	/*  map xhci data structures PA memory to iova */
+
+	/* event ring */
+	ret = usb_sec_event_ring_setup(subs->dev, resp->interrupter_num);
+	if (ret) {
+		pr_err("%s: failed to setup sec event ring ret %d\n", __func__,
+			ret);
+		goto err;
+	}
+	xhci_pa = usb_get_sec_event_ring_dma_addr(subs->dev,
+			resp->interrupter_num);
+	if (!xhci_pa) {
+		pr_err("%s: failed to get sec event ring dma address\n",
+		__func__);
+		goto err;
+	}
+
+	va = uaudio_iommu_map(MEM_EVENT_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto err;
+
+	resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.evt_ring.pa = xhci_pa;
+	resp->xhci_mem_info.evt_ring.size = PAGE_SIZE;
+	uaudio_qdev->er_phys_addr = xhci_pa;
+
+	/* dcba */
+	xhci_pa = usb_get_dcba_dma_addr(subs->dev);
+	if (!xhci_pa) {
+		pr_err("%s:failed to get dcba dma address\n", __func__);
+		goto unmap_er;
+	}
+
+	if (!uadev[card_num].dcba_iova) { /* mapped per usb device */
+		va = uaudio_iommu_map(MEM_DCBA, xhci_pa, PAGE_SIZE);
+		if (!va)
+			goto unmap_er;
+
+		uadev[card_num].dcba_iova = va;
+		uadev[card_num].dcba_size = PAGE_SIZE;
+	}
+
+	dcba_va = uadev[card_num].dcba_iova;
+	resp->xhci_mem_info.dcba.va = PREPEND_SID_TO_IOVA(dcba_va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.dcba.pa = xhci_pa;
+	resp->xhci_mem_info.dcba.size = PAGE_SIZE;
+
+	/* data transfer ring */
+	xhci_pa = resp->xhci_mem_info.tr_data.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_dcba;
+
+	tr_data_va = va;
+	resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_data.size = PAGE_SIZE;
+
+	/* sync transfer ring */
+	if (!resp->xhci_mem_info.tr_sync.pa)
+		goto skip_sync;
+
+	xhci_pa = resp->xhci_mem_info.tr_sync.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_data;
+
+	tr_sync_va = va;
+	resp->xhci_mem_info.tr_sync.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_sync.size = PAGE_SIZE;
+
+skip_sync:
+	/* xfer buffer, multiple of 4K only */
+	if (!xfer_buf_len)
+		xfer_buf_len = PAGE_SIZE;
+
+	mult = xfer_buf_len / PAGE_SIZE;
+	remainder = xfer_buf_len % PAGE_SIZE;
+	len = mult * PAGE_SIZE;
+	len += remainder ? PAGE_SIZE : 0;
+
+	if (len > MAX_XFER_BUFF_LEN) {
+		pr_err("%s: req buf len %d > max buf len %lu, setting %lu\n",
+		__func__, len, MAX_XFER_BUFF_LEN, MAX_XFER_BUFF_LEN);
+		len = MAX_XFER_BUFF_LEN;
+	}
+
+	xfer_buf = usb_alloc_coherent(subs->dev, len, GFP_KERNEL, &xfer_buf_pa);
+	if (!xfer_buf)
+		goto unmap_sync;
+
+	resp->xhci_mem_info.xfer_buff.pa = xfer_buf_pa;
+	resp->xhci_mem_info.xfer_buff.size = len;
+
+	va = uaudio_iommu_map(MEM_XFER_BUF, xfer_buf_pa, len);
+	if (!va)
+		goto unmap_sync;
+
+	xfer_buf_va = va;
+	resp->xhci_mem_info.xfer_buff.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+
+	resp->xhci_mem_info_valid = 1;
+
+	if (!atomic_read(&uadev[card_num].in_use)) {
+		kref_init(&uadev[card_num].kref);
+		init_waitqueue_head(&uadev[card_num].disconnect_wq);
+		uadev[card_num].num_intf =
+			subs->dev->config->desc.bNumInterfaces;
+		uadev[card_num].info =
+			kzalloc(sizeof(struct intf_info) *
+			uadev[card_num].num_intf, GFP_KERNEL);
+		if (!uadev[card_num].info) {
+			ret = -ENOMEM;
+			goto unmap_xfer_buf;
+		}
+		uadev[card_num].udev = subs->dev;
+		atomic_set(&uadev[card_num].in_use, 1);
+	} else {
+		kref_get(&uadev[card_num].kref);
+	}
+
+	uadev[card_num].card_num = card_num;
+
+	/* cache intf specific info to use it for unmap and free xfer buf */
+	uadev[card_num].info[info_idx].data_xfer_ring_va = tr_data_va;
+	uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].sync_xfer_ring_va = tr_sync_va;
+	uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].xfer_buf_va = xfer_buf_va;
+	uadev[card_num].info[info_idx].xfer_buf_pa = xfer_buf_pa;
+	uadev[card_num].info[info_idx].xfer_buf_size = len;
+	uadev[card_num].info[info_idx].xfer_buf = xfer_buf;
+	uadev[card_num].info[info_idx].pcm_card_num = card_num;
+	uadev[card_num].info[info_idx].pcm_dev_num = pcm_dev_num;
+	uadev[card_num].info[info_idx].direction = subs->direction;
+	uadev[card_num].info[info_idx].intf_num = subs->interface;
+	uadev[card_num].info[info_idx].in_use = true;
+
+	set_bit(card_num, &uaudio_qdev->card_slot);
+
+	return 0;
+
+unmap_xfer_buf:
+	uaudio_iommu_unmap(MEM_XFER_BUF, xfer_buf_va, len);
+unmap_sync:
+	usb_free_coherent(subs->dev, len, xfer_buf, xfer_buf_pa);
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_sync_va, PAGE_SIZE);
+unmap_data:
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE);
+unmap_dcba:
+	uaudio_iommu_unmap(MEM_DCBA, dcba_va, PAGE_SIZE);
+unmap_er:
+	uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+err:
+	return ret;
+}
+
+static void uaudio_dev_intf_cleanup(struct usb_device *udev,
+	struct intf_info *info)
+{
+	uaudio_iommu_unmap(MEM_XFER_RING, info->data_xfer_ring_va,
+		info->data_xfer_ring_size);
+	info->data_xfer_ring_va = 0;
+	info->data_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_RING, info->sync_xfer_ring_va,
+		info->sync_xfer_ring_size);
+	info->sync_xfer_ring_va = 0;
+	info->sync_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_BUF, info->xfer_buf_va,
+		info->xfer_buf_size);
+	info->xfer_buf_va = 0;
+
+	usb_free_coherent(udev, info->xfer_buf_size,
+		info->xfer_buf, info->xfer_buf_pa);
+	info->xfer_buf_size = 0;
+	info->xfer_buf = NULL;
+	info->xfer_buf_pa = 0;
+
+	info->in_use = false;
+}
+
+static void uaudio_dev_cleanup(struct uaudio_dev *dev)
+{
+	int if_idx;
+
+	/* free xfer buffer and unmap xfer ring and buf per interface */
+	for (if_idx = 0; if_idx < dev->num_intf; if_idx++) {
+		if (!dev->info[if_idx].in_use)
+			continue;
+		uaudio_dev_intf_cleanup(dev->udev, &dev->info[if_idx]);
+		pr_debug("%s: release resources: intf# %d card# %d\n", __func__,
+			dev->info[if_idx].intf_num, dev->card_num);
+	}
+
+	/* iommu_unmap dcba iova for a usb device */
+	uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
+
+	dev->dcba_iova = 0;
+	dev->dcba_size = 0;
+	dev->num_intf = 0;
+
+	/* free interface info */
+	kfree(dev->info);
+	dev->info = NULL;
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	dev->udev = NULL;
+}
+
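+/*
+ * snd-usb-audio disconnect hook: if the remote client still holds this
+ * card, send a QMI disconnect indication and wait for the last reference
+ * to drop before unmapping and freeing the cached resources.
+ */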
+static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
+{
+	int ret;
+	struct uaudio_dev *dev;
+	int card_num = chip->card_num;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct qmi_uaudio_stream_ind_msg_v01 disconnect_ind = {0};
+
+	pr_debug("%s: for card# %d\n", __func__, card_num);
+
+	if (card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card number\n", __func__);
+		return;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	dev = &uadev[card_num];
+
+	/* clean up */
+	if (!dev->udev) {
+		pr_debug("%s: no clean up required\n", __func__);
+		goto done;
+	}
+
+	if (atomic_read(&dev->in_use)) {
+		mutex_unlock(&chip->dev_lock);
+
+		pr_debug("%s: sending qmi indication disconnect\n", __func__);
+		disconnect_ind.dev_event = USB_AUDIO_DEV_DISCONNECT_V01;
+		disconnect_ind.slot_id = dev->udev->slot_id;
+		ret = qmi_send_ind(svc->uaudio_svc_hdl, svc->curr_conn,
+				&uaudio_stream_ind_desc, &disconnect_ind,
+				sizeof(disconnect_ind));
+		if (ret < 0) {
+			pr_err("%s: qmi send failed with err: %d\n",
+					__func__, ret);
+			return;
+		}
+
+		ret = wait_event_interruptible(dev->disconnect_wq,
+				!atomic_read(&dev->in_use));
+		if (ret < 0) {
+			pr_debug("%s: failed with ret %d\n", __func__, ret);
+			return;
+		}
+		mutex_lock(&chip->dev_lock);
+	}
+
+	uaudio_dev_cleanup(dev);
+done:
+	mutex_unlock(&chip->dev_lock);
+}
+
+static void uaudio_dev_release(struct kref *kref)
+{
+	struct uaudio_dev *dev = container_of(kref, struct uaudio_dev, kref);
+
+	pr_debug("%s for dev %pK\n", __func__, dev);
+
+	atomic_set(&dev->in_use, 0);
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	wake_up(&dev->disconnect_wq);
+}
+
+/* maps audio format received over QMI to asound.h based pcm format */
+static int map_pcm_format(unsigned int fmt_received)
+{
+	switch (fmt_received) {
+	case USB_QMI_PCM_FORMAT_S8:
+		return SNDRV_PCM_FORMAT_S8;
+	case USB_QMI_PCM_FORMAT_U8:
+		return SNDRV_PCM_FORMAT_U8;
+	case USB_QMI_PCM_FORMAT_S16_LE:
+		return SNDRV_PCM_FORMAT_S16_LE;
+	case USB_QMI_PCM_FORMAT_S16_BE:
+		return SNDRV_PCM_FORMAT_S16_BE;
+	case USB_QMI_PCM_FORMAT_U16_LE:
+		return SNDRV_PCM_FORMAT_U16_LE;
+	case USB_QMI_PCM_FORMAT_U16_BE:
+		return SNDRV_PCM_FORMAT_U16_BE;
+	case USB_QMI_PCM_FORMAT_S24_LE:
+		return SNDRV_PCM_FORMAT_S24_LE;
+	case USB_QMI_PCM_FORMAT_S24_BE:
+		return SNDRV_PCM_FORMAT_S24_BE;
+	case USB_QMI_PCM_FORMAT_U24_LE:
+		return SNDRV_PCM_FORMAT_U24_LE;
+	case USB_QMI_PCM_FORMAT_U24_BE:
+		return SNDRV_PCM_FORMAT_U24_BE;
+	case USB_QMI_PCM_FORMAT_S24_3LE:
+		return SNDRV_PCM_FORMAT_S24_3LE;
+	case USB_QMI_PCM_FORMAT_S24_3BE:
+		return SNDRV_PCM_FORMAT_S24_3BE;
+	case USB_QMI_PCM_FORMAT_U24_3LE:
+		return SNDRV_PCM_FORMAT_U24_3LE;
+	case USB_QMI_PCM_FORMAT_U24_3BE:
+		return SNDRV_PCM_FORMAT_U24_3BE;
+	case USB_QMI_PCM_FORMAT_S32_LE:
+		return SNDRV_PCM_FORMAT_S32_LE;
+	case USB_QMI_PCM_FORMAT_S32_BE:
+		return SNDRV_PCM_FORMAT_S32_BE;
+	case USB_QMI_PCM_FORMAT_U32_LE:
+		return SNDRV_PCM_FORMAT_U32_LE;
+	case USB_QMI_PCM_FORMAT_U32_BE:
+		return SNDRV_PCM_FORMAT_U32_BE;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int info_idx_from_ifnum(int card_num, int intf_num, bool enable)
+{
+	int i;
+
+	/*
+	 * default index 0 is used when info is allocated upon
+	 * first enable audio stream req for a pcm device
+	 */
+	if (enable && !uadev[card_num].info)
+		return 0;
+
+	for (i = 0; i < uadev[card_num].num_intf; i++) {
+		if (enable && !uadev[card_num].info[i].in_use)
+			return i;
+		else if (!enable &&
+				uadev[card_num].info[i].intf_num == intf_num)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
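+/*
+ * QMI_UAUDIO_STREAM_REQ handler: decode card/device/direction from
+ * usb_token, map the requested QMI PCM format, look up the matching USB
+ * substream, enable or disable the stream, and on enable fill the
+ * response via prepare_qmi_response().
+ */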
+static int handle_uaudio_stream_req(void *req_h, void *req)
+{
+	struct qmi_uaudio_stream_req_msg_v01 *req_msg;
+	struct qmi_uaudio_stream_resp_msg_v01 resp = {{0}, 0};
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip = NULL;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct intf_info *info;
+	int pcm_format;
+	u8 pcm_card_num, pcm_dev_num, direction;
+	int info_idx = -EINVAL, ret = 0;
+
+	req_msg = (struct qmi_uaudio_stream_req_msg_v01 *)req;
+
+	if (!req_msg->audio_format_valid || !req_msg->bit_rate_valid ||
+	!req_msg->number_of_ch_valid || !req_msg->xfer_buff_size_valid) {
+		pr_err("%s: invalid request msg\n", __func__);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	direction = req_msg->usb_token & SND_PCM_STREAM_DIRECTION;
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	pcm_card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+
+	pr_debug("%s:card#:%d dev#:%d dir:%d en:%d fmt:%d rate:%d #ch:%d\n",
+		__func__, pcm_card_num, pcm_dev_num, direction, req_msg->enable,
+		req_msg->audio_format, req_msg->bit_rate,
+		req_msg->number_of_ch);
+
+	if (pcm_card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card # %u", __func__, pcm_card_num);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	pcm_format = map_pcm_format(req_msg->audio_format);
+	if (pcm_format == -EINVAL) {
+		pr_err("%s: unsupported pcm format received %d\n",
+		__func__, req_msg->audio_format);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	subs = find_snd_usb_substream(pcm_card_num, pcm_dev_num, direction,
+					&chip, uaudio_disconnect_cb);
+	if (!subs || !chip || atomic_read(&chip->shutdown)) {
+		pr_err("%s: can't find substream for card# %u, dev# %u dir%u\n",
+			__func__, pcm_card_num, pcm_dev_num, direction);
+		ret = -ENODEV;
+		goto response;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	info_idx = info_idx_from_ifnum(pcm_card_num, subs->interface,
+		req_msg->enable);
+	if (atomic_read(&chip->shutdown) || !subs->stream || !subs->stream->pcm
+			|| !subs->stream->chip) {
+		ret = -ENODEV;
+		mutex_unlock(&chip->dev_lock);
+		goto response;
+	}
+
+	if (req_msg->enable) {
+		if (info_idx < 0) {
+			pr_err("%s interface# %d already in use card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			ret = -EBUSY;
+			mutex_unlock(&chip->dev_lock);
+			goto response;
+		}
+	}
+
+	subs->pcm_format = pcm_format;
+	subs->channels = req_msg->number_of_ch;
+	subs->cur_rate = req_msg->bit_rate;
+	uadev[pcm_card_num].ctrl_intf = chip->ctrl_intf;
+
+	ret = snd_usb_enable_audio_stream(subs, req_msg->enable);
+
+	if (!ret && req_msg->enable)
+		ret = prepare_qmi_response(subs, req_msg, &resp, info_idx);
+
+	mutex_unlock(&chip->dev_lock);
+
+response:
+	if (!req_msg->enable && ret != -EINVAL) {
+		if (info_idx >= 0) {
+			mutex_lock(&chip->dev_lock);
+			info = &uadev[pcm_card_num].info[info_idx];
+			uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, info);
+			pr_debug("%s:release resources: intf# %d card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			mutex_unlock(&chip->dev_lock);
+		}
+		if (atomic_read(&uadev[pcm_card_num].in_use))
+			kref_put(&uadev[pcm_card_num].kref,
+					uaudio_dev_release);
+	}
+
+	resp.usb_token = req_msg->usb_token;
+	resp.usb_token_valid = 1;
+	resp.internal_status = ret;
+	resp.internal_status_valid = 1;
+	resp.status = ret ? USB_AUDIO_STREAM_REQ_FAILURE_V01 : ret;
+	resp.status_valid = 1;
+	ret = qmi_send_resp_from_cb(svc->uaudio_svc_hdl, svc->curr_conn, req_h,
+			&uaudio_stream_resp_desc, &resp, sizeof(resp));
+
+	svc->t_resp_sent = ktime_get();
+
+	pr_debug("%s: t_resp sent - t_req recvd (in ms) %lld\n", __func__,
+		ktime_to_ms(ktime_sub(svc->t_resp_sent, svc->t_request_recvd)));
+
+	return ret;
+}
+
+static int uaudio_qmi_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || !conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+	if (svc->curr_conn) {
+		pr_err("%s: Service is busy\n", __func__);
+		return -ECONNREFUSED;
+	}
+	svc->curr_conn = conn_h;
+	return 0;
+}
+
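+/*
+ * QMI client went away: stop every stream that is still enabled and
+ * release the per-card resources handed to the remote processor.
+ */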
+static void uaudio_qmi_disconnect_work(struct work_struct *w)
+{
+	struct intf_info *info;
+	int idx, if_idx;
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip = NULL;
+
+	/* find all active intf for set alt 0 and cleanup usb audio dev */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!atomic_read(&uadev[idx].in_use))
+			continue;
+
+		for (if_idx = 0; if_idx < uadev[idx].num_intf; if_idx++) {
+			if (!uadev[idx].info || !uadev[idx].info[if_idx].in_use)
+				continue;
+			info = &uadev[idx].info[if_idx];
+			subs = find_snd_usb_substream(info->pcm_card_num,
+							info->pcm_dev_num,
+							info->direction,
+							&chip,
+							uaudio_disconnect_cb);
+			if (!subs || !chip || atomic_read(&chip->shutdown)) {
+				pr_debug("%s:no subs for c#%u, dev#%u dir%u\n",
+					__func__, info->pcm_card_num,
+					info->pcm_dev_num,
+					info->direction);
+				continue;
+			}
+			snd_usb_enable_audio_stream(subs, 0);
+		}
+		atomic_set(&uadev[idx].in_use, 0);
+		mutex_lock(&chip->dev_lock);
+		uaudio_dev_cleanup(&uadev[idx]);
+		mutex_unlock(&chip->dev_lock);
+	}
+}
+
+static int uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	svc->curr_conn = NULL;
+	queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
+
+	return 0;
+}
+
+static int uaudio_qmi_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (msg_id) {
+	case QMI_UAUDIO_STREAM_REQ_V01:
+		ret = handle_uaudio_stream_req(req_h, req);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+static int uaudio_qmi_svc_req_desc_cb(unsigned int msg_id,
+	struct msg_desc **req_desc)
+{
+	int ret;
+
+	pr_debug("%s: msg_id %d\n", __func__, msg_id);
+
+	switch (msg_id) {
+	case QMI_UAUDIO_STREAM_REQ_V01:
+		*req_desc = &uaudio_stream_req_desc;
+		ret = sizeof(struct qmi_uaudio_stream_req_msg_v01);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+static void uaudio_qmi_svc_recv_msg(struct work_struct *w)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc = container_of(w, struct uaudio_qmi_svc,
+		recv_msg_work);
+
+	do {
+		pr_debug("%s: Notified about a Receive Event\n", __func__);
+	} while ((ret = qmi_recv_msg(svc->uaudio_svc_hdl)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message\n", __func__);
+}
+
+static void uaudio_qmi_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	pr_debug("%s: event %d\n", __func__, event);
+
+	svc->t_request_recvd = ktime_get();
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_work(svc->uaudio_wq, &svc->recv_msg_work);
+		break;
+	default:
+		break;
+	}
+}
+
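+/* service description and callbacks for the UAUDIO stream QMI service */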
+static struct qmi_svc_ops_options uaudio_svc_ops_options = {
+	.version = 1,
+	.service_id = UAUDIO_STREAM_SERVICE_ID_V01,
+	.service_vers = UAUDIO_STREAM_SERVICE_VERS_V01,
+	.connect_cb = uaudio_qmi_svc_connect_cb,
+	.disconnect_cb = uaudio_qmi_svc_disconnect_cb,
+	.req_desc_cb = uaudio_qmi_svc_req_desc_cb,
+	.req_cb = uaudio_qmi_svc_req_cb,
+};
+
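+/*
+ * Read the stream ID and interrupter number from DT, attach to the remote
+ * processor's IOMMU and carve out the DCBA, transfer ring and transfer
+ * buffer IOVA regions.
+ */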
+static int uaudio_qmi_plat_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node = pdev->dev.of_node;
+
+	uaudio_qdev = devm_kzalloc(&pdev->dev, sizeof(struct uaudio_qmi_dev),
+		GFP_KERNEL);
+	if (!uaudio_qdev)
+		return -ENOMEM;
+
+	uaudio_qdev->dev = &pdev->dev;
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-stream-id",
+				&uaudio_qdev->sid);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read sid.\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-intr-num",
+				&uaudio_qdev->intr_num);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read intr num.\n");
+		return -ENODEV;
+	}
+
+	uaudio_qdev->domain = iommu_domain_alloc(pdev->dev.bus);
+	if (!uaudio_qdev->domain) {
+		dev_err(&pdev->dev, "failed to allocate iommu domain\n");
+		return -ENODEV;
+	}
+
+	/* attach to external processor iommu */
+	ret = iommu_attach_device(uaudio_qdev->domain, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to attach device ret = %d\n", ret);
+		goto free_domain;
+	}
+
+	/* initialize dcba, xfer ring and xfer buf iova list */
+	INIT_LIST_HEAD(&uaudio_qdev->dcba_list);
+	uaudio_qdev->curr_dcba_iova = IOVA_DCBA_BASE;
+	uaudio_qdev->dcba_iova_size = SNDRV_CARDS * PAGE_SIZE;
+
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list);
+	uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE;
+	uaudio_qdev->xfer_ring_iova_size =
+			IOVA_XFER_RING_MAX - IOVA_XFER_RING_BASE;
+
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_buf_list);
+	uaudio_qdev->curr_xfer_buf_iova = IOVA_XFER_BUF_BASE;
+	uaudio_qdev->xfer_buf_iova_size =
+		IOVA_XFER_BUF_MAX - IOVA_XFER_BUF_BASE;
+
+	return 0;
+
+free_domain:
+	iommu_domain_free(uaudio_qdev->domain);
+	return ret;
+}
+
+static int uaudio_qmi_plat_remove(struct platform_device *pdev)
+{
+	iommu_detach_device(uaudio_qdev->domain, &pdev->dev);
+	iommu_domain_free(uaudio_qdev->domain);
+	uaudio_qdev->domain = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id of_uaudio_match[] = {
+	{
+		.compatible = "qcom,usb-audio-qmi-dev",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_uaudio_match);
+
+static struct platform_driver uaudio_qmi_driver = {
+	.probe		= uaudio_qmi_plat_probe,
+	.remove		= uaudio_qmi_plat_remove,
+	.driver		= {
+		.name	= "uaudio-qmi",
+		.of_match_table	= of_uaudio_match,
+	},
+};
+
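+/*
+ * Create the service workqueue and QMI handle, then register the UAUDIO
+ * stream service so the remote client can connect.
+ */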
+static int uaudio_qmi_svc_init(void)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc;
+
+	svc = kzalloc(sizeof(struct uaudio_qmi_svc), GFP_KERNEL);
+	if (!svc)
+		return -ENOMEM;
+
+	svc->uaudio_wq = create_singlethread_workqueue("uaudio_svc");
+	if (!svc->uaudio_wq) {
+		ret = -ENOMEM;
+		goto free_svc;
+	}
+
+	svc->uaudio_svc_hdl = qmi_handle_create(uaudio_qmi_svc_ntfy, NULL);
+	if (!svc->uaudio_svc_hdl) {
+		pr_err("%s: Error creating svc_hdl\n", __func__);
+		ret = -EFAULT;
+		goto destroy_uaudio_wq;
+	}
+
+	ret = qmi_svc_register(svc->uaudio_svc_hdl, &uaudio_svc_ops_options);
+	if (ret < 0) {
+		pr_err("%s:Error registering uaudio svc %d\n", __func__, ret);
+		goto destroy_svc_handle;
+	}
+
+	INIT_WORK(&svc->recv_msg_work, uaudio_qmi_svc_recv_msg);
+	INIT_WORK(&svc->qmi_disconnect_work, uaudio_qmi_disconnect_work);
+
+	uaudio_svc = svc;
+
+	return 0;
+
+destroy_svc_handle:
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+destroy_uaudio_wq:
+	destroy_workqueue(svc->uaudio_wq);
+free_svc:
+	kfree(svc);
+	return ret;
+}
+
+static void uaudio_qmi_svc_exit(void)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	qmi_svc_unregister(svc->uaudio_svc_hdl);
+	flush_workqueue(svc->uaudio_wq);
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+	destroy_workqueue(svc->uaudio_wq);
+	kfree(svc);
+	uaudio_svc = NULL;
+}
+
+static int __init uaudio_qmi_plat_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&uaudio_qmi_driver);
+	if (ret)
+		return ret;
+
+	return uaudio_qmi_svc_init();
+}
+
+static void __exit uaudio_qmi_plat_exit(void)
+{
+	uaudio_qmi_svc_exit();
+	platform_driver_unregister(&uaudio_qmi_driver);
+}
+
+module_init(uaudio_qmi_plat_init);
+module_exit(uaudio_qmi_plat_exit);
+
+MODULE_DESCRIPTION("USB AUDIO QMI Service Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/usb/usb_audio_qmi_v01.c b/sound/usb/usb_audio_qmi_v01.c
new file mode 100644
index 0000000..fef7505
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_v01.c
@@ -0,0 +1,833 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "usb_audio_qmi_v01.h"
+
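+/*
+ * QMI encode/decode tables: each elem_info entry describes one TLV of the
+ * wire format and the offset of the matching field in the C structure.
+ */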
+static struct elem_info mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   va),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   pa),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info apps_mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   evt_ring),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_data),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_sync),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   xfer_buff),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   dcba),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info usb_endpoint_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bEndpointAddress),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bmAttributes),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   wMaxPacketSize),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bInterval),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bRefresh),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bSynchAddress),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info usb_interface_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceNumber),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bAlternateSetting),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bNumEndpoints),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceSubClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceProtocol),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   iInterface),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_uaudio_stream_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   enable),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum usb_audio_stream_status_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(
+				enum usb_audio_device_indication_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   dev_event),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/sound/usb/usb_audio_qmi_v01.h b/sound/usb/usb_audio_qmi_v01.h
new file mode 100644
index 0000000..83a966c
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_v01.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef USB_QMI_V01_H
+#define USB_QMI_V01_H
+
+#define UAUDIO_STREAM_SERVICE_ID_V01 0x41D
+#define UAUDIO_STREAM_SERVICE_VERS_V01 0x01
+
+#define QMI_UAUDIO_STREAM_RESP_V01 0x0001
+#define QMI_UAUDIO_STREAM_REQ_V01 0x0001
+#define QMI_UADUIO_STREAM_IND_V01 0x0001
+
+
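+/* memory region shared with the remote processor */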
+struct mem_info_v01 {
+	uint64_t va;
+	uint64_t pa;
+	uint32_t size;
+};
+
+struct apps_mem_info_v01 {
+	struct mem_info_v01 evt_ring;
+	struct mem_info_v01 tr_data;
+	struct mem_info_v01 tr_sync;
+	struct mem_info_v01 xfer_buff;
+	struct mem_info_v01 dcba;
+};
+
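+/* wire copies of the standard USB endpoint and interface descriptors */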
+struct usb_endpoint_descriptor_v01 {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bEndpointAddress;
+	uint8_t bmAttributes;
+	uint16_t wMaxPacketSize;
+	uint8_t bInterval;
+	uint8_t bRefresh;
+	uint8_t bSynchAddress;
+};
+
+struct usb_interface_descriptor_v01 {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bInterfaceNumber;
+	uint8_t bAlternateSetting;
+	uint8_t bNumEndpoints;
+	uint8_t bInterfaceClass;
+	uint8_t bInterfaceSubClass;
+	uint8_t bInterfaceProtocol;
+	uint8_t iInterface;
+};
+
+enum usb_audio_stream_status_enum_v01 {
+	USB_AUDIO_STREAM_STATUS_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_STREAM_REQ_SUCCESS_V01 = 0,
+	USB_AUDIO_STREAM_REQ_FAILURE_V01 = 1,
+	USB_AUDIO_STREAM_REQ_FAILURE_NOT_FOUND_V01 = 2,
+	USB_AUDIO_STREAM_REQ_FAILURE_INVALID_PARAM_V01 = 3,
+	USB_AUDIO_STREAM_REQ_FAILURE_MEMALLOC_V01 = 4,
+	USB_AUDIO_STREAM_STATUS_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum usb_audio_device_indication_enum_v01 {
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_DEV_CONNECT_V01 = 0,
+	USB_AUDIO_DEV_DISCONNECT_V01 = 1,
+	USB_AUDIO_DEV_SUSPEND_V01 = 2,
+	USB_AUDIO_DEV_RESUME_V01 = 3,
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_uaudio_stream_req_msg_v01 {
+	uint8_t enable;
+	uint32_t usb_token;
+	uint8_t audio_format_valid;
+	uint32_t audio_format;
+	uint8_t number_of_ch_valid;
+	uint32_t number_of_ch;
+	uint8_t bit_rate_valid;
+	uint32_t bit_rate;
+	uint8_t xfer_buff_size_valid;
+	uint32_t xfer_buff_size;
+};
+#define QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN 39
+extern struct elem_info qmi_uaudio_stream_req_msg_v01_ei[];
+
+struct qmi_uaudio_stream_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t status_valid;
+	enum usb_audio_stream_status_enum_v01 status;
+	uint8_t internal_status_valid;
+	uint32_t internal_status;
+	uint8_t slot_id_valid;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+};
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 191
+extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
+
+struct qmi_uaudio_stream_ind_msg_v01 {
+	enum usb_audio_device_indication_enum_v01 dev_event;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+};
+#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 177
+extern struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[];
+
+#endif
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 4d5c89a..93c4bed 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -61,6 +61,10 @@
 	bool autoclock;			/* from the 'autoclock' module param */
 
 	struct usb_host_interface *ctrl_intf;	/* the audio control interface */
+
+	struct mutex dev_lock;	/* to protect any race with disconnect */
+	int card_num;	/* cache pcm card number to use upon disconnect */
+	void (*disconnect_cb)(struct snd_usb_audio *chip);
 };
 
 #define usb_audio_err(chip, fmt, args...) \
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index 4144666..d5677d3 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -13,12 +13,6 @@
 #define _TOOLS_LINUX_LOG2_H
 
 /*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
  *   more efficiently than using fls() and fls64()
@@ -78,7 +72,7 @@
 #define ilog2(n)				\
 (						\
 	__builtin_constant_p(n) ? (		\
-		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) < 2 ? 0 :			\
 		(n) & (1ULL << 63) ? 63 :	\
 		(n) & (1ULL << 62) ? 62 :	\
 		(n) & (1ULL << 61) ? 61 :	\
@@ -141,10 +135,7 @@
 		(n) & (1ULL <<  4) ?  4 :	\
 		(n) & (1ULL <<  3) ?  3 :	\
 		(n) & (1ULL <<  2) ?  2 :	\
-		(n) & (1ULL <<  1) ?  1 :	\
-		(n) & (1ULL <<  0) ?  0 :	\
-		____ilog2_NaN()			\
-				   ) :		\
+		1 ) :				\
 	(sizeof(n) <= 4) ?			\
 	__ilog2_u32(n) :			\
 	__ilog2_u64(n)				\